diff --git a/.travis.yml b/.travis.yml index 39f50fd..14c1791 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,7 +30,7 @@ stages: - name: build if: tag IS present - name: gh-pages - if: tag =~ ^\d+\.\d+\.\d+ + if: tag =~ ^\d+\.\d+\.\d+$ script: skip jobs: fast_finish: true @@ -59,11 +59,11 @@ jobs: script: tox -e test-py36 python: 3.6 name: Unit Tests - - stage: test - script: python ci/travis_appveyor_build.py - python: 3.6 - name: Unit Tests (Windows) - if: branch in (master, develop) OR tag IS present + # - stage: test + # script: python ci/travis_appveyor_build.py + # python: 3.6 + # name: Unit Tests (Windows) + # if: branch in (master, develop) OR tag IS present - stage: examples-test script: tox -e examples-py27 python: 2.7 @@ -79,7 +79,7 @@ jobs: - stage: build python: 3.6 name: Build - before_deploy: python ci/get_appveyor_wheels.py + # before_deploy: python ci/get_appveyor_wheels.py deploy: - provider: pypi user: "$PYPI_USERNAME" diff --git a/Pipfile b/Pipfile index 0f5cd30..df850c7 100644 --- a/Pipfile +++ b/Pipfile @@ -9,4 +9,3 @@ scipy = "==1.0.0" matplotlib = "==2.1.0" [dev-packages] - diff --git a/appveyor.yml b/appveyor.yml index 1176176..ece1b19 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -25,7 +25,7 @@ environment: install: # We need wheel installed to build wheels - - cmd: "%PYTHON%\\python.exe -m pip install wheel tox numpy cython" + - cmd: "%PYTHON%\\python.exe -m pip install wheel tox numpy cython twine" build: off @@ -49,13 +49,15 @@ after_test: # Again, you only need if you're building C extensions for # 64-bit Python 3.3/3.4. 
And you need to use %PYTHON% to get the correct # interpreter - - cmd: if(%APPVEYOR_REPO_TAG% == 'True') %PYTHON%\\python.exe setup.py bdist_wheel ELSE echo "Not building because this is not a tag build" - - ps: if($env:APPVEYOR_REPO_TAG -eq 'True') {ls dist} + - ps: if ($(& $env:PYTHON_EXE .\ci\get_version.py) -ne '') {& $env:PYTHON_EXE setup.py bdist_wheel} ELSE {echo "Not building because this is not a tag build"} + - ps: if (& $env:PYTHON_EXE .\ci\get_version.py) {ls dist} + artifacts: # bdist_wheel puts your built wheel in the dist directory - path: dist\* -#on_success: +on_success: # You can use this step to upload your artifacts to a public website. # See Appveyor's documentation for more details. Or you can simply -# access your wheels from the Appveyor "artifacts" tab for your build. \ No newline at end of file +# access your wheels from the Appveyor "artifacts" tab for your build. + - ps: if ($(& $env:PYTHON_EXE .\ci\get_version.py) -ne '') {& $env:PYTHON_EXE setup.py bdist_wheel} ELSE {echo "Not building because this is not a tag build"} \ No newline at end of file diff --git a/ci/get_appveyor_wheels.py b/ci/get_appveyor_wheels.py index 44b9ee4..d1f853b 100644 --- a/ci/get_appveyor_wheels.py +++ b/ci/get_appveyor_wheels.py @@ -46,8 +46,8 @@ r.raise_for_status() build = r.json() job_ids = [job['jobId'] for job in build['jobs']] -if not os.path.exists(os.path.join(REPO_PATH, 'dist')) -os.mkdir(os.path.join(REPO_PATH, 'dist')) +if not os.path.exists(os.path.join(REPO_PATH, 'dist')): + os.mkdir(os.path.join(REPO_PATH, 'dist')) for job_id in job_ids: r = requests.get('{}/buildjobs/{}/artifacts'.format(api_url, job_id), headers=headers) r.raise_for_status() diff --git a/ci/get_version.py b/ci/get_version.py new file mode 100644 index 0000000..520027c --- /dev/null +++ b/ci/get_version.py @@ -0,0 +1,12 @@ +import subprocess +import sys + +import pkg_resources + +version = pkg_resources.parse_version(subprocess.check_output([sys.executable, 'setup.py',
'--version']).decode()) +if version.local: + print('') + sys.exit(1) +else: + print(version) + sys.exit(0) diff --git a/ci/travis_appveyor_build.py b/ci/travis_appveyor_build.py index bc2531e..f655d4c 100644 --- a/ci/travis_appveyor_build.py +++ b/ci/travis_appveyor_build.py @@ -45,7 +45,11 @@ def update_job(self, job_id, job_status): 'commitID': os.getenv('TRAVIS_COMMIT') } r = requests.post(api_url + '/builds', payload, headers=headers) -r.raise_for_status() +try: + r.raise_for_status() +except Exception as e: + print(r.content) + raise e build = r.json() print('Started AppVeyor build (buildId={buildId}, version={version})'.format(**build), flush=True) log = Log() diff --git a/src/MTfit/algorithms/markov_chain_monte_carlo.py b/src/MTfit/algorithms/markov_chain_monte_carlo.py index 64be869..cc69773 100644 --- a/src/MTfit/algorithms/markov_chain_monte_carlo.py +++ b/src/MTfit/algorithms/markov_chain_monte_carlo.py @@ -1516,7 +1516,7 @@ def new_sample(self, jump=0.0, gaussian_jump=False): gaussian_jump=gaussian_jump) return mt except Exception: - logging.exception('Cython error') + logging.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) # Otherwise/Fallback to use python code @@ -1611,7 +1611,8 @@ def _acceptance_check(self, xi_1, ln_pi_1, scale_factori_1=False): return xi_1, ln_pi_1, scale_factori_1[index], index return xi_1, ln_pi_1, False, index except Exception: - logger.exception('Cython Error') + if not isinstance(xi_1, dict): + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) # Otherwise use/fallback to Python code diff --git a/src/MTfit/probability/probability.py b/src/MTfit/probability/probability.py index 065512e..4e830b3 100644 --- a/src/MTfit/probability/probability.py +++ b/src/MTfit/probability/probability.py @@ -137,7 +137,7 @@ def polarity_ln_pdf(a, mt, sigma, incorrect_polarity_probability=0.0, 
_use_c=Non except Exception as e: # Run using python # Testing C code - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') if _C_LIB_TESTS: raise e else: @@ -171,7 +171,7 @@ def polarity_ln_pdf(a, mt, sigma, incorrect_polarity_probability=0.0, _use_c=Non ln_p = cprobability.ln_prod(ln_p) except Exception: if cprobability: - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') ln_p = np.sum(ln_p, 0) if isinstance(ln_p, np.ndarray): ln_p[np.isnan(ln_p)] = -np.inf @@ -255,7 +255,7 @@ def polarity_probability_ln_pdf(a, mt, positive_probability, negative_probabilit except Exception as e: # Run using python # Testing C code - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') if _C_LIB_TESTS: raise e else: @@ -360,7 +360,7 @@ def amplitude_ratio_ln_pdf(ratio, mt, a_x, a_y, percentage_error_x, percentage_e except Exception as e: # Run using python # Testing C code - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') if _C_LIB_TESTS: raise e else: @@ -394,7 +394,7 @@ def amplitude_ratio_ln_pdf(ratio, mt, a_x, a_y, percentage_error_x, percentage_e ln_p = cprobability.ln_prod(ln_p) except Exception: if cprobability: - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') ln_p = np.sum(ln_p, 0) if isinstance(ln_p, np.ndarray): ln_p[np.isnan(ln_p)] = -np.inf @@ -479,7 +479,7 @@ def relative_amplitude_ratio_ln_pdf(x_1, x_2, mt_1, mt_2, a_1, a_2, percentage_e except Exception as e: # Run using python # Testing C code - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') if _C_LIB_TESTS: raise e else: @@ -826,7 +826,7 @@ def 
dkl(ln_probability_p, ln_probability_q, dV=1.0): try: return cprobability.dkl(ln_probability_p.copy(), ln_probability_q.copy(), dV) except Exception: - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) ind = ln_probability_p > -np.inf @@ -845,7 +845,7 @@ def dkl(ln_probability_p, ln_probability_q, dV=1.0): ln_probability_q[ind]*probability_p[ind]) * dV -def ln_marginalise(ln_pdf, axis=0, dV=1.0): +def ln_marginalise(ln_pdf, axis=0, dV=1.0, _cprob_err=True): """ Marginalise the pdf from the log pdf input @@ -870,7 +870,8 @@ def ln_marginalise(ln_pdf, axis=0, dV=1.0): return cprobability.ln_marginalise(ln_pdf._ln_pdf.astype(np.float64)) return cprobability.ln_marginalise(ln_pdf.astype(np.float64)) except Exception: - logger.exception('Error running cython code') + if _cprob_err: + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) # scale and then marginalise: @@ -922,7 +923,7 @@ def ln_normalise(ln_pdf, dV=1): normalised_ln_pdf = cprobability.ln_normalise(ln_pdf) return normalised_ln_pdf except Exception: - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) # scale and then marginalise: @@ -993,7 +994,7 @@ def dkl_estimate(ln_pdf, V, N): try: return cprobability.dkl_uniform(ln_pdf.copy(), V, dV) except Exception: - logger.exception('Error running cython code') + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) ind = ln_pdf > -np.inf @@ -1263,17 +1264,18 @@ def output(self, normalise=True): return self.marginalise().normalise() return self.marginalise() - def exp(self): + def exp(self, _cprob_err=True): if cprobability: try: return 
cprobability.ln_exp(self._ln_pdf) except Exception: - logger.exception('Error running cython code') + if _cprob_err: + logger.exception('WARNING: Error running cython code, resorting to python code') else: logger.info(C_EXTENSION_FALLBACK_LOG_MSG) return np.exp(self._ln_pdf) - def nonzero(self, discard=100000., n_samples=0): + def nonzero(self, discard=100000., n_samples=0, _cprob_err=True): """ Return the non-zero indices of the pdf @@ -1286,7 +1288,7 @@ def nonzero(self, discard=100000., n_samples=0): discard: float - discard scale [default = 100000.] n_samples: integer - number of samples generated [default = 0] """ - ln_pdf = np.array(self.marginalise(axis=0)._ln_pdf).flatten() + ln_pdf = np.array(self.marginalise(axis=0, _cprob_err=_cprob_err)._ln_pdf).flatten() m_val = -np.inf if n_samples > 0 and discard > 0: m_val = max(ln_pdf) - np.log(discard*n_samples) @@ -1308,7 +1310,7 @@ def normalise(self, dV=False): new._ln_pdf = ln_normalise(self._ln_pdf, self.dV) return new - def marginalise(self, axis=0, dV=False): + def marginalise(self, axis=0, dV=False, _cprob_err=True): """ Marginalise the pdf object over a given axis @@ -1322,7 +1324,7 @@ def marginalise(self, axis=0, dV=False): if dV: self._set_dv(dV) new = self.__class__(dV=self.dV) - new._ln_pdf = ln_marginalise(self._ln_pdf, axis=axis, dV=self.dV) + new._ln_pdf = ln_marginalise(self._ln_pdf, axis=axis, dV=self.dV, _cprob_err=_cprob_err) return new def append(self, other, axis=1): diff --git a/src/MTfit/sampling.py b/src/MTfit/sampling.py index 95c1ddf..61536d6 100644 --- a/src/MTfit/sampling.py +++ b/src/MTfit/sampling.py @@ -170,7 +170,7 @@ def output(self, normalise=True, convert=False, n_samples=0, discard=10000, mcmc # Check if there are samples if len(ln_pdf): # Get non_zero samples - non_zero = ln_pdf.nonzero(discard=discard, n_samples=n_samples) + non_zero = ln_pdf.nonzero(discard=discard, n_samples=n_samples, _cprob_err=False) if discard and n_samples: output_string += 'After discard, 
'+str(non_zero.shape[0])+' samples remain\n\n' if len(ln_pdf.shape) > 1: @@ -428,7 +428,7 @@ def ln_bayesian_evidence(output, n_samples, prior=_6sphere_prior): p = prior(output['g'], output['d']) if not isinstance(output['ln_pdf'], LnPDF): output['ln_pdf'] = LnPDF(output['ln_pdf']) - return np.log((output['ln_pdf']+np.log(p)-output['ln_pdf']._ln_pdf.max()).exp().sum())+output['ln_pdf']._ln_pdf.max()-np.log(n_samples) + return np.log((output['ln_pdf']+np.log(p)-output['ln_pdf']._ln_pdf.max()).exp(_cprob_err=False).sum())+output['ln_pdf']._ln_pdf.max()-np.log(n_samples) def _convert(moment_tensors, i=None): diff --git a/src/MTfit/tests/unit/test_inversion.py b/src/MTfit/tests/unit/test_inversion.py index 5df65ce..b9be510 100644 --- a/src/MTfit/tests/unit/test_inversion.py +++ b/src/MTfit/tests/unit/test_inversion.py @@ -1313,7 +1313,7 @@ def test__recover_test(self): algorithm='Time', parallel=self.parallel, phy_mem=1, max_time=10, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() self.assertTrue(self.inversion.algorithm.pdf_sample.n, str( self.inversion.algorithm.pdf_sample.n)) @@ -1412,7 +1412,7 @@ def test_forward(self): algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() self.assertTrue(self.inversion.algorithm.pdf_sample.n, str( self.inversion.algorithm.pdf_sample.n)) @@ -1423,7 +1423,7 @@ def test_forward(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.5], [0.02]])}}, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - 
self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() try: os.remove('test.scatangle') @@ -1438,7 +1438,7 @@ def test__random_sampling_forward(self): algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_forward() self.assertTrue(self.inversion.algorithm.pdf_sample.n, str( self.inversion.algorithm.pdf_sample.n)) @@ -1450,7 +1450,7 @@ def test__random_sampling_forward(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.5], [0.02]])}}, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_forward() try: os.remove('test.scatangle') @@ -1465,7 +1465,7 @@ def test__random_sampling_multiple_forward(self): self.inversion = Inversion([data, data], multiple_events=True, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_multiple_forward() self.assertTrue(self.inversion.algorithm.pdf_sample.n, str( self.inversion.algorithm.pdf_sample.n)) @@ -1475,7 +1475,7 @@ def test__random_sampling_multiple_forward(self): f.write(self.station_angles()) self.inversion = Inversion([data, data], multiple_events=True, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle', 'test.scatangle'] - self.inversion.algorithm.max_time = 10 + 
self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_multiple_forward() try: os.remove('test.scatangle') @@ -1490,7 +1490,7 @@ def test__random_sampling_multiple_forward(self): self.inversion = Inversion([data, data], multiple_events=True, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, relative_amplitude=True, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_multiple_forward() self.assertTrue(self.inversion.algorithm.pdf_sample.n, str(self.inversion.algorithm.pdf_sample.n)) self.inversion._close_pool() @@ -1499,7 +1499,7 @@ def test__random_sampling_multiple_forward(self): f.write(self.station_angles()) self.inversion = Inversion([data, data], multiple_events=True, algorithm='Time', parallel=self.parallel, phy_mem=0.1, n=2, max_time=10, relative_amplitude=True, convert=False) self.inversion.location_pdf_files = ['test.scatangle', 'test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._random_sampling_multiple_forward() try: os.remove('test.scatangle') @@ -1522,7 +1522,7 @@ def test__mcmc_forward(self): algorithm='McMC', parallel=False, learning_length=10, chain_length=100, acceptance_rate_window=5, phy_mem=1, convert=False) self.inversion.number_samples = 100 self.assertFalse(len(self.inversion.algorithm.pdf_sample)) - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._mcmc_forward() self.assertTrue(os.path.exists('TestAMT.mat')) try: @@ -1537,7 +1537,7 @@ def test__mcmc_forward(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.5], [0.02]])}}, algorithm='Time', parallel=False, learning_length=10, chain_length=100, acceptance_rate_window=5, phy_mem=1, max_time=10, convert=False) self.inversion.location_pdf_files = 
['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._mcmc_forward() self.assertTrue(os.path.exists('TestAMT.mat')) try: @@ -1580,7 +1580,7 @@ def test__mcmc_multiple_forward_location_uncertainty(self): with open('test.scatangle', 'w') as f: f.write(self.station_angles()) self.inversion.location_pdf_files = ['test.scatangle', 'test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._mcmc_multiple_forward() try: os.remove('test.scatangle') @@ -1629,7 +1629,7 @@ def test__mcmc_multiple_forward_amplitude_location_uncertainty(self): with open('test.scatangle', 'w') as f: f.write(self.station_angles()) self.inversion.location_pdf_files = ['test.scatangle', 'test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion._mcmc_multiple_forward() try: os.remove('test.scatangle') @@ -1649,7 +1649,7 @@ def test__MATLAB_output(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.001], [0.002]])}}, algorithm='Time', parallel=self.parallel, phy_mem=1, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() try: os.remove('test.scatangle') @@ -1669,7 +1669,7 @@ def test__pickle_output(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.001], [0.002]])}}, algorithm='Time', parallel=self.parallel, phy_mem=1, max_time=10, output_format='pickle', convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() try: os.remove('test.scatangle') @@ -1716,7 +1716,7 @@ def test_recover(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.1], [0.1], [0.2]])}}, algorithm='Time', parallel=self.parallel, 
phy_mem=1, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() try: os.remove('test.scatangle') @@ -1732,7 +1732,7 @@ def test_recover(self): 'Measured': np.matrix([[1], [-1], [-1]]), 'Error': np.matrix([[0.001], [0.001], [0.02]])}}, algorithm='Time', recover=True, parallel=self.parallel, phy_mem=1, max_time=10, convert=False) self.inversion.location_pdf_files = ['test.scatangle'] - self.inversion.algorithm.max_time = 10 + self.inversion.algorithm.max_time = 5 self.inversion.forward() # LOG FILE NOT DELETING try: