diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 3192d87..0000000 --- a/.coveragerc +++ /dev/null @@ -1,28 +0,0 @@ -# .coveragerc to control coverage.py -[run] -branch = True -include = */giddy/* -disable_warnings=include-ignored - -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - # Have to re-enable the standard pragma - pragma: no cover - - # Don't complain about missing debug-only code: - def __repr__ - if self\.debug - - # Don't complain if tests don't hit defensive assertion code: - raise AssertionError - raise NotImplementedError - - # Don't complain if non-runnable code isn't run: - if 0: - if __name__ == .__main__.: - -ignore_errors = True - -[html] -directory = coverage_html_report \ No newline at end of file diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 0000000..f543544 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,16 @@ +changelog: + exclude: + labels: + - ignore-for-release + authors: + - dependabot + categories: + - title: Bug Fixes + labels: + - bug + - title: Enhancements + labels: + - enhancement + - title: Other Changes + labels: + - "*" diff --git a/.github/workflows/release_and_publish.yml b/.github/workflows/release_and_publish.yml index 89efcfc..c2f4e2e 100644 --- a/.github/workflows/release_and_publish.yml +++ b/.github/workflows/release_and_publish.yml @@ -1,10 +1,4 @@ -# Release package on GitHub and publish to PyPI -# IMPORTANT -- 1 MANUAL STEP -# * FOLLOWING TAGGED RELEASE -# - update CHANGELOG.md -#-------------------------------------------------- - -name: Build, Release, and publish +name: Release Package on: push: @@ -20,69 +14,32 @@ on: jobs: build: - name: Create release & publish to PyPI runs-on: ubuntu-latest steps: - - name: Checkout repo - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Build source and wheel distributions + run: | + 
python -m pip install --upgrade build twine + python -m build + twine check --strict dist/* - - name: Set up python - uses: actions/setup-python@v4 - with: - python-version: "3.x" + - name: Create Release Notes + uses: actions/github-script@v6 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + await github.request(`POST /repos/${{ github.repository }}/releases`, { + tag_name: "${{ github.ref }}", + generate_release_notes: true + }); - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install setuptools wheel twine jupyter urllib3 pandas pyyaml - python setup.py sdist bdist_wheel -# - name: Run Changelog -# run: | -# jupyter nbconvert --to notebook --execute --inplace --ExecutePreprocessor.timeout=-1 --ExecutePreprocessor.kernel_name=python3 tools/gitcount.ipynb -# - name: Cat Changelog -# uses: pCYSl5EDgo/cat@master -# id: changetxt -# with: -# path: ./tools/changelog.md -# env: -# TEXT: ${{ steps.changetxt.outputs.text }} -# - name: Create Release Notes -# uses: actions/github-script@v6 -# with: -# github-token: ${{secrets.GITHUB_TOKEN}} -# script: | -# await github.request(`POST /repos/${{ github.repository }}/releases`, { -# tag_name: "${{ github.ref }}", -# generate_release_notes: true -# }); -# - name: Create Release -# id: create_release -# uses: actions/create-release@v1 -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # token is provided by GHA, DO NOT create -# with: -# tag_name: ${{ github.ref }} -# release_name: Release ${{ github.ref }} -# body: ${{ steps.changetxt.outputs.text }} -# draft: false -# prerelease: false -# - name: Get Asset name -# run: | -# export PKG=$(ls dist/) -# set -- $PKG -# echo "name=$1" >> $GITHUB_ENV -# - name: Upload Release Asset -# id: upload-release-asset -# uses: actions/upload-release-asset@v1 -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# with: -# upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing 
it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps -# asset_path: dist/${{ env.name }} -# asset_name: ${{ env.name }} -# asset_content_type: application/zip - - name: Publish distribution 📦 to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} + - name: Publish distribution 📦 to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_PASSWORD }} diff --git a/.github/workflows/unittests.yml b/.github/workflows/tests.yml similarity index 90% rename from .github/workflows/unittests.yml rename to .github/workflows/tests.yml index 889488e..fd515ef 100644 --- a/.github/workflows/unittests.yml +++ b/.github/workflows/tests.yml @@ -18,8 +18,6 @@ on: jobs: unittests: - env: - RUN_TEST: pytest giddy --cov giddy -v -n auto -r a --cov-config .coveragerc --cov-report xml --color yes --cov-append --cov-report term-missing name: ${{ matrix.os }}, ${{ matrix.environment-file }} runs-on: ${{ matrix.os }} defaults: @@ -30,7 +28,7 @@ jobs: matrix: os: [ubuntu-latest] environment-file: - - ci/39.yaml + - ci/39-MIN.yaml - ci/310.yaml - ci/311.yaml - ci/311-DEV.yaml @@ -72,7 +70,7 @@ jobs: if: contains(matrix.environment-file, 'DEV') - name: run tests - run: ${{ env.RUN_TEST }} + run: pytest giddy --cov giddy -v -n auto -r a --doctest-modules --color yes --cov-report term-missing --cov-report xml - name: codecov uses: codecov/codecov-action@v3 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 78f73a7..f49f66c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ repos: - id: black language_version: python3 - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.0.292" + rev: "v0.1.0" hooks: - id: ruff diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 
fdc1fe1..0000000 --- a/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include LICENSE.txt CHANGELOG.md MANIFEST.in requirements_docs.txt requirements_tests.txt requirements.txt - diff --git a/README.md b/README.md index 19b1eb9..381d0ea 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,29 @@ PySAL-giddy for exploratory spatiotemporal data analysis -======================================================== +============================================ -[![Continuous Integration](https://github.com/pysal/giddy/actions/workflows/unittests.yml/badge.svg)](https://github.com/pysal/giddy/actions/workflows/unittests.yml) -[![codecov](https://codecov.io/gh/pysal/giddy/branch/master/graph/badge.svg)](https://codecov.io/gh/pysal/giddy) +[![Continuous Integration](https://github.com/pysal/giddy/actions/workflows/tests.yml/badge.svg)](https://github.com/pysal/giddy/actions/workflows/tests.yml) +[![codecov](https://codecov.io/gh/pysal/giddy/branch/main/graph/badge.svg)](https://codecov.io/gh/pysal/giddy) [![Gitter room](https://badges.gitter.im/pysal/giddy.svg)](https://gitter.im/pysal/giddy) [![PyPI version](https://badge.fury.io/py/giddy.svg)](https://badge.fury.io/py/giddy) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7693957.svg)](https://doi.org/10.5281/zenodo.7693957) -[![badge](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pysal/giddy/master) +[![badge](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pysal/giddy/main) [![Downloads](https://static.pepy.tech/badge/giddy)](https://pepy.tech/project/giddy) -Giddy is an open-source python library for exploratory spatiotemporal data analysis and the analysis of -geospatial distribution dynamics. -It is under active development -for the inclusion of newly proposed analytics that consider the -role of space in the evolution of distributions over time. 
+Giddy is an open-source python library for exploratory spatiotemporal data analysis and the analysis of geospatial distribution dynamics. It is under active development for the inclusion of newly proposed analytics that consider the role of space in the evolution of distributions over time. *Below are six choropleth maps of U.S. state per-capita incomes from 1929 to 2004 at a fifteen-year interval.* ![us_qunitile_maps](figs/us_qunitile_maps.png) Documentation -------------- +-------------------- Online documentation is available [here](http://pysal.org/giddy/). Features --------- +------------ + - Directional LISA, inference and visualization as rose diagram [![rose_conditional](figs/rose_conditional.png)](notebooks/DirectionalLISA.ipynb) @@ -50,7 +47,7 @@ Features Examples --------- +------------- * [Directional LISA](notebooks/DirectionalLISA.ipynb) * [Markov based methods](notebooks/MarkovBasedMethods.ipynb) @@ -60,7 +57,7 @@ Examples * [Sequence methods (Optimal matching)](notebooks/Sequence.ipynb) Installation ------------- +-------------- Install the stable version released on the [Python Package Index](https://pypi.org/project/giddy/) from the command line: @@ -71,7 +68,7 @@ pip install giddy Install the development version on [pysal/giddy](https://github.com/pysal/giddy): ``` -pip install https://github.com/pysal/giddy/archive/main.zip +pip install git+https://github.com/pysal/giddy ``` #### Requirements @@ -83,25 +80,25 @@ pip install https://github.com/pysal/giddy/archive/main.zip - quantecon>=0.4.7 Contribute ----------- +-------------- PySAL-giddy is under active development and contributors are welcome. If you have any suggestion, feature request, or bug report, please open a new [issue](https://github.com/pysal/giddy/issues) on GitHub. To submit patches, please follow the PySAL development [guidelines](https://github.com/pysal/pysal/wiki) and open a [pull request](https://github.com/pysal/giddy). 
Once your changes get merged, you’ll automatically be added to the [Contributors List](https://github.com/pysal/giddy/graphs/contributors). Support -------- +----------- If you are having issues, please talk to us in the [gitter room](https://gitter.im/pysal/giddy). License -------- +---------- -The project is licensed under the [BSD license](https://github.com/pysal/giddy/blob/master/LICENSE.txt). +The project is licensed under the [BSD license](https://github.com/pysal/giddy/blob/main/LICENSE.txt). BibTeX Citation ---------------- +--------------------- ``` @software{wei_kang_2023_7693957, @@ -126,6 +123,6 @@ BibTeX Citation ``` Funding -------- +----------- Award #1421935 [New Approaches to Spatial Distribution Dynamics](https://www.nsf.gov/awardsearch/showAward?AWD_ID=1421935) diff --git a/ci/39.yaml b/ci/39-MIN.yaml similarity index 100% rename from ci/39.yaml rename to ci/39-MIN.yaml diff --git a/codecov.yml b/codecov.yml index d4442bf..7640976 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,6 +1,6 @@ codecov: notify: - after_n_builds: 16 + after_n_builds: 5 coverage: range: 50..95 round: nearest @@ -18,5 +18,5 @@ coverage: comment: layout: "reach, diff, files" behavior: once - after_n_builds: 16 + after_n_builds: 5 require_changes: true diff --git a/giddy/__init__.py b/giddy/__init__.py index 8e89fee..025d8a4 100644 --- a/giddy/__init__.py +++ b/giddy/__init__.py @@ -1,16 +1,13 @@ -__version__ = "2.3.4" -# __version__ has to be defined in the first line - """ :mod:`giddy` --- Spatial Dynamics and Mobility ============================================== """ -from . import directional -from . import ergodic -from . import markov -from . import mobility -from . import rank -from . import util -from . import sequence +import contextlib +from importlib.metadata import PackageNotFoundError, version + +from . 
import directional, ergodic, markov, mobility, rank, sequence, util + +with contextlib.suppress(PackageNotFoundError): + __version__ = version("giddy") diff --git a/giddy/components.py b/giddy/components.py index 1a256a2..9a1e9d1 100644 --- a/giddy/components.py +++ b/giddy/components.py @@ -32,7 +32,7 @@ def is_component(w, ids): """ components = 0 - marks = dict([(node, 0) for node in ids]) + marks = {node: 0 for node in ids} q = [] for node in ids: if marks[node] == 0: @@ -103,7 +103,7 @@ def check_contiguity(w, neighbors, leaver): return is_component(w, ids) -class Graph(object): +class Graph: def __init__(self, undirected=True): self.nodes = set() self.edges = {} diff --git a/giddy/directional.py b/giddy/directional.py index 6136881..5d4b36f 100644 --- a/giddy/directional.py +++ b/giddy/directional.py @@ -17,7 +17,7 @@ _NEG4 = 1 - _POS4 -class Rose(object): +class Rose: """ Rose diagram based inference for directional LISAs. @@ -200,7 +200,12 @@ def __init__(self, Y, w, k=8): rose diagram conditional on the starting relative income: >>> fig1, _ = r8.plot(attribute=Y[:,0]) - >>> plt.show() + >>> plt.show(block=False) + + Close plot when finished viewing. + + >>> plt.close("all") + """ self.Y = Y @@ -292,7 +297,7 @@ def permute(self, permutations=99, alternative="two.sided"): P = NEG * L + (1 - NEG) * S self.p = P else: - print(("Bad option for alternative: %s." % alternative)) + print("Bad option for alternative: %s." 
% alternative) def _calc(self, Y, w, k): wY = weights.lag_spatial(w, Y) diff --git a/giddy/ergodic.py b/giddy/ergodic.py index 12497ae..d1a3858 100644 --- a/giddy/ergodic.py +++ b/giddy/ergodic.py @@ -5,10 +5,12 @@ __all__ = ["steady_state", "var_mfpt_ergodic", "mfpt"] +from warnings import warn + import numpy as np import numpy.linalg as la import quantecon as qe -from warnings import warn + from .util import fill_empty_diagonals @@ -122,7 +124,9 @@ def steady_state(P, fill_empty_classes=False): >>> steady_state(p, fill_empty_classes = False) Traceback (most recent call last): ... - ValueError: Input transition probability matrix has 1 rows full of 0s. Please set fill_empty_classes=True to set diagonal elements for these rows to be 1 to make sure the matrix is stochastic. + ValueError: Input transition probability matrix has 1 rows full of 0s. \ +Please set fill_empty_classes=True to set diagonal elements for these \ +rows to be 1 to make sure the matrix is stochastic. """ @@ -292,7 +296,9 @@ def mfpt(P, fill_empty_classes=False): >>> mfpt(p, fill_empty_classes=False) Traceback (most recent call last): ... - ValueError: Input transition probability matrix has 1 rows full of 0s. Please set fill_empty_classes=True to set diagonal elements for these rows to be 1 to make sure the matrix is stochastic. + ValueError: Input transition probability matrix has 1 rows full of 0s. \ +Please set fill_empty_classes=True to set diagonal elements for these \ +rows to be 1 to make sure the matrix is stochastic. 
""" P = np.asarray(P) @@ -325,20 +331,19 @@ def mfpt(P, fill_empty_classes=False): try: m[none0] = np.linalg.solve(p_calc, b) except np.linalg.LinAlgError as err: - if "Singular matrix" in str(err): - if (row0 == 0).sum() > 0: - index0 = set(np.argwhere(row0 == 0).flatten()) + if "Singular matrix" in str(err) and (row0 == 0).sum() > 0: + index0 = set(np.argwhere(row0 == 0).flatten()) + x = (p_calc[:, list(index0)] != 0).sum(axis=1) + setx = set(np.argwhere(x).flatten()) + while not setx.issubset(index0): + index0 = index0.union(setx) x = (p_calc[:, list(index0)] != 0).sum(axis=1) setx = set(np.argwhere(x).flatten()) - while not setx.issubset(index0): - index0 = index0.union(setx) - x = (p_calc[:, list(index0)] != 0).sum(axis=1) - setx = set(np.argwhere(x).flatten()) - none0 = np.asarray(list(set(none0).difference(index0))) - if len(none0) >= 1: - p_calc = p_calc[none0, :][:, none0] - b = b[none0] - m[none0] = np.linalg.solve(p_calc, b) + none0 = np.asarray(list(set(none0).difference(index0))) + if len(none0) >= 1: + p_calc = p_calc[none0, :][:, none0] + b = b[none0] + m[none0] = np.linalg.solve(p_calc, b) recc = ( np.nan_to_num( (np.delete(P, desti, 1)[desti] * m), 0, posinf=np.inf diff --git a/giddy/markov.py b/giddy/markov.py index 6166d9f..5b1d5a8 100644 --- a/giddy/markov.py +++ b/giddy/markov.py @@ -15,18 +15,20 @@ "GeoRank_Markov", ] -import numpy as np -from .ergodic import steady_state, mfpt -from .util import fill_empty_diagonals -from .components import Graph -from scipy import stats -from scipy.stats import rankdata +import itertools from operator import gt -from libpysal import weights -from esda.moran import Moran_Local + import mapclassify as mc -import itertools +import numpy as np import quantecon as qe +from esda.moran import Moran_Local +from libpysal import weights +from scipy import stats +from scipy.stats import rankdata + +from .components import Graph +from .ergodic import mfpt, steady_state +from .util import fill_empty_diagonals # TT 
predefine LISA transitions # TT[i,j] is the transition type from i to j @@ -61,7 +63,7 @@ c += 1 -class Markov(object): +class Markov: """ Classic Markov Chain estimation. @@ -239,8 +241,8 @@ def __init__(self, class_ids, classes=None, fill_empty_classes=False, summary=Tr self.cclasses_indices = markovchain.communication_classes_indices self.rclasses_indices = markovchain.recurrent_classes_indices - transient = set(list(map(tuple, self.cclasses_indices))).difference( - set(list(map(tuple, self.rclasses_indices))) + transient = set(map(tuple, self.cclasses_indices)).difference( + set(map(tuple, self.rclasses_indices)) ) self.num_tclasses = len(transient) if len(transient): @@ -258,7 +260,7 @@ def __init__(self, class_ids, classes=None, fill_empty_classes=False, summary=Tr if self.num_rclasses == 1: print("1 Recurrent class (indices):") else: - print("{0} Recurrent classes (indices):".format(self.num_rclasses)) + print(f"{self.num_rclasses} Recurrent classes (indices):") print(*self.rclasses_indices, sep=", ") if self.num_tclasses == 0: print("0 Transient classes.") @@ -266,7 +268,7 @@ def __init__(self, class_ids, classes=None, fill_empty_classes=False, summary=Tr if self.num_tclasses == 1: print("1 Transient class (indices):") else: - print("{0} Transient classes (indices):".format(self.num_tclasses)) + print(f"{self.num_tclasses} Transient classes (indices):") print(*self.tclasses_indices, sep=", ") if self.num_astates == 0: print("The Markov Chain has 0 absorbing states.") @@ -275,9 +277,8 @@ def __init__(self, class_ids, classes=None, fill_empty_classes=False, summary=Tr print("The Markov Chain has 1 absorbing state (index):") else: print( - "The Markov Chain has {0} absorbing states (indices):".format( - self.num_astates - ) + f"The Markov Chain has {self.num_astates} " + "absorbing states (indices):" ) print(*self.astates_indices, sep=", ") @@ -300,7 +301,7 @@ def sojourn_time(self): return self._st -class Spatial_Markov(object): +class Spatial_Markov: """ 
Markov transitions conditioned on the value of the spatial lag. @@ -851,7 +852,7 @@ def s(self): def S(self): if not hasattr(self, "_S"): _S = [] - for i, p in enumerate(self.P): + for p in self.P: _S.append(steady_state(p)) # if np.array(_S).dtype is np.dtype('O'): self._S = np.asarray(_S, dtype=object) @@ -1529,8 +1530,8 @@ def spillover(self, quadrant=1, neighbors_on=False): spill_ids.append(j) break for spill_id in spill_ids: - id = self.w.id2i[spill_id] - spill_over[id, t] = 1 + id_ = self.w.id2i[spill_id] + spill_over[id_, t] = 1 for c, component in enumerate(components1): for i in component: ii = self.w.id2i[i] @@ -1861,7 +1862,7 @@ def summary(self, file_name=None, title="Markov Homogeneity Test"): contents.append(stat) stat = "%7s %20.3f %20.3f" % ("p-value", self.LR_p_value, self.Q_p_value) contents.append(stat) - print(("\n".join(contents))) + print("\n".join(contents)) print(lead) cols = ["P(%s)" % str(regime) for regime in self.regime_names] @@ -1874,14 +1875,14 @@ def summary(self, file_name=None, title="Markov Homogeneity Test"): p0 = [] line0 = ["{s: <{w}}".format(s="P(H0)", w=col_width)] line0.extend( - (["{s: >{w}}".format(s=cname, w=col_width) for cname in self.class_names]) + ["{s: >{w}}".format(s=cname, w=col_width) for cname in self.class_names] ) - print((" ".join(line0))) + print(" ".join(line0)) p0.append("&".join(line0)) for i, row in enumerate(self.p_h0): line = ["%*s" % (col_width, str(self.class_names[i]))] line.extend(["%*.3f" % (col_width, v) for v in row]) - print((" ".join(line))) + print(" ".join(line)) p0.append("&".join(line)) pmats = [p0] @@ -1890,19 +1891,14 @@ def summary(self, file_name=None, title="Markov Homogeneity Test"): p0 = [] line0 = ["{s: <{w}}".format(s="P(%s)" % regime_names[r], w=col_width)] line0.extend( - ( - [ - "{s: >{w}}".format(s=cname, w=col_width) - for cname in self.class_names - ] - ) + ["{s: >{w}}".format(s=cname, w=col_width) for cname in self.class_names] ) - print((" ".join(line0))) + print(" 
".join(line0)) p0.append("&".join(line0)) for i, row in enumerate(p1): line = ["%*s" % (col_width, str(self.class_names[i]))] line.extend(["%*.3f" % (col_width, v) for v in row]) - print((" ".join(line))) + print(" ".join(line)) p0.append("&".join(line)) pmats.append(p0) print(lead) @@ -1910,7 +1906,7 @@ def summary(self, file_name=None, title="Markov Homogeneity Test"): if file_name: k = self.k ks = str(k + 1) - with open(file_name, "w") as f: + with open(file_name + ".tex", "w") as f: c = [] fmt = "r" * (k + 1) s = "\\begin{tabular}{|%s|}\\hline\n" % fmt @@ -2037,7 +2033,7 @@ def __init__(self, y, fill_empty_classes=False, summary=True): r_asc = np.array([rankdata(col, method="ordinal") for col in y.T]).T # ranks by high (1) to low (n) self.ranks = r_asc.shape[0] - r_asc + 1 - super(FullRank_Markov, self).__init__( + super().__init__( self.ranks, fill_empty_classes=fill_empty_classes, summary=summary ) @@ -2079,7 +2075,7 @@ def sojourn_time(p, summary=True): >>> sojourn_time(p) Sojourn times are infinite for absorbing states! In this Markov Chain, states [2] are absorbing states. array([ 2., 1., inf]) - """ + """ # noqa E501 p = np.asarray(p) if (p.sum(axis=1) == 0).sum() > 0: @@ -2095,9 +2091,7 @@ def sojourn_time(p, summary=True): if summary: print( "Sojourn times are infinite for absorbing states! In this " - "Markov Chain, states {} are absorbing states.".format( - list(absorbing_states) - ) + f"Markov Chain, states {list(absorbing_states)} are absorbing states." ) st[non_absorbing_states] = 1 / (1 - pii[non_absorbing_states]) else: @@ -2213,6 +2207,6 @@ def __init__(self, y, fill_empty_classes=False, summary=True): # to the order that the values occur in each cross section. 
ranks = np.array([rankdata(col, method="ordinal") for col in y.T]).T geo_ranks = np.argsort(ranks, axis=0) + 1 - super(GeoRank_Markov, self).__init__( + super().__init__( geo_ranks, fill_empty_classes=fill_empty_classes, summary=summary ) diff --git a/giddy/mobility.py b/giddy/mobility.py index 616d6fe..893dac6 100644 --- a/giddy/mobility.py +++ b/giddy/mobility.py @@ -11,7 +11,7 @@ def markov_mobility(p, measure="P", ini=None): - """ + r""" Markov-based mobility index. Parameters diff --git a/giddy/rank.py b/giddy/rank.py index efb6cf2..2b92dfc 100644 --- a/giddy/rank.py +++ b/giddy/rank.py @@ -13,10 +13,10 @@ "Tau_Regional", ] -from scipy.stats.mstats import rankdata -from scipy.special import erfc import numpy as np from libpysal import weights +from scipy.special import erfc +from scipy.stats.mstats import rankdata class Theta: @@ -264,7 +264,7 @@ def _calc(self, x, y): return tau, pval, Concordant, Discordant, ExtraX, ExtraY -class SpatialTau(object): +class SpatialTau: """ Spatial version of Kendall's rank correlation statistic. diff --git a/giddy/sequence.py b/giddy/sequence.py index 8d9d481..0b07a12 100644 --- a/giddy/sequence.py +++ b/giddy/sequence.py @@ -7,12 +7,14 @@ __all__ = ["Sequence"] import itertools + import numpy as np import scipy.spatial.distance as d + from .markov import Markov -class Sequence(object): +class Sequence: """ Pairwise sequence analysis. @@ -172,7 +174,7 @@ class Sequence(object): >>> seqAna = Sequence([seq1,seq2,seq3], indel=indel) Traceback (most recent call last): ValueError: Please specify a proper `dist_type` or `subs_mat` and `indel` to proceed! - """ + """ # noqa E501 def __init__(self, y, subs_mat=None, dist_type=None, indel=None, cluster_type=None): y = np.asarray(y) diff --git a/giddy/tests/test_ergodic.py b/giddy/tests/test_ergodic.py index e180e37..434d88d 100644 --- a/giddy/tests/test_ergodic.py +++ b/giddy/tests/test_ergodic.py @@ -1,3 +1,4 @@ +import pytest import unittest from .. 
import ergodic import numpy as np @@ -58,7 +59,8 @@ def test_mfpt(self): ) np.testing.assert_array_almost_equal(exp, obs) - obs = ergodic.fmpt(self.p2) + with pytest.warns(DeprecationWarning, match="fmpt is deprecated."): + obs = ergodic.fmpt(self.p2) exp = np.array( [ [2.66666667, 2.0, np.inf], diff --git a/giddy/util.py b/giddy/util.py index 02761e4..e31e609 100644 --- a/giddy/util.py +++ b/giddy/util.py @@ -4,9 +4,10 @@ __all__ = ["shuffle_matrix", "get_lower", "fill_empty_diagonals"] -import numpy as np import copy +import numpy as np + def shuffle_matrix(X, ids): """ diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0c126a9 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,121 @@ +[build-system] +requires = ["setuptools>=61.0", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[tool.setuptools_scm] + +[project] +name = "giddy" +dynamic = ["version"] +maintainers = [ + {name = "Wei Kang", email = "weikang9009@gmail.com"}, +] +license = {text = "BSD 3-Clause"} +description = "PySAL-giddy for exploratory spatiotemporal data analysis" +keywords = ["spatial statistics", "spatiotemporal analysis"] +readme = "README.md" +classifiers = [ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "License :: OSI Approved :: BSD License", + "Operating System :: OS Independent", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering :: GIS", +] +requires-python = ">=3.9" +dependencies = [ + "esda>=2.1.1", + "libpysal>=4.0.1", + "mapclassify>=2.1.1", + "quantecon>=0.4.7", + "scipy>=1.3", +] + +[project.urls] +Home = "https://pysal.org/giddy/" +Repository = "https://github.com/pysal/giddy" + +[project.optional-dependencies] +dev = [ + "black", + "ruff", + "pre-commit", +] +docs = [ + "nbsphinx", + "numpydoc", + "sphinx", + "sphinxcontrib-bibtex", + "sphinx_bootstrap_theme", +] +tests = [ + "codecov", 
"ipywidgets", + "matplotlib", + "pytest", + "pytest-cov", + "pytest-xdist", + "splot", +] + +[tool.setuptools.packages.find] +include = [ + "giddy", + "giddy.*", +] + +[tool.black] +line-length = 88 +extend-exclude = ''' +( + docs/conf.py +) +#''' + +[tool.ruff] +line-length = 88 +select = ["E", "F", "W", "I", "UP", "N", "B", "A", "C4", "SIM", "ARG"] +target-version = "py39" +exclude = ["giddy/tests/*", "docs/*"] +[tool.ruff.per-file-ignores] +"__init__.py" = [ + "F401" # imported but unused +] +"giddy/{directional,ergodic,markov,rank,sequence,util}.py" = [ + "ARG002", # Unused method argument + "N801", # Class name should use CapWords convention + "N802", # Function name should be lowercase + "N803", # Argument name should be lowercase + "N806", # Variable in function should be lowercase +] +"giddy/markov.py" = [ + "B006", # Do not use mutable data structures for argument defaults + "UP031", # Use format specifiers instead of percent format +] +"giddy/{ergodic,markov,sequence}.py" = [ + "E501", # Line too long +] + +[tool.coverage.run] +source = ["./giddy"] + +[tool.coverage.report] +exclude_lines = [ + "raise NotImplementedError", + "except ModuleNotFoundError:", + "except ImportError", +] +ignore_errors = true +omit = ["giddy/tests/*", "docs/conf.py"] + +[tool.pytest.ini_options] +filterwarnings = [ + "ignore:The weights matrix is not fully connected", # libpysal + "ignore:Objects based on the `Geometry` class will deprecated", # libpysal + "ignore:divide by zero encountered", + "ignore:invalid value encountered", + "ignore:numba.generated_jit is deprecated.", # numba/quantecon + "ignore:::.*quantecon.lss:19", # numba/quantecon +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 62173ce..0000000 --- a/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -esda>=2.1.1 -libpysal>=4.0.1 -mapclassify>=2.1.1 -quantecon>=0.4.7 -scipy>=1.3.0 diff --git a/requirements_docs.txt b/requirements_docs.txt deleted file mode 
100644 index e2e48c6..0000000 --- a/requirements_docs.txt +++ /dev/null @@ -1,5 +0,0 @@ -nbsphinx -numpydoc -sphinx>=1.4.3 -sphinxcontrib-bibtex -sphinx_bootstrap_theme diff --git a/requirements_tests.txt b/requirements_tests.txt deleted file mode 100644 index 1259a06..0000000 --- a/requirements_tests.txt +++ /dev/null @@ -1,8 +0,0 @@ -codecov -ipywidgets -matplotlib -pandas -pytest -pytest-cov -pytest-xdist -splot \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 9f90916..0000000 --- a/setup.py +++ /dev/null @@ -1,95 +0,0 @@ -"""GIDDY: GeospatIal Distribution DYnamics and Exploratory SpatioTemporal Data Analysis (ESTDA) - -Giddy is an open-source python library for exploratory spatiotemporal data analysis -and the analysis of geospatial distribution dynamics. It is under active development -for the inclusion of many newly proposed analytics that consider the -role of space in the evolution of distributions over time and has -several new features including inter- and intra-regional decomposition -of mobility association and local measures of exchange mobility in -addition to space-time LISA and spatial markov methods. Give -giddy a try if you are interested in space-time analysis! - -""" - -DOCLINES = __doc__.split("\n") - -with open("README.md", "r", encoding="utf8") as file: - long_description = file.read() - - -from setuptools import setup, find_packages -from distutils.command.build_py import build_py -import os - -# Get __version__ from giddy/__init__.py without importing the package -# __version__ has to be defined in the first line -with open("giddy/__init__.py", "r") as f: - exec(f.readline()) - -# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly -# update it when the contents of directories change. 
-if os.path.exists("MANIFEST"): - os.remove("MANIFEST") - - -def _get_requirements_from_files(groups_files): - groups_reqlist = {} - - for k, v in groups_files.items(): - with open(v, "r") as f: - pkg_list = f.read().splitlines() - groups_reqlist[k] = pkg_list - - return groups_reqlist - - -def setup_package(): - _groups_files = { - "base": "requirements.txt", - "tests": "requirements_tests.txt", - "docs": "requirements_docs.txt", - } - - reqs = _get_requirements_from_files(_groups_files) - install_reqs = reqs.pop("base") - extras_reqs = reqs - - setup( - name="giddy", # name of package - version=__version__, - description=DOCLINES[0], - # long_description="\n".join(DOCLINES[2:]), - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pysal/giddy", - maintainer="Wei Kang", - maintainer_email="weikang9009@gmail.com", - py_modules=["giddy"], - python_requires=">3.6", - # setup_requires=["pytest-runner"], - # tests_require=["pytest"], - keywords="spatial statistics, spatiotemporal analysis", - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Science/Research", - "Intended Audience :: Developers", - "Intended Audience :: Education", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: GIS", - "License :: OSI Approved :: BSD License", - "Programming Language :: Python", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - ], - license="3-Clause BSD", - packages=find_packages(), - install_requires=install_reqs, - extras_require=extras_reqs, - zip_safe=False, - cmdclass={"build.py": build_py}, - ) - - -if __name__ == "__main__": - setup_package() diff --git a/tools/gitcount.ipynb b/tools/gitcount.ipynb deleted file mode 100644 index ed19906..0000000 --- a/tools/gitcount.ipynb +++ /dev/null @@ -1,627 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - 
"source": [ - "## PySAL Change Log Statistics\n", - "\n", - "This notebook generates the summary statistics for a package. \n", - "\n", - "It assumes you are running this under the `tools` directory at the toplevel of the package\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Change the values only in the next cell" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# get date of last tag\n", - "from subprocess import Popen, PIPE\n", - "\n", - "x, err = Popen(\n", - " 'git log -1 --tags --simplify-by-decoration --pretty=\"%ai\"| cat',\n", - " stdin=PIPE,\n", - " stdout=PIPE,\n", - " stderr=PIPE,\n", - " shell=True,\n", - ").communicate()\n", - "start_date = x.split()[0].decode(\"utf-8\")\n", - "start_date" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# today's date\n", - "import datetime\n", - "\n", - "release_date = str(datetime.datetime.today()).split()[0]\n", - "release_date" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "package_name = \"giddy\"\n", - "# release_date = '2019-12-20'\n", - "# start_date = '2019-12-20'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook will generate a file in the current directory with the name \"changelog_VERSION.md\". You can edit and append this on front of the CHANGELOG file for the package release." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import print_function\n", - "import os\n", - "import json\n", - "import re\n", - "import sys\n", - "import pandas\n", - "\n", - "from datetime import datetime, timedelta\n", - "from time import sleep\n", - "from subprocess import check_output\n", - "\n", - "try:\n", - " from urllib import urlopen\n", - "except:\n", - " from urllib.request import urlopen\n", - "\n", - "import ssl\n", - "import yaml\n", - "\n", - "context = ssl._create_unverified_context()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "CWD = os.path.abspath(os.path.curdir)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "CWD" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "since_date = '--since=\"{start}\"'.format(start=start_date)\n", - "since_date\n", - "since = datetime.strptime(start_date + \" 0:0:0\", \"%Y-%m-%d %H:%M:%S\")\n", - "since" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# get __version__\n", - "f = \"../{package}/__init__.py\".format(package=package_name)\n", - "\n", - "with open(f, \"r\") as initfile:\n", - " exec(initfile.readline())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Total commits by subpackage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cmd = [\"git\", \"log\", \"--oneline\", since_date]\n", - "ncommits = len(check_output(cmd).splitlines())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ncommits" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## List Contributors" - ] - }, - 
{ - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some of our contributors have many aliases for the same identity. So, we've added a mapping to make sure that individuals are listed once (and only once). " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "identities = {\n", - " \"Levi John Wolf\": (\"ljwolf\", \"Levi John Wolf\"),\n", - " \"Serge Rey\": (\"Serge Rey\", \"Sergio Rey\", \"sjsrey\", \"serge\"),\n", - " \"Wei Kang\": (\"Wei Kang\", \"weikang9009\"),\n", - " \"Dani Arribas-Bel\": (\"Dani Arribas-Bel\", \"darribas\"),\n", - "}\n", - "\n", - "\n", - "def regularize_identity(string):\n", - " string = string.decode()\n", - " for name, aliases in identities.items():\n", - " for alias in aliases:\n", - " if alias in string:\n", - " string = string.replace(alias, name)\n", - " if len(string.split(\" \")) > 1:\n", - " string = string.title()\n", - " return string.lstrip(\"* \")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "author_cmd = [\"git\", \"log\", \"--format=* %aN\", since_date]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from collections import Counter" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ncommits = len(check_output(cmd).splitlines())\n", - "all_authors = check_output(author_cmd).splitlines()\n", - "counter = Counter([regularize_identity(author) for author in all_authors])\n", - "# global_counter += counter\n", - "# counters.update({'.'.join((package,subpackage)): counter})\n", - "unique_authors = sorted(set(all_authors))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "unique_authors = counter.keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "unique_authors" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Disaggregate by PR, Issue" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datetime import datetime, timedelta\n", - "\n", - "ISO8601 = \"%Y-%m-%dT%H:%M:%SZ\"\n", - "PER_PAGE = 100\n", - "element_pat = re.compile(r\"<(.+?)>\")\n", - "rel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def parse_link_header(headers):\n", - " link_s = headers.get(\"link\", \"\")\n", - " urls = element_pat.findall(link_s)\n", - " rels = rel_pat.findall(link_s)\n", - " d = {}\n", - " for rel, url in zip(rels, urls):\n", - " d[rel] = url\n", - " return d\n", - "\n", - "\n", - "def get_paged_request(url):\n", - " \"\"\"get a full list, handling APIv3's paging\"\"\"\n", - " results = []\n", - " while url:\n", - " # print(\"fetching %s\" % url, file=sys.stderr)\n", - " f = urlopen(url)\n", - " results.extend(json.load(f))\n", - " links = parse_link_header(f.headers)\n", - " url = links.get(\"next\")\n", - " return results\n", - "\n", - "\n", - "def get_issues(project=\"pysal/giddy\", state=\"closed\", pulls=False):\n", - " \"\"\"Get a list of the issues from the Github API.\"\"\"\n", - " which = \"pulls\" if pulls else \"issues\"\n", - " url = \"https://api.github.com/repos/%s/%s?state=%s&per_page=%i\" % (\n", - " project,\n", - " which,\n", - " state,\n", - " PER_PAGE,\n", - " )\n", - " return get_paged_request(url)\n", - "\n", - "\n", - "def _parse_datetime(s):\n", - " \"\"\"Parse dates in the format returned by the Github API.\"\"\"\n", - " if s:\n", - " return datetime.strptime(s, ISO8601)\n", - " else:\n", - " return datetime.fromtimestamp(0)\n", - "\n", - "\n", - "def issues2dict(issues):\n", - " \"\"\"Convert a list of issues to a dict, keyed by issue number.\"\"\"\n", - " 
idict = {}\n", - " for i in issues:\n", - " idict[i[\"number\"]] = i\n", - " return idict\n", - "\n", - "\n", - "def is_pull_request(issue):\n", - " \"\"\"Return True if the given issue is a pull request.\"\"\"\n", - " return \"pull_request_url\" in issue\n", - "\n", - "\n", - "def issues_closed_since(period=timedelta(days=365), project=\"pysal/pysal\", pulls=False):\n", - " \"\"\"Get all issues closed since a particular point in time. period\n", - " can either be a datetime object, or a timedelta object. In the\n", - " latter case, it is used as a time before the present.\"\"\"\n", - "\n", - " which = \"pulls\" if pulls else \"issues\"\n", - "\n", - " if isinstance(period, timedelta):\n", - " period = datetime.now() - period\n", - " url = (\n", - " \"https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i\"\n", - " % (project, which, period.strftime(ISO8601), PER_PAGE)\n", - " )\n", - " allclosed = get_paged_request(url)\n", - " # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)\n", - " filtered = [i for i in allclosed if _parse_datetime(i[\"closed_at\"]) > period]\n", - "\n", - " # exclude rejected PRs\n", - " if pulls:\n", - " filtered = [pr for pr in filtered if pr[\"merged_at\"]]\n", - "\n", - " return filtered\n", - "\n", - "\n", - "def sorted_by_field(issues, field=\"closed_at\", reverse=False):\n", - " \"\"\"Return a list of issues sorted by closing date date.\"\"\"\n", - " return sorted(issues, key=lambda i: i[field], reverse=reverse)\n", - "\n", - "\n", - "def report(issues, show_urls=False):\n", - " \"\"\"Summary report about a list of issues, printing number and title.\"\"\"\n", - " # titles may have unicode in them, so we must encode everything below\n", - " if show_urls:\n", - " for i in issues:\n", - " role = \"ghpull\" if \"merged_at\" in i else \"ghissue\"\n", - " print(\"* :%s:`%d`: %s\" % (role, i[\"number\"], i[\"title\"].encode(\"utf-8\")))\n", - " else:\n", - " for i in issues:\n", - 
" print(\"* %d: %s\" % (i[\"number\"], i[\"title\"].encode(\"utf-8\")))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "all_issues = {}\n", - "all_pulls = {}\n", - "total_commits = 0\n", - "# prj='pysal/libpysal'\n", - "prj = \"pysal/{package}\".format(package=package_name)\n", - "issues = issues_closed_since(since, project=prj, pulls=False)\n", - "pulls = issues_closed_since(since, project=prj, pulls=True)\n", - "issues = sorted_by_field(issues, reverse=True)\n", - "pulls = sorted_by_field(pulls, reverse=True)\n", - "n_issues, n_pulls = map(len, (issues, pulls))\n", - "n_total = n_issues + n_pulls" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "issue_listing = []\n", - "for issue in issues:\n", - " entry = \"{title} (#{number})\".format(title=issue[\"title\"], number=issue[\"number\"])\n", - " issue_listing.append(entry)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pull_listing = []\n", - "for pull in pulls:\n", - " entry = \"{title} (#{number})\".format(title=pull[\"title\"], number=pull[\"number\"])\n", - " pull_listing.append(entry)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pull_listing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "message = \"We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests\".format(\n", - " total=n_total, pr=n_pulls\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "message = \"{msg}, since our last release on {previous}.\".format(\n", - " msg=message, previous=str(start_date)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], 
- "source": [ - "message" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "message += \"\\n\\n## Issues Closed\\n\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(message)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "issues = \"\\n\".join([\" - \" + issue for issue in issue_listing])\n", - "message += issues\n", - "message += \"\\n\\n## Pull Requests\\n\"\n", - "pulls = \"\\n\".join([\" - \" + pull for pull in pull_listing])\n", - "message += pulls" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(message)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "people = \"\\n\".join([\" - \" + person for person in unique_authors])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(people)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "message += (\n", - " \"\\n\\nThe following individuals contributed to this release:\\n\\n{people}\".format(\n", - " people=people\n", - " )\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(message)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "head = \"# Version {version} ({release_date})\\n\\n\".format(\n", - " version=__version__, release_date=release_date\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# new_content = head+message+\"\\n\"\n", - "# print(new_content)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "# #insert the new changes in the begining of CHANGELOG.md\n", - "# with open(\"../CHANGELOG.md\", 'r+') as file:\n", - "# content = file.read()\n", - "# file.seek(0, 0)\n", - "# file.write(new_content+ content)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# outfile = 'changelog_{version}.md'.format(version=__version__)\n", - "outfile = \"changelog.md\"\n", - "with open(outfile, \"w\") as of:\n", - " of.write(head + message)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.13" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}