From 73b2ab72a7096236e9a91e847656225d07d932cf Mon Sep 17 00:00:00 2001 From: Fabio Muratore Date: Sat, 16 Apr 2022 10:44:18 +0200 Subject: [PATCH] Initial commit --- .github/codecov.yml | 22 +++++ .github/workflows/black.yml | 13 +++ .github/workflows/run_tests.yml | 36 +++++++++ .gitignore | 133 +++++++++++++++++++++++++++++++ LICENSE.txt | 21 +++++ README.md | 85 ++++++++++++++++++++ examples/demo.py | 93 +++++++++++++++++++++ ndc/__init__.py | 1 + ndc/numerical_differentiation.py | 107 +++++++++++++++++++++++++ pyproject.toml | 32 ++++++++ setup.cfg | 27 +++++++ setup.py | 46 +++++++++++ tests/conftest.py | 31 +++++++ tests/pytest.ini | 4 + tests/test_ndc.py | 124 ++++++++++++++++++++++++++++ 15 files changed, 775 insertions(+) create mode 100644 .github/codecov.yml create mode 100644 .github/workflows/black.yml create mode 100644 .github/workflows/run_tests.yml create mode 100644 .gitignore create mode 100644 LICENSE.txt create mode 100644 README.md create mode 100644 examples/demo.py create mode 100644 ndc/__init__.py create mode 100644 ndc/numerical_differentiation.py create mode 100644 pyproject.toml create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tests/conftest.py create mode 100644 tests/pytest.ini create mode 100644 tests/test_ndc.py diff --git a/.github/codecov.yml b/.github/codecov.yml new file mode 100644 index 0000000..ed896b4 --- /dev/null +++ b/.github/codecov.yml @@ -0,0 +1,22 @@ +codecov: + token: ad953cd4-94de-4240-98cd-fe7037ca4277 +ignore: + - "examples" + - "tests" + - "setup.py" +comment: + layout: "reach, diff, flags, files" + behavior: default + require_changes: false # if true: only post the comment if coverage changes + require_base: no # [yes :: must have a base report to post] + require_head: yes # [yes :: must have a head report to post] + branches: # branch names that can post comment + - "main" +github_checks: + annotations: false +coverage: + status: + project: + default: + target: auto + threshold: 80% diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml new file mode 100644 index 0000000..01f8d5c --- /dev/null +++ b/.github/workflows/black.yml @@ -0,0 +1,13 @@ +name: Lint + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: psf/black@stable + with: + args: ". 
--check" diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml new file mode 100644 index 0000000..4186211 --- /dev/null +++ b/.github/workflows/run_tests.yml @@ -0,0 +1,36 @@ +name: Run Tests + +on: [push, pull_request] + +jobs: + build-images: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + #os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] + env: + os: ${{ matrix.os }} + python: ${{ matrix.python-version }} + steps: + - uses: actions/checkout@v2 + with: + submodules: true + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install + run: | + pip install -e .[dev] + - name: Execute tests + run: | + pytest --cov=ndc --cov-report=xml + - name: Upload Coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage.xml + flags: unittests + env_vars: OS,PYTHON + fail_ci_if_error: true + verbose: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..03c7edc --- /dev/null +++ b/.gitignore @@ -0,0 +1,133 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# IDEs
+.idea
+.vscode
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..e64b18b
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Fabio Muratore
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..05b4973
--- /dev/null
+++ b/README.md
@@ -0,0 +1,85 @@
+# Numerical Differentiation Leveraging Convolution (ndc)
+
+[![License](https://img.shields.io/badge/license-MIT-brightgreen)](https://opensource.org/licenses/MIT)
+[![codecov](https://codecov.io/gh/famura/ndc/branch/main/graph/badge.svg?token=ESUTNFwtYY)](https://codecov.io/gh/famura/ndc)
+[![isort](https://img.shields.io/badge/imports-isort-green)](https://pycqa.github.io/isort/)
+[![codestyle](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+
+**What for?**
+
+Differentiate signals stored as PyTorch tensors, e.g. measurements obtained from a device or simulation, where automatic differentiation cannot be applied.
+
+**Features**
+
+* Theoretically **any order, any stencils, and any step size** (see [this Wikipedia page](https://en.wikipedia.org/wiki/Finite_difference_coefficient) for more information). Be aware that there are numerical limits when computing the filter kernel's coefficients, e.g. small step sizes and high orders lead to numerical issues.
+* Works for **multidimensional signals**, assuming that all dimensions share the same step size.
+* Computations can be executed on **CUDA**. However, this has not been tested extensively.
+* Straightforward implementation which you can easily adapt to your needs.
+
+**How?**
+
+The idea of this small repository is to use the duality between convolution, i.e., filtering, and [numerical differentiation](https://en.wikipedia.org/wiki/Numerical_differentiation) to leverage the existing functions for 1-dimensional convolution in order to compute the (time) derivatives.
+
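+To make this concrete, here is a minimal sketch in plain PyTorch (independent of this package) that expresses the first-order central difference as a convolution:
+
+```python
+import torch
+
+h = 0.01  # step size
+t = torch.arange(0.0, 1.0, h)
+x = torch.sin(t)  # example signal of shape (num_steps,)
+
+# Finite difference coefficients [-1/(2h), 0, 1/(2h)] for the stencil points [-1, 0, 1].
+# Since torch.conv1d computes a cross-correlation, the coefficients are used as-is, without flipping.
+kernel = torch.tensor([[[-1.0, 0.0, 1.0]]]) / (2 * h)
+
+dx_dt = torch.conv1d(x.reshape(1, 1, -1), kernel).flatten()  # of shape (num_steps - 2,)
+print(torch.allclose(dx_dt, torch.cos(t[1:-1]), atol=1e-4))  # True
+```
+
+This package generalizes exactly this construction to arbitrary stencil points and derivative orders.
+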
+**Why PyTorch?**
+
+More often than not, I received (recorded) simulation data as PyTorch tensors rather than NumPy arrays.
+Thus, I think it is nice to have a function to differentiate measurement signals without switching the data type or computation device.
+Moreover, the `torch.conv1d` function fits perfectly for this purpose.
+
+
+## Citing
+
+If you use code or ideas from this repository for your projects or research, please cite it.
+```
+@misc{Muratore_ndc,
+    author = {Fabio Muratore},
+    title = {ndc - Numerical differentiation leveraging convolutions},
+    year = {2022},
+    publisher = {GitHub},
+    journal = {GitHub repository},
+    howpublished = {\url{https://github.com/famura/ndc}}
+}
+```
+
+## Installation
+
+To install the core part of the package, run
+```
+pip install ndc
+```
+
+For (local) development, install the dependencies with
+```
+pip install -e .[dev]
+```
+
+## Usage
+
+Consider a signal `x`, e.g. a measurement you obtained from a device. This package assumes that the signal to differentiate is of shape `(num_steps, dim_data)`.
+
+```python
+import torch
+import ndc
+
+# Assuming you got x(t) from somewhere.
+assert isinstance(x, torch.Tensor)
+num_steps, dim_data = x.shape
+
+# Specify the derivative. Here, the first-order central derivative.
+stencils = [-1, 0, 1]
+order = 1
+step_size = dt  # should be known from your signal x(t), else use 1
+padding = True  # if True, the initial and final values are repeated as often as necessary to match the length of x
+
+dx_dt_num = ndc.differentiate_numerically(x, stencils, order, step_size, padding)
+assert dx_dt_num.device == x.device
+if padding:
+    assert dx_dt_num.shape == (num_steps, dim_data)
+else:
+    assert dx_dt_num.shape == (num_steps - sum(s != 0 for s in stencils), dim_data)
+```
+
+
+## Contributions
+
+Maybe you want another padding mode, or you found a way to improve the CUDA support. Please feel free to open a pull request or an issue.
diff --git a/examples/demo.py b/examples/demo.py
new file mode 100644
index 0000000..189cd68
--- /dev/null
+++ b/examples/demo.py
@@ -0,0 +1,93 @@
+import math
+
+import matplotlib.pyplot as plt
+import numpy
+import torch
+
+import ndc
+
+# Configure.
+stencils = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
+order = 1
+padding = True
+t = torch.linspace(0, 1.5, math.floor(1.5 / 0.01)).reshape(-1, 1)
+
+
+numpy.set_printoptions(precision=6, sign=" ", linewidth=200, suppress=True)
+
+
+def _noisy_nonlin_fcn(t: torch.Tensor, freq: float = 1.0, noise_std: float = 0.0) -> torch.Tensor:
+    """
+    A 1-dim function (a sine superposed with a polynomial), representing some signal.
+
+    Args:
+        t: Function argument, e.g. time.
+        noise_std: Scale of the additive noise sampled from a standard normal distribution.
+        freq: Frequency of the sine wave.
+    Returns:
+        Function value.
+    """
+    return -torch.sin(2 * torch.pi * freq * t) - torch.pow(t, 2) + 0.7 * t + noise_std * torch.randn_like(t)
+
+
+if __name__ == "__main__":
+    # Get from configuration.
+    dt = float(t[1, 0] - t[0, 0])
+    num_steps, dim_data = t.shape
+
+    # Create a clean signal.
+    t.requires_grad_(True)
+    y = _noisy_nonlin_fcn(t)
+
+    # Differentiate analytically.
+    y.backward(torch.ones_like(y))
+    dy_dt_anal = t.grad.data
+    t = t.detach()
+    y = y.detach()
+
+    # Create a noisy signal.
+    z = _noisy_nonlin_fcn(t, noise_std=0.02)
+
+    # Compute the filter coefficients, only for display here.
+    w = ndc.compute_coefficients(stencils, order, dt)
+    print(f"The convolution kernel's filter coefficients are:\n{w.numpy()}")
+
+    # Differentiate numerically.
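+    # ndc applies the configured stencils and order in a single call; the naive first-order forward
+    # difference below only serves as a baseline for the comparison on the noisy signal.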
+    dy_dt_num = ndc.differentiate_numerically(y, stencils, order, dt, padding)
+    dz_dt_num = ndc.differentiate_numerically(z, stencils, order, dt, padding)
+    dz_dt_num_naive = torch.diff(z, dim=0) / dt
+    if padding:
+        dz_dt_num_naive = torch.cat((dz_dt_num_naive, dz_dt_num_naive[-1].unsqueeze(0)))
+
+    # Plot.
+    fig_c, axs_c = plt.subplots(dim_data, 1, figsize=(12, 8))
+    fig_n, axs_n = plt.subplots(dim_data, 1, figsize=(12, 8))
+    axs_c = numpy.atleast_1d(axs_c)
+    axs_n = numpy.atleast_1d(axs_n)
+
+    fig_c.suptitle("Clean Signal")
+    for idx in range(dim_data):
+        axs_c[idx].plot(t[:, idx], y[:, idx], label="y(t)")
+        if padding:
+            axs_c[idx].plot(t[:, idx], dy_dt_num[:, idx], label="dy/dt numerical", ls="-")
+        else:
+            num_pad_init = sum(s < 0 for s in stencils)
+            num_pad_end = sum(s > 0 for s in stencils)
+            axs_c[idx].plot(t[num_pad_init:-num_pad_end, idx], dy_dt_num[:, idx], label="dy/dt numerical", ls="-.")
+        axs_c[idx].plot(t[:, idx], dy_dt_anal[:, idx], label="dy/dt analytical", ls="--")
+        axs_c[idx].legend()
+
+    fig_n.suptitle("Noisy Signal")
+    for idx in range(dim_data):
+        axs_n[idx].plot(t[:, idx], z[:, idx], label="z(t)")
+        if padding:
+            axs_n[idx].plot(t[:, idx], dz_dt_num[:, idx], label="dz/dt numerical (ndc)", ls="-")
+            axs_n[idx].plot(t[:, idx], dz_dt_num_naive[:, idx], label="dz/dt numerical (naive)", ls="-.")
+        else:
+            num_pad_init = sum(s < 0 for s in stencils)
+            num_pad_end = sum(s > 0 for s in stencils)
+            axs_n[idx].plot(t[num_pad_init:-num_pad_end, idx], dz_dt_num[:, idx], label="dz/dt numerical", ls="-.")
+        axs_n[idx].plot(t[:, idx], dy_dt_anal[:, idx], label="dy/dt (clean) analytical", ls="--")
+        axs_n[idx].legend()
+
+    plt.show()
diff --git a/ndc/__init__.py b/ndc/__init__.py
new file mode 100644
index 0000000..44ac16e
--- /dev/null
+++ b/ndc/__init__.py
@@ -0,0 +1 @@
+from .numerical_differentiation import compute_coefficients, differentiate_numerically
diff --git a/ndc/numerical_differentiation.py b/ndc/numerical_differentiation.py
new file mode 100644
index 0000000..3e60af4
--- /dev/null
+++ b/ndc/numerical_differentiation.py
@@ -0,0 +1,107 @@
+import math
+from typing import List, Union
+
+import torch
+
+
+def compute_coefficients(
+    stencils: Union[torch.Tensor, List[int]],
+    order: int,
+    step_size: Union[float, int],
+    device: Union[torch.device, str] = "cpu",
+) -> torch.Tensor:
+    r"""
+    Compute the coefficients for discrete time numerical differentiation. These can later be convolved with the signal.
+
+    Args:
+        stencils: Stencil points :math:`s` of length :math:`N`, where a negative value means accessing a data point
+            in the past. For example, pass `[-1, 0, 1]` for a central difference or `[-3, -1, 0]` for a backward
+            difference without equally spaced stencil points.
+            Will be cast into a 64-bit integer :external:class:`~torch.Tensor` of shape (N,).
+        order: Order :math:`d` of the derivative, must be a positive integer less than the number of stencil points.
+        step_size: Time step size :math:`h`. Will be cast into a `float`.
+        device: Device to cast the :external:class:`~torch.Tensor`s to before solving the linear system of equations.
+            By default, the CPU is used.
+    Returns:
+        Coefficients for computing the derivative with the order of the approximation :math:`\mathcal{O}(h^{N-d})`.
+    """
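+    # The coefficients c are the solution of the linear system A c = b, where A[i, j] = s_j**i is a
+    # Vandermonde-style matrix built from the stencil points, and b[i] = order! / h**order if i == order,
+    # else 0. This condition matches the Taylor expansions of the signal at the stencil points.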
+    stencils = torch.as_tensor(stencils, dtype=torch.int64).reshape(-1)
+    step_size = float(step_size)
+    num_eqs = len(stencils)  # number of equations for the solver N
+    stencils = torch.unique(stencils)  # remove duplicate stencil points; note that torch.unique also sorts them
+
+    if len(stencils) != num_eqs:  # if the length was reduced, there have been non-unique elements
+        raise ValueError("The tensor of stencil points must only contain unique elements!")
+    if not isinstance(order, int):
+        raise TypeError(f"The order must be of type int, but is of type {type(order)}!")
+    if not 0 < order < num_eqs:
+        raise ValueError(f"The order must be greater than 0 and less than {num_eqs}, but is {order}!")
+    if step_size <= 0:
+        raise ValueError(f"The step size must be greater than 0, but is {step_size}!")
+
+    A = torch.stack([torch.pow(stencils, idx) for idx in range(num_eqs)], dim=0).float()  # shape (num_eqs, num_eqs)
+    b = torch.zeros(num_eqs)
+    b[order] = math.factorial(order) / step_size**order
+
+    # Compute the coefficients, which are the solution to the linear system of equations constructed above.
+    return torch.linalg.solve(A.to(device=device), b.to(device=device))
+
+
+def differentiate_numerically(
+    signal: torch.Tensor,
+    stencils: Union[torch.Tensor, List[int]],
+    order: int,
+    step_size: Union[float, int],
+    padding: bool = True,
+) -> torch.Tensor:
+    r"""
+    Compute an arbitrary derivative of the given signal, leveraging :func:`torch.nn.functional.conv1d`.
+
+    Note:
+        Even though :func:`torch.nn.functional.conv1d` has an option for padding, this function is designed to
+        implement its own padding logic. The reason is that the stencils could indicate a forward or backward
+        derivative of any order, which is a custom use case not covered by PyTorch.
+
+    Args:
+        signal: Time series of shape (num_steps, dim_data) to differentiate. The signal's device type is forwarded to
+            :func:`~ndc.numerical_differentiation.compute_coefficients`.
+        stencils: Stencil points :math:`s` of length :math:`N`. Forwarded to
+            :func:`~ndc.numerical_differentiation.compute_coefficients`.
+        order: Order :math:`d` of the derivative.
+            Forwarded to :func:`~ndc.numerical_differentiation.compute_coefficients`.
+        step_size: Time step size :math:`h`. Forwarded to :func:`~ndc.numerical_differentiation.compute_coefficients`.
+        padding: If `True`, a custom padding scheme is applied. Based on how many stencils are negative/positive,
+            the respective number of initial or final derivative values is repeated to restore the length of the signal.
+    Returns:
+        Derivative of the signal. If `padding=False` the shape is (num_steps - :math:`M`, dim_data) where
+        :math:`M = \mathrm{count}(s \neq 0)`. If `padding=True` the shape is (num_steps, dim_data).
+    """
+    dim_data = signal.size(1)
+
+    # Prepare the input for the convolution.
+    signal_conv = torch.atleast_3d(signal)  # in our context, this is typically of shape (num_steps, dim_data, 1)
+    if signal_conv.ndim > 3:
+        raise ValueError(
+            f"The input for the convolution must have 3 or fewer dimensions, but it is of shape {signal.shape}!"
+        )
+    signal_conv = signal_conv.permute(2, 1, 0)  # of shape (num_minibatch, in_channels, -1)
+
+    # Compute the weights for the convolution. The weights are the same for every dim, thus we repeat them.
+    weights_conv = compute_coefficients(stencils, order, step_size, signal.device)
+    weights_conv = weights_conv.repeat(dim_data, 1, 1)  # of shape (out_channels, in_channels_per_group, -1)
+
+    # Compute the derivative via a convolution (without padding).
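+    # Note that torch.conv1d computes a cross-correlation, so the (sorted) coefficients are applied in
+    # ascending stencil order, i.e., the most negative stencil point meets the oldest sample in each window.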
+    derivative = torch.conv1d(signal_conv, weights_conv, padding=0, dilation=1, groups=dim_data)
+
+    # Reshape to (num_steps - M, dim_data), matching the layout of the input signal.
+    derivative = derivative.squeeze(0).permute(1, 0)
+
+    # Pad by repeating the first/last derivative value as often as there are negative/positive stencil points.
+    # This is an approximation.
+    if padding:
+        num_pad_init = sum(s < 0 for s in stencils)
+        num_pad_end = sum(s > 0 for s in stencils)
+        pad_init = derivative[0].repeat(num_pad_init, 1)  # of shape (num_pad_init, dim_data)
+        pad_end = derivative[-1].repeat(num_pad_end, 1)  # of shape (num_pad_end, dim_data)
+        derivative = torch.cat((pad_init, derivative, pad_end))
+
+    return derivative
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..29be420
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,32 @@
+# Configuration file for Black
+
+# NOTE: you have to use single-quoted strings in TOML for regular expressions.
+# It's the equivalent of r-strings in Python. Multiline strings are treated as
+# verbose regular expressions by Black. Use [ ] to denote a significant space
+# character.
+
+[tool.black]
+line-length = 120
+target-version = ['py38']
+include = '\.pyi?$'
+exclude = '''
+/(
+    \.eggs
+  | \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | venv
+  | _build
+  | buck-out
+  | build
+  | dist
+)/
+'''
+
+[tool.isort]
+profile = "black"
+line_length = 120
+multi_line_output = 3
+include_trailing_comma = true
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..3957a60
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
+[metadata]
+name = ndc
+version = 1.0
+url = https://github.com/famura/ndc
+author = Fabio Muratore
+author_email = robot-learning@famura.net
+classifiers =
+    Development Status :: 4 - Beta
+    License :: OSI Approved :: MIT License
+    Programming Language :: Python :: 3.10
+    Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.6
+    Programming Language :: Python :: 3 :: Only
+description = Numerical differentiation leveraging convolutions
+keywords = numerical differentiation, finite differences, convolution, signal processing, pytorch
+
+[options]
+zip_safe = True
+packages = find:
+python_requires = >=3.6, <4
+install_requires =
+    torch
+
+[options.extras_require]
+dev = black; isort; matplotlib; pytest; pytest-cov
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..ec8d2da
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,46 @@
+import pathlib
+
+from setuptools import find_packages, setup
+
+here = pathlib.Path(__file__).parent.resolve()
+
+setup(
+    # This is the name of your project as it will be published on PyPI: https://pypi.org/project/sampleproject/
+    name="ndc",  # https://packaging.python.org/specifications/core-metadata/#name
+    version="1.0",  # https://www.python.org/dev/peps/pep-0440/
+    description="Numerical differentiation leveraging convolutions based on PyTorch",
+    long_description=(here / "README.md").read_text(encoding="utf-8"),
+    long_description_content_type="text/markdown",
+    url="https://github.com/famura/ndc",
+    author="Fabio Muratore",
+    author_email="robo-learning@famura.net",
+    classifiers=[  # list of valid classifiers, see https://pypi.org/classifiers/
+        "License :: OSI Approved :: MIT License",
+        "Development Status :: 4 - Beta",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3 :: Only",
+    ],
+    keywords="numerical differentiation, finite differences, convolution, signal processing",
+    packages=find_packages(include=["ndc"], exclude=["tests"]),
+    python_requires=">=3.6, <4",
+    # List mandatory dependencies, installed via `pip install .`
+    install_requires=[
+        "torch",
+    ],  # https://packaging.python.org/en/latest/requirements.html
+    # List additional groups of dependencies, installed via `pip install -e .[dev]`
+    extras_require={
+        "dev": ["black", "isort", "matplotlib", "pytest", "pytest-cov"],
+    },
+    project_urls={
+        "Source": "https://github.com/famura/ndc",
+        "Bug Reports": "https://github.com/famura/ndc/issues",
+        # 'Funding': 'https://donate.pypi.org',
+    },
+    # Data files outside of the package http://docs.python.org/distutils/setupscript.html#installing-additional-files
+    # data_files=[('my_data', ['data/data_file'])],
+)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..4384dce
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,31 @@
+"""
+This file is found by pytest and contains markers and helper functions that can be used by all tests.
+"""
+import pytest
+import torch
+
+# Check if optional packages are available.
+try:
+    from matplotlib import pyplot as plt
+
+    m_needs_pyplot = pytest.mark.skipif(False, reason="matplotlib.pyplot can be imported.")
+
+except (ImportError, ModuleNotFoundError):
+    m_needs_pyplot = pytest.mark.skip(reason="matplotlib.pyplot is not supported in this setup.")
+
+# Check if CUDA support is available.
+m_needs_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not supported in this setup.")
+
+
+def _noisy_nonlin_fcn(t: torch.Tensor, freq: float = 1.0, noise_std: float = 0.0) -> torch.Tensor:
+    """
+    A 1-dim function (a sine superposed with a polynomial), representing some signal.
+
+    Args:
+        t: Function argument, e.g. time.
+        noise_std: Scale of the additive noise sampled from a standard normal distribution.
+        freq: Frequency of the sine wave.
+    Returns:
+        Function value.
+    """
+    return -torch.sin(2 * torch.pi * freq * t) - torch.pow(t, 2) + 0.7 * t + noise_std * torch.randn_like(t)
diff --git a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
index 0000000..a758a9b
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+addopts = --strict-markers
+markers =
+    visual: marks tests that produce a visual output, e.g. plots or animations
diff --git a/tests/test_ndc.py b/tests/test_ndc.py
new file mode 100644
index 0000000..bb04690
--- /dev/null
+++ b/tests/test_ndc.py
@@ -0,0 +1,124 @@
+import math
+from typing import List, Union
+
+import numpy
+import pytest
+import torch
+
+import ndc
+from tests.conftest import _noisy_nonlin_fcn, m_needs_cuda, m_needs_pyplot
+
+
+def test_ill_parameterization():
+    # Non-unique stencils.
+    with pytest.raises(ValueError):
+        ndc.compute_coefficients(stencils=[0, 1, 1], order=1, step_size=1)
+
+    # Float order.
+    with pytest.raises(TypeError):
+        ndc.compute_coefficients(stencils=[0, 1], order=1.0, step_size=1)
+
+    # Non-positive order.
+    with pytest.raises(ValueError):
+        ndc.compute_coefficients(stencils=[0, 1], order=0, step_size=1)
+
+    # Non-positive step size.
+    with pytest.raises(ValueError):
+        ndc.compute_coefficients(stencils=[0, 1], order=1, step_size=0)
+
+    # Number of dimensions for the signal tensor > 3.
+    with pytest.raises(ValueError):
+        ndc.differentiate_numerically(torch.randn(1, 2, 3, 4), stencils=[0, 1], order=1, step_size=1)
+
+
+@pytest.mark.parametrize(
+    "s",
+    [[-1, 0, 1], torch.tensor([[-1, 0, 1]]), torch.tensor([-3, -1, 0])],
+    ids=["central_list", "central_tensor", "backward_tensor"],
+)
+@pytest.mark.parametrize("d", [1, 2], ids=["order_1", "order_2"])
+@pytest.mark.parametrize("h", [1, 1e-3], ids=["unit_step", "small_step"])
+@pytest.mark.parametrize("device", ["cpu", pytest.param("cuda", marks=m_needs_cuda)], ids=["cpu", "cuda"])
+def test_compute_coefficients(s: Union[torch.LongTensor, List[int]], d: int, h: Union[float, int], device: str):
+    c = ndc.compute_coefficients(stencils=s, order=d, step_size=h, device=device)
+    assert torch.isclose(torch.sum(c), torch.tensor(0.0), atol=1e-6)
+    assert c.device.type == device
+
+
+@pytest.mark.parametrize(
+    "s, d, h, c_des",
+    [
+        ([-3, -2, -1, 0, 1, 2, 3], 3, 1, torch.tensor([1 / 8, -1, 13 / 8, 0, -13 / 8, 1, -1 / 8])),
+        ([0, 1, 2, 3, 4], 2, 1, torch.tensor([35 / 12, -26 / 3, 19 / 2, -14 / 3, 11 / 12])),
+        ([-1, 0], 1, 1, torch.tensor([-1.0, 1])),
+    ],
+    ids=["central_d3_acc4", "forward_d2_acc3", "backward_d1_acc1"],
+)
+@pytest.mark.parametrize("device", ["cpu", pytest.param("cuda", marks=m_needs_cuda)], ids=["cpu", "cuda"])
+def test_compute_coefficients_cases(
+    s: Union[torch.LongTensor, List[int]], d: int, h: Union[float, int], c_des: torch.Tensor, device: str
+):
+    # See https://en.wikipedia.org/wiki/Finite_difference_coefficient
+    c = ndc.compute_coefficients(stencils=s, order=d, step_size=h, device=device)
+    assert torch.allclose(c.cpu(), c_des, atol=1e-5)  # compare on the CPU, since c_des lives there
+    assert c.device.type == device
+
+
+@pytest.mark.parametrize(
+    "x",
+    [
+        torch.linspace(0, 2, math.floor(1.75 / 0.01)).reshape(-1, 1),
+        torch.meshgrid(torch.linspace(0, 1.75, math.floor(2 / 0.01)), torch.linspace(0, 1, 2))[0],
+    ],
+    ids=["1dim", "2dim"],
+)
+@pytest.mark.parametrize(
+    "stencils, order", [([-1, 0, 1], 1), ([0, 1, 2, 3, 4], 1)], ids=["o1_acc2_central", "o1_acc4_forward"]
+)
+@pytest.mark.parametrize("padding", [True, False], ids=["padding", "no_padding"])
+@pytest.mark.parametrize("device", ["cpu", pytest.param("cuda", marks=m_needs_cuda)], ids=["cpu", "cuda"])
+@pytest.mark.parametrize("visual", [pytest.param(True, marks=[m_needs_pyplot, pytest.mark.visual]), False])
+def test_numerical_differentiation(
+    x: torch.Tensor, stencils: Union[torch.LongTensor, List[int]], order: int, padding: bool, device: str, visual: bool
+):
+    x = x.clone().to(device=device)  # move to the requested device; otherwise the CUDA parametrization has no effect
+    num_steps, dim_data = x.shape
+    x.requires_grad_(True)
+    dx = float(x[1, 0] - x[0, 0])
+    y = _noisy_nonlin_fcn(x)
+    y.backward(torch.ones_like(y))
+    dy_dx_anal = x.grad.data
+    x = x.detach()
+    y = y.detach()
+
+    dy_dx_num = ndc.differentiate_numerically(y, stencils, order, step_size=dx, padding=padding)
+    assert dy_dx_num.device.type == y.device.type
+
+    # Check the non-padded elements.
+    num_pad_init = sum(s < 0 for s in stencils)
+    num_pad_end = sum(s > 0 for s in stencils)
+    if padding:
+        assert torch.allclose(dy_dx_num[num_pad_init:-num_pad_end], dy_dx_anal[num_pad_init:-num_pad_end], atol=dx)
+        assert dy_dx_num.shape == (num_steps, dim_data)
+    else:
+        assert torch.allclose(dy_dx_num, dy_dx_anal[num_pad_init:-num_pad_end], atol=dx)
+        assert dy_dx_num.shape == (num_steps - sum(s != 0 for s in stencils), dim_data)
+
+    # Plot.
+    if visual:
+        import matplotlib.pyplot as plt
+
+        # Move the tensors to the CPU, since matplotlib cannot plot CUDA tensors.
+        x, y, dy_dx_anal, dy_dx_num = x.cpu(), y.cpu(), dy_dx_anal.cpu(), dy_dx_num.cpu()
+
+        fig, axs = plt.subplots(dim_data, 1, figsize=(12, 8))
+        axs = numpy.atleast_1d(axs)
+
+        for idx in range(dim_data):
+            axs[idx].plot(x[:, idx], y[:, idx], label="y(x)")
+            axs[idx].plot(x[:, idx], dy_dx_anal[:, idx], label="dy/dx analytical")
+            if padding:
+                axs[idx].plot(x[:, idx], dy_dx_num[:, idx], label="dy/dx numerical", ls="-.")
+            else:
+                axs[idx].plot(x[num_pad_init:-num_pad_end, idx], dy_dx_num[:, idx], label="dy/dx numerical", ls="-.")
+            axs[idx].legend()
+
+        fig.suptitle(f"padding = {padding}")
+        # plt.show()