diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000..7048eda
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,45 @@
+# Stage 1: NVIDIA CUDA Image
+ARG CUDA_VERSION=12.5.0
+FROM nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu22.04 AS cuda-base
+
+# Stage 2: Miniconda setup from configuration
+FROM continuumio/miniconda3 AS miniconda-stage
+
+# Stage 3: Final image combining CUDA and Miniconda
+FROM mcr.microsoft.com/devcontainers/base:ubuntu-22.04
+
+# Copy from CUDA base
+COPY --from=cuda-base /usr/local/cuda /usr/local/cuda
+
+# Copy Miniconda from the Miniconda stage
+COPY --from=miniconda-stage /opt/conda /opt/conda
+
+# Set environment variables for Miniconda
+ENV PATH /opt/conda/bin:$PATH
+ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
+
+# Install Python (version set via the PYTHON_VERSION build argument)
+ARG PYTHON_VERSION=3.12
+RUN conda install python=${PYTHON_VERSION}
+
+# Arguments for PyTorch and CUDA Toolkit versions
+ARG PYTORCH_VERSION=2.5.0
+ARG CUDATOOLKIT_VERSION=12.4
+
+# Install PyTorch and other dependencies
+RUN conda install pytorch=${PYTORCH_VERSION} pytorch-cuda=${CUDATOOLKIT_VERSION} -c pytorch -c nvidia
+
+# Handle environment.yml if it exists
+RUN echo env_change_20241021_2
+COPY environment.yml* noop.txt /tmp/conda-tmp/
+RUN if [ -f "/tmp/conda-tmp/environment.yml" ]; then \
+        /opt/conda/bin/conda env update -n base -f /tmp/conda-tmp/environment.yml; \
+    fi \
+    && rm -rf /tmp/conda-tmp
+
+# Append Miniconda to PATH in .bashrc for interactive shells
+RUN echo ". /opt/conda/etc/profile.d/conda.sh" >> /root/.bashrc \
+    && echo "conda activate base" >> /root/.bashrc
+
+# Final CMD or ENTRYPOINT
+CMD ["bash"]
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000..3187f71
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,51 @@
+{
+    "name": "torchsparsegradutils Dev Container",
+    "build": {
+        "dockerfile": "./Dockerfile",
+        "context": ".",
+        "args": {
+            "CUDA_VERSION": "12.4.0",
+            "PYTORCH_VERSION": "2.5.0",
+            "CUDATOOLKIT_VERSION": "12.4",
+            "PYTHON_VERSION": "3.12"
+        }
+    },
+    "runArgs": [
+        "--gpus",
+        "all"
+    ],
+    "remoteEnv": {
+        "SSH_AUTH_SOCK": "/tmp/ssh-agent.sock"
+    },
+    "customizations": {
+        "vscode": {
+            "settings": {
+                "python.defaultInterpreterPath": "/opt/conda/bin/python",
+                "terminal.integrated.shell.linux": "/bin/bash",
+                "terminal.integrated.env.linux": {
+                    "CONDA_DEFAULT_ENV": "base",
+                    "CONDA_PREFIX": "/opt/conda",
+                    "CONDA_PYTHON_EXE": "/opt/conda/bin/python",
+                    "PATH": "/opt/conda/bin:${env:PATH}"
+                },
+                "python.testing.pytestArgs": [
+                    "torchsparsegradutils/tests"
+                ],
+                "python.testing.unittestEnabled": false,
+                "python.testing.pytestEnabled": true
+            },
+            "extensions": [
+                "dbaeumer.vscode-eslint",
+                "ms-python.vscode-pylance",
+                "ms-python.python",
+                "github.copilot",
+                "GitHub.vscode-pull-request-github",
+                "GitHub.vscode-github-actions",
+                "mhutchie.git-graph",
+                "waderyan.gitblame"
+            ]
+        }
+    },
+    "remoteUser": "vscode",
+    "postCreateCommand": "echo 'Container is ready!'"
+}
\ No newline at end of file
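Note: the Dockerfile and devcontainer.json above install PyTorch with CUDA support into the base conda environment and launch the container with "--gpus all". A quick sanity check from inside the container (not part of this diff; it assumes the image was built with the default build args and that the host exposes an NVIDIA driver) could be:

    # check_gpu.py -- hypothetical helper, not included in this change
    import torch

    print(torch.__version__)          # expected 2.5.0 with the defaults above
    print(torch.version.cuda)         # expected 12.4 (the pytorch-cuda build)
    print(torch.cuda.is_available())  # True only if --gpus all and the host driver work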
diff --git a/.devcontainer/environment.yml b/.devcontainer/environment.yml
new file mode 100644
index 0000000..369bdc7
--- /dev/null
+++ b/.devcontainer/environment.yml
@@ -0,0 +1,22 @@
+name: base
+channels:
+  - conda-forge
+  - defaults
+dependencies:
+  - numpy
+  - cupy
+  - scipy
+  - pre-commit==3.7.1
+  - black==24.4.2
+  - flake8==7.1.0
+  - parameterized==0.9.0
+  - pytest==8.2.2
+  - pytest-rerunfailures==14.0
+  - pyyaml==6.0.1
+  - conda-libmamba-solver
+  - libmamba
+  - libmambapy
+  - libarchive
+  - pip
+  - pip:
+    - "jax[cuda12]"
diff --git a/.devcontainer/noop.txt b/.devcontainer/noop.txt
new file mode 100644
index 0000000..e69de29
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 6f0492e..361e147 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -13,10 +13,12 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.8", "3.10", "3.12"]
-        torch-version: ["1.13.1", "2.4.1"]
+        torch-version: ["1.13.1", "2.5.0"]
         exclude:
           - python-version: "3.12"
             torch-version: "1.13.1"
+          - python-version: "3.8"
+            torch-version: "2.5.0"
 
     steps:
     - uses: actions/checkout@v4
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c6b953d..ede3113 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,16 +1,16 @@
 repos:
   - repo: https://github.com/psf/black
-    rev: 23.3.0
+    rev: 24.4.2
     hooks:
       - id: black
        language_version: python3.10
 
   - repo: https://github.com/pycqa/flake8
-    rev: 6.0.0
+    rev: 7.1.0
     hooks:
       - id: flake8
 
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.4.0
+    rev: v4.6.0
     hooks:
       - id: trailing-whitespace
diff --git a/pyproject.toml b/pyproject.toml
index 97f7b51..908bc5e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.black]
 line-length = 120
-target-version = ['py37', 'py38', 'py39', 'py310']
+target-version = ['py38', 'py39', 'py310', 'py311', 'py312']
 include = '\.pyi?$'
 exclude = '''
 (
@@ -21,4 +21,4 @@ exclude = '''
   # also separately exclude other files if needed
   #| some_file
 )
-'''
\ No newline at end of file
+'''
diff --git a/setup.py b/setup.py
index 3257e06..31b4d4b 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@ def readme():
 
 setuptools.setup(
     name="torchsparsegradutils",
-    version="0.1.2",
+    version="0.1.3",
     description="A collection of utility functions to work with PyTorch sparse tensors",
     long_description=readme(),
     long_description_content_type="text/markdown",
@@ -18,6 +18,7 @@ def readme():
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
         "Programming Language :: Python :: 3.12",
     ],
     python_requires=">=3.8",
diff --git a/torchsparsegradutils/indexed_matmul.py b/torchsparsegradutils/indexed_matmul.py
index ef0c8f7..b41535c 100644
--- a/torchsparsegradutils/indexed_matmul.py
+++ b/torchsparsegradutils/indexed_matmul.py
@@ -45,7 +45,7 @@ def segment_mm(a, b, seglen_a):
     if not a.shape[1] == D1 or not seglen_a.shape[0] == R:
         raise ValueError("Incompatible size for inputs")
 
-    segidx_a = torch.cumsum(seglen_a[:-1], dim=0)
+    segidx_a = torch.cumsum(seglen_a[:-1], dim=0).cpu()
 
     # Ideally the conversions below to nested tensor would be handled natively
     nested_a = torch.nested.as_nested_tensor(torch.tensor_split(a, segidx_a, dim=0))
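Note on the indexed_matmul.py hunk: torch.tensor_split requires a tensor of split indices to be a long tensor on the CPU, so the cumulative segment offsets computed from seglen_a are moved off the device before the input is split into a nested tensor. A minimal sketch of the failure mode the ".cpu()" call avoids (hypothetical shapes, assuming a CUDA device is available):

    import torch

    device = "cuda"
    a = torch.randn(10, 4, device=device)
    seglen_a = torch.tensor([3, 3, 4], device=device)

    segidx_a = torch.cumsum(seglen_a[:-1], dim=0)           # CUDA tensor [3, 6]
    # torch.tensor_split(a, segidx_a, dim=0)                 # raises: index tensor must live on the CPU
    chunks = torch.tensor_split(a, segidx_a.cpu(), dim=0)    # ok: rows split into 3 + 3 + 4
    print([tuple(c.shape) for c in chunks])                  # [(3, 4), (3, 4), (4, 4)]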