diff --git a/.codespellrc b/.codespellrc
index 4ee856e..e1e5ad1 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,2 +1,3 @@
 [codespell]
-skip = .git/*
+skip = .git/*,*.pdf
+ignore-words-list = INH
diff --git a/.copier-answers.yml b/.copier-answers.yml
index 3f23a7c..0879964 100644
--- a/.copier-answers.yml
+++ b/.copier-answers.yml
@@ -1,6 +1,6 @@
 # Changes here will be overwritten by Copier
-_commit: 0.1.65
+_commit: 0.1.67
 _src_path: git@bbpgitlab.epfl.ch:neuromath/python-template.git
 author_email: ''
 author_name: Blue Brain Project, EPFL
@@ -9,11 +9,11 @@ copyright_year: '2022'
 distribution_name: synthesis-workflow
 download_url: https://github.com/BlueBrain/synthesis-workflow
 init_git: false
-maintainer: Adrien Berchet
+maintainer: Adrien Berchet (@adrien-berchet)
 package_name: synthesis_workflow
 project_description: Workflow used for synthesis and its validation.
 project_name: Synthesis Workflow
-project_url: https://synthesis_workflow.readthedocs.io
+project_url: https://synthesis-workflow.readthedocs.io
 repository_name: synthesis-workflow
 repository_namespace: BlueBrain
 repository_provider: github
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index 1921990..8b76e49 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -43,7 +43,7 @@ Your reports must include the following features:
   - type: input
     id: relevant_documentation
     attributes:
-      label: Optional link from https://synthesis_workflow.readthedocs.io which documents the behavior that is expected
+      label: Optional link from https://synthesis-workflow.readthedocs.io which documents the behavior that is expected
       description: "
         Please make sure the behavior you are seeing is definitely in contradiction to what's documented as the correct behavior.
         "
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml
index 1b4267d..fd87733 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yaml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yaml
@@ -12,7 +12,7 @@ body:
         Thanks for taking the time to fill out this feature request!
 
-        Before submitting, make sure the feature does not already exist in the [documentation](https://synthesis_workflow.readthedocs.io) and that you searched in the [issue list](https://github.com/BlueBrain/synthesis-workflow/issues) that a similar feature request has not already been reported.
+        Before submitting, make sure the feature does not already exist in the [documentation](https://synthesis-workflow.readthedocs.io) and that you searched in the [issue list](https://github.com/BlueBrain/synthesis-workflow/issues) that a similar feature request has not already been reported.
 
         If it is not the case, please read the following guidelines.
 
         ### GUIDELINES FOR REQUESTING HELP
diff --git a/.github/ISSUE_TEMPLATE/how_to_use.yaml b/.github/ISSUE_TEMPLATE/how_to_use.yaml
index a6bc893..e284ad2 100644
--- a/.github/ISSUE_TEMPLATE/how_to_use.yaml
+++ b/.github/ISSUE_TEMPLATE/how_to_use.yaml
@@ -12,7 +12,7 @@ body:
         Thanks for using this package and taking the time to fill out this help request!
 
-        Before submitting, make sure you read the [documentation](https://synthesis_workflow.readthedocs.io) carefully.
+        Before submitting, make sure you read the [documentation](https://synthesis-workflow.readthedocs.io) carefully.
 
         If you still have a question, you should search in the [issue list](https://github.com/BlueBrain/synthesis-workflow/issues) that a similar issue has not already been reported, you might find your answer there.
 
         If it is not the case, please read the following guidelines.
diff --git a/.github/workflows/run-tox.yml b/.github/workflows/run-tox.yml
index be0401f..2f8ed15 100644
--- a/.github/workflows/run-tox.yml
+++ b/.github/workflows/run-tox.yml
@@ -34,7 +34,7 @@ jobs:
         # This action should only be used when you need extra system packages
         uses: awalsh128/cache-apt-pkgs-action@latest
         with:
-          packages: graphviz
+          packages: graphviz poppler-utils
           version: 1.0
           execute_install_scripts: true
       - name: Cache tox and precommit environments
@@ -64,7 +64,7 @@ jobs:
           pip install tox
           tox run -e min_versions
       - name: JUnit Report Action
-        uses: mikepenz/action-junit-report@v4
+        uses: mikepenz/action-junit-report@v5
        if: always() # always run even if the previous step fails
        with:
          report_paths: 'reports/pytest-*.xml'
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 997194b..6e8187f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: check-added-large-files
       - id: check-case-conflict
@@ -11,7 +11,7 @@ repos:
       - id: end-of-file-fixer
       - id: trailing-whitespace
   - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
-    rev: v9.17.0
+    rev: v9.18.0
     hooks:
       - id: commitlint
         stages:
@@ -22,7 +22,7 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/psf/black
-    rev: 24.8.0
+    rev: 24.10.0
    hooks:
      - id: black
  - repo: https://github.com/codespell-project/codespell
diff --git a/.pylintrc b/.pylintrc
index 7db6576..feb22b0 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -51,6 +51,4 @@ ignore-docstrings=yes
 [TYPECHECK]
 # List of classes names for which member attributes should not be checked
 # (useful for classes with attributes dynamically set).
-
-# as of numpy 1.8.0, name resolution seems to be a problem. Ignore lookups in numpy
-# ignored-classes=numpy,list
+extension-pkg-allow-list=lxml.etree
diff --git a/AUTHORS.md b/AUTHORS.md
index 814515a..d517877 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -1,5 +1,5 @@
 # Maintainer
-Adrien Berchet
+Adrien Berchet (@adrien-berchet)
 
 # Contributors
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a4ba867..ff67771 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -59,7 +59,7 @@ When you wish to contribute to the code base, please consider the following guid
   or
 
   ```shell
-  tox -e py39 -e lint -e docs -e check-packaging
+  tox run -e py39,lint,docs,check-packaging
   ```
 
* Commit your changes using a descriptive commit message.
diff --git a/docs/Makefile b/docs/Makefile
index cf4b04f..f5e4acd 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -21,4 +21,4 @@ help:
 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
 
 clean:
-	@rm -rf $(BUILDDIR) $(SOURCEDIR)/examples $(SOURCEDIR)/generated
+	@rm -rf $(BUILDDIR) $(SOURCEDIR)/examples $(SOURCEDIR)/autoapi
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 6836457..01574b6 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -25,10 +25,11 @@
 
 # -- Project information -----------------------------------------------------
 
-project = "Synthesis Workflow"
+project_name = "Synthesis Workflow"
+package_name = "synthesis-workflow"
 
 # The short X.Y version
-version = metadata.version("synthesis-workflow")
+version = metadata.version(package_name)
 
 # The full version, including alpha/beta/rc tags
 release = version
@@ -94,10 +95,10 @@
 # html_static_path = ['_static']
 
 html_theme_options = {
-    "metadata_distribution": "synthesis-workflow",
+    "metadata_distribution": package_name,
 }
 
-html_title = project
+html_title = project_name
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = False
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
new file mode 100644
index 0000000..f2c987c
--- /dev/null
+++ b/docs/source/contributing.rst
@@ -0,0 +1 @@
+.. mdinclude:: ../../CONTRIBUTING.md
diff --git a/docs/source/index.rst b/docs/source/index.rst
index a82cb22..59dc7d1 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -50,3 +50,4 @@ The :doc:`./api_ref` page contains detailed documentation of:
    cli
    api_ref
    changelog
+   contributing
diff --git a/docs/source/synthesis_methodology.rst b/docs/source/synthesis_methodology.rst
index a1c8dae..63e60ff 100644
--- a/docs/source/synthesis_methodology.rst
+++ b/docs/source/synthesis_methodology.rst
@@ -100,6 +100,6 @@ Here is an example of such ``context_constraints`` entry:
 
 More details on the models can be found here:
 
-* `TNS `_
-* (does not exist yet)
-* (does not exist yet)
+* `NeuroTS `_
+* `region-grower `_
+* `placement-algorithm `_
diff --git a/requirements/base.pip b/requirements/base.pip
index 876907d..35073b7 100644
--- a/requirements/base.pip
+++ b/requirements/base.pip
@@ -1,9 +1,5 @@
-atlas_analysis>=0.0.5
-bluepy>=2.5,<3
-bluepy-configfile>=0.1.19
-bluepymm>=0.8.5
 bluepyparallel>=0.0.8
-brainbuilder>=0.20
+brainbuilder>=0.20.1
 diameter_synthesis>=0.5.4
 dictdiffer>=0.9
 gitpython>=3.1.30
@@ -16,16 +12,15 @@ matplotlib>=3.6.2
 morph_tool>=2.9.1,<3
 morphio>=3.3.6,<4
 neuroc>=0.3.0,<1
-neurocollage>=0.3.3
+neurocollage>=0.3.6
 neurom>=3.2.2,<4
 neurots>=3.6,<4
 numpy>=1.26.4
-pandas>=1.5.3
-placement_algorithm>=2.3.1
+pandas>=2.1
 PyYAML>=6
-region_grower>=1.3,<2
-scipy>=1.10
+region_grower>=1.5.1,<2
+scipy>=1.13
 seaborn>=0.12.2
 tmd>=2.3
 tqdm>=4.64.1
-voxcell>=3.1.3,<4
+voxcell>=3.1.5,<4
diff --git a/setup.py b/setup.py
index ce8f0e0..bb256a6 100644
--- a/setup.py
+++ b/setup.py
@@ -1,19 +1,10 @@
 """Setup for the synthesis-workflow package."""
-import importlib
 from pathlib import Path
 
 from setuptools import find_namespace_packages
 from setuptools import setup
 
-spec = importlib.util.spec_from_file_location(
-    "src.version",
-    "src/version.py",
-)
-module = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(module)
-VERSION = module.VERSION
-
 # Read the requirements
 with open("requirements/base.pip", "r", encoding="utf-8") as f:
     reqs = f.read().splitlines()
 
@@ -28,21 +19,23 @@
 
 setup(
     name="synthesis-workflow",
-    author="bbp-ou-cells",
-    author_email="bbp-ou-cells@groupes.epfl.ch",
+    author="Blue Brain Project, EPFL",
     description="Workflow used for synthesis and its validation.",
     long_description=Path("README.rst").read_text(encoding="utf-8"),
     long_description_content_type="text/x-rst",
-    url="https://bbpteam.epfl.ch/documentation/projects/synthesis-workflow",
+    url="https://synthesis-workflow.readthedocs.io",
     project_urls={
-        "Tracker": "https://bbpteam.epfl.ch/project/issues/projects/CELLS/issues",
-        "Source": "https://bbpgitlab.epfl.ch/neuromath/synthesis-workflow",
+        "Tracker": "https://github.com/BlueBrain/synthesis-workflow/issues",
+        "Source": "https://github.com/BlueBrain/synthesis-workflow",
     },
-    license="BBP-internal-confidential",
+    license="Apache License 2.0",
     packages=find_namespace_packages("src"),
     package_dir={"": "src"},
     python_requires=">=3.9",
-    version=VERSION,
+    use_scm_version=True,
+    setup_requires=[
+        "setuptools_scm",
+    ],
     install_requires=reqs,
     extras_require={
         "docs": doc_reqs,
@@ -56,6 +49,7 @@
     },
     include_package_data=True,
     classifiers=[
+        # TODO: Update to relevant classifiers
         "Development Status :: 2 - Pre-Alpha",
         "Intended Audience :: Education",
         "Intended Audience :: Science/Research",
diff --git a/src/synthesis_workflow/morphology_combos/__init__.py b/src/synthesis_workflow/morphology_combos/__init__.py
deleted file mode 100644
index a494cf1..0000000
--- a/src/synthesis_workflow/morphology_combos/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""Module to create and modify morphologie combos with dataframes."""
-
-from .create_me_combos import *  # noqa
diff --git a/src/synthesis_workflow/morphology_combos/create_me_combos.py b/src/synthesis_workflow/morphology_combos/create_me_combos.py
deleted file mode 100644
index 4b44d7c..0000000
--- a/src/synthesis_workflow/morphology_combos/create_me_combos.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""Functions to create and modify combos dataframe."""
-
-import json
-import logging
-from pathlib import Path
-
-import pandas as pd
-from bluepymm.prepare_combos.parse_files import read_mm_recipe
-
-L = logging.getLogger(__name__)
-
-
-def get_me_types_map(recipe, emodel_etype_map):
-    """Use recipe data and bluepymm to get mtype/etype combos."""
-    me_types_map = pd.DataFrame()
-    for i in recipe.index:
-        combo = recipe.loc[i]
-        for emodel, emap in emodel_etype_map.items():
-            if combo.layer in emap["layer"] and combo.etype == emap["etype"]:
-                if "mtype" in emap:
-                    if emap["mtype"] == combo.fullmtype:
-                        combo["emodel"] = emodel
-                        me_types_map = me_types_map.append(combo.copy())
-                else:
-                    combo["emodel"] = emodel
-                    me_types_map = me_types_map.append(combo.copy())
-
-    return me_types_map.rename(columns={"fullmtype": "mtype"}).reset_index()
-
-
-def create_morphs_combos_df(
-    morphs_df,
-    recipe_path=None,
-    emodel_etype_map_path=None,
-    emodels=None,
-    me_types_map=None,
-):
-    """From the morphs_df, create a dataframe with all possible combos."""
-    if me_types_map is None and emodel_etype_map_path is not None and recipe_path is not None:
-        recipe = read_mm_recipe(recipe_path)
-        with open(emodel_etype_map_path, "rb") as f:
-            emodel_etype_map = json.load(f)
-        me_types_map = get_me_types_map(recipe, emodel_etype_map)
-
-    morphs_combos_df = pd.DataFrame()
-    for combo_id in me_types_map.index:
-        if emodels is not None:
-            if me_types_map.loc[combo_id, "emodel"] not in emodels:
-                continue
-        combo = morphs_df[morphs_df.mtype == me_types_map.loc[combo_id, "mtype"]]
-        combo = combo.assign(etype=me_types_map.loc[combo_id, "etype"])
-        combo = combo.assign(emodel=me_types_map.loc[combo_id, "emodel"])
-        morphs_combos_df = morphs_combos_df.append(combo.copy())
-
-    morphs_combos_df = (
-        morphs_combos_df.drop_duplicates().reset_index().rename(columns={"index": "morph_gid"})
-    )
-    return morphs_combos_df
-
-
-def _base_emodel(emodel):
-    return "_".join(emodel.split("_")[:2])
-
-
-def add_for_optimisation_flag(config_path, morphs_combos_df=None, morphs_df=None, emodels=None):
-    """Add for_optimisation flag for combos used for optimisation."""
-    if morphs_df is None and morphs_combos_df is None:
-        raise ValueError("Please provide at least one dataframe.")
-
-    if morphs_combos_df is not None:
-        emodels = list(set(morphs_combos_df.emodel))
-        morphs_combos_df["for_optimisation"] = False
-        for emodel in emodels:
-            with open(
-                config_path / _base_emodel(emodel) / "config/recipes/recipes.json", "rb"
-            ) as f:
-                recipe = json.load(f)[_base_emodel(emodel)]
-            opt_mask = (morphs_combos_df.emodel == emodel) & (
-                morphs_combos_df.name == Path(recipe["morphology"][0][1]).stem
-            )
-            morphs_combos_df.loc[opt_mask, "for_optimisation"] = True
-            if len(morphs_combos_df[opt_mask]) == 0:
-                new_combo = morphs_combos_df[
-                    (morphs_combos_df.name == Path(recipe["morphology"][0][1]).stem)
-                    & (morphs_combos_df.for_optimisation == 1)
-                ]
-                if len(new_combo) > 0:
-                    new_combo = new_combo.iloc[0]
-                    L.warning("Duplicate optimisation cell for emodel %s", emodel)
-                else:
-                    L.warning("Error, no cell for %s", emodel)
-
-                new_combo["emodel"] = emodel
-                new_combo["etype"] = emodel.split("_")[0]
-                morphs_combos_df = morphs_combos_df.append(new_combo.copy())
-
-    if morphs_df is not None:
-        morphs_df["for_optimisation"] = False
-        if emodels is None and morphs_combos_df is None:
-            raise ValueError("Please provide a list of emodels for your cells")
-        for emodel in emodels:
-            with open(
-                config_path / _base_emodel(emodel) / "config/recipes/recipes.json", "rb"
-            ) as f:
-                recipe = json.load(f)[_base_emodel(emodel)]
-            morphs_df.loc[
-                (morphs_df.name == Path(recipe["morphology"][0][1]).stem),
-                "for_optimisation",
-            ] = True
-    return morphs_combos_df, morphs_df
-
-
-def add_for_optimisation_flag_old(config_path, morphs_combos_df=None, morphs_df=None, emodels=None):
-    """Add for_optimisation flag for combos used for optimisation."""
-    if morphs_df is None and morphs_combos_df is None:
-        raise ValueError("Please provide at least one dataframe.")
-
-    if morphs_combos_df is not None:
-        emodels = list(set(morphs_combos_df.emodel))
-        morphs_combos_df["for_optimisation"] = False
-        for emodel in emodels:
-            with open(config_path / emodel / "recipes/recipes.json", "rb") as f:
-                recipe = json.load(f)[emodel]
-            morphs_combos_df.loc[
-                (morphs_combos_df.emodel == emodel)
-                & (morphs_combos_df.name == Path(recipe["morphology"][0][1]).stem),
-                "for_optimisation",
-            ] = True
-            if (
-                len(
-                    morphs_combos_df.loc[
-                        (morphs_combos_df.emodel == emodel)
-                        & (morphs_combos_df.name == Path(recipe["morphology"][0][1]).stem)
-                    ]
-                )
-                == 0
-            ):
-                L.warning("Could not find a cell for optimisation for emodel %s", emodel)
-
-    if morphs_df is not None:
-        morphs_df["for_optimisation"] = False
-        if emodels is None and morphs_combos_df is None:
-            raise ValueError("Please provide a list of emodels for your cells")
-        for emodel in emodels:
-            with open(config_path / emodel / "recipes/recipes.json", "rb") as f:
-                recipe = json.load(f)[emodel]
-            morphs_df.loc[
-                (morphs_df.name == Path(recipe["morphology"][0][1]).stem),
-                "for_optimisation",
-            ] = True
diff --git a/src/synthesis_workflow/synthesis.py b/src/synthesis_workflow/synthesis.py
index 1a12be7..093c590 100644
--- a/src/synthesis_workflow/synthesis.py
+++ b/src/synthesis_workflow/synthesis.py
@@ -19,8 +19,13 @@
 from neurom.check.morphology_checks import has_apical_dendrite
 from neurom.core.dataformat import COLS
 from neurots import extract_input
-from placement_algorithm.app import utils
-from placement_algorithm.app.choose_morphologies import Master as ChooseMorphologyMaster
+
+try:
+    from placement_algorithm.app.choose_morphologies import Master as ChooseMorphologyMaster
+
+    with_placement_algo = True
+except ImportError:
+    with_placement_algo = False
 from tmd.io.io import load_population
 from tqdm import tqdm
 from voxcell import CellCollection
@@ -279,7 +284,7 @@ def create_axon_morphologies_tsv(
             f"Either 'morphs_df_path' or all the following parameter should be None: {_params}"
         )
 
-    if all(check_placement_params.values()):
+    if all(check_placement_params.values()) and with_placement_algo:
         L.info("Use placement algorithm for axons")
 
         kwargs = {
@@ -320,7 +325,7 @@
             random_state=42,
         )["name"].to_list()
 
-    utils.dump_morphology_list(axon_morphs, axon_morphs_path)
+    axon_morphs.to_csv(axon_morphs_path, sep="\t", na_rep="N/A")
 
 
 def get_target_length(soma_layer, target_layer, cortical_thicknesses):
diff --git a/src/synthesis_workflow/tasks/synthesis.py b/src/synthesis_workflow/tasks/synthesis.py
index 76a26f0..0a36ada 100644
--- a/src/synthesis_workflow/tasks/synthesis.py
+++ b/src/synthesis_workflow/tasks/synthesis.py
@@ -21,7 +21,6 @@
 from neurots.generate.orientations import fit_3d_angles
 from neurots.validator import validate_neuron_distribs
 from neurots.validator import validate_neuron_params
-from placement_algorithm.app.compact_annotations import _collect_annotations
 from region_grower.synthesize_morphologies import SynthesizeMorphologies
 from region_grower.utils import NumpyEncoder
 from tqdm import tqdm
@@ -46,6 +45,7 @@
 from synthesis_workflow.tools import find_case_insensitive_file
 from synthesis_workflow.tools import load_neurondb_to_dataframe
 from synthesis_workflow.utils import apply_parameter_diff
+from synthesis_workflow.utils import collect_annotations
 
 morphio.set_maximum_warnings(0)
 
@@ -280,7 +280,7 @@ class CreateAnnotationsFile(WorkflowTask):
 
     def run(self):
         """Actual process of the task."""
         # pylint: disable=protected-access
-        annotations = _collect_annotations(self.annotation_dir, self.morph_db)
+        annotations = collect_annotations(self.annotation_dir, self.morph_db)
         with open(self.destination, "w", encoding="utf-8") as f:
             json.dump(annotations, f, indent=4, sort_keys=True)
diff --git a/src/synthesis_workflow/tasks/validation.py b/src/synthesis_workflow/tasks/validation.py
index f4ae91c..e90dd4d 100644
--- a/src/synthesis_workflow/tasks/validation.py
+++ b/src/synthesis_workflow/tasks/validation.py
@@ -9,7 +9,6 @@
 import pandas as pd
 import pkg_resources
 import yaml
-from bluepy import Circuit
 from luigi.parameter import OptionalNumericalParameter
 from luigi.parameter import PathParameter
 from luigi_tools.parameter import BoolParameter
@@ -21,6 +20,7 @@
 from neurom.view import matplotlib_impl
 from voxcell import VoxelData
 from voxcell.cell_collection import CellCollection
+from voxcell.nexus.voxelbrain import Atlas
 
 from morphval import validation_main as morphval_validation
 from synthesis_workflow.tasks.circuit import CreateAtlasLayerAnnotations
@@ -41,6 +41,7 @@
 from synthesis_workflow.tasks.vacuum_synthesis import VacuumSynthesize
 from synthesis_workflow.vacuum_synthesis import VACUUM_SYNTH_MORPHOLOGY_PATH
 from synthesis_workflow.validation import SYNTH_MORPHOLOGY_PATH
+from synthesis_workflow.validation import AtlasCircuit
 from synthesis_workflow.validation import VacuumCircuit
 from synthesis_workflow.validation import convert_circuit_to_morphs_df
 from synthesis_workflow.validation import get_debug_data
@@ -182,12 +183,10 @@ def requires(self):
 
     def run(self):
         """Actual process of the task."""
         if self.in_atlas:
-            circuit = Circuit(
-                {
-                    "cells": self.input()["circuit"].path,
-                    "morphologies": self.input()["out_morphologies"].path,
-                    "atlas": CircuitConfig().atlas_path,
-                }
+            circuit = AtlasCircuit(
+                atlas=Atlas.open(CircuitConfig().atlas_path),
+                cells=CellCollection.load(self.input()["circuit"].path).as_dataframe(),
+                morphology_path=self.input()["out_morphologies"].path,
             )
         else:
diff --git a/src/synthesis_workflow/utils.py b/src/synthesis_workflow/utils.py
index 1c031a8..640c4f3 100644
--- a/src/synthesis_workflow/utils.py
+++ b/src/synthesis_workflow/utils.py
@@ -5,10 +5,12 @@
 from pathlib import Path
 
 import dictdiffer
+import lxml.etree
 import numpy as np
 import pandas as pd
 from jsonpath_ng import parse
 from pkg_resources import resource_filename
+from tqdm import tqdm
 
 # pylint:disable=too-many-nested-blocks
 
@@ -192,3 +194,41 @@
             "edges": [],
         }
     }
+
+
+def parse_annotations(filepath):
+    """Parse XML with morphology annotations."""
+    etree = lxml.etree.parse(filepath)
+    result = {}
+    for elem in etree.findall("placement"):
+        attr = dict(elem.attrib)
+        rule_id = attr.pop("rule")
+        if rule_id in result:
+            raise KeyError(f"Duplicate annotation for rule '{rule_id}'")
+        result[rule_id] = attr
+    return result
+
+
+def parse_morphdb(filepath):
+    """Parse (ext)neuronDB.dat file."""
+    columns = ["morphology", "layer", "mtype"]
+    first_row = pd.read_csv(filepath, sep=r"\s+", header=None, nrows=1)
+    if first_row.shape[1] > 3:
+        columns.append("etype")
+    return pd.read_csv(
+        filepath, sep=r"\s+", names=columns, usecols=columns, na_filter=False, dtype={"layer": str}
+    )
+
+
+def collect_annotations(annotation_dir, morphdb_path):
+    """Collect annotations from given directory."""
+    result = {}
+    if morphdb_path is None:
+        for filepath in tqdm(Path(annotation_dir).glob("*.xml")):
+            result[Path(filepath).stem] = parse_annotations(filepath)
+    else:
+        morphdb = parse_morphdb(morphdb_path)
+        for morph in tqdm(morphdb["morphology"].unique()):
+            filepath = Path(annotation_dir) / (morph + ".xml")
+            result[morph] = parse_annotations(filepath)
+    return result
diff --git a/src/synthesis_workflow/validation.py b/src/synthesis_workflow/validation.py
index fd5bf67..0a13a0f 100644
--- a/src/synthesis_workflow/validation.py
+++ b/src/synthesis_workflow/validation.py
@@ -21,11 +21,11 @@
 import numpy as np
 import pandas as pd
 import seaborn as sns
-from bluepy import Circuit
 from joblib import Parallel
 from joblib import delayed
 from matplotlib import cm
 from matplotlib.backends.backend_pdf import PdfPages
+from morph_tool.transform import transform
 from morphio.mut import Morphology
 from neurom import load_morphologies
 from neurom.apps import morph_stats
@@ -52,6 +52,7 @@
 
 VacuumCircuit = namedtuple("VacuumCircuit", ["cells", "morphs_df", "morphology_path"])
+AtlasCircuit = namedtuple("AtlasCircuit", ["atlas", "cells", "morphology_path"])
 
 SYNTH_MORPHOLOGY_PATH = "synth_morphology_path"
 
@@ -399,11 +400,18 @@ def sample_morph_voxel_values(
 def _get_depths_df(circuit, mtype, sample, voxeldata, sample_distance):
     """Create dataframe with depths data for violin plots."""
     out_of_bounds_value = np.nan
-    gids = circuit.cells.ids(group={"mtype": mtype}, sample=sample)
+    morphs_df = circuit.morphs_df
+    path = Path(circuit.morphology_path)
+    cells = morphs_df.loc[morphs_df["mtype"] == mtype, path]
+    gids = cells.sample(sample, random_state=42).index
 
     point_depths = defaultdict(list)
     for gid in gids:
-        morphology = circuit.morph.get(gid, transform=True, source="ascii")
+        morphology = Morphology(path / (cells.loc[gid, "morph"] + ".asc"))
+        T = np.eye(4)
+        T[:3, :3] = cells.loc[gid, "orientation"]
+        T[:3, 3] = cells.loc[gid, ["x", "y", "z"]]
+        transform(morphology, T)
         point_depth_tmp = sample_morph_voxel_values(
             morphology, sample_distance, voxeldata, out_of_bounds_value
         )
@@ -463,7 +471,7 @@ def _plot_density_profile(
     fig = plt.figure()
     ax = plt.gca()
     try:
-        if isinstance(circuit, Circuit):
+        if isinstance(circuit, AtlasCircuit):
             _plot_layers(x_pos, circuit.atlas, ax)
             plot_df = _get_depths_df(circuit, mtype, sample, voxeldata, sample_distance)
             ax.legend(loc="best")
diff --git a/tests/data/in_small_O1/out/atlas/atlas_planes_O0.npz b/tests/data/in_small_O1/out/atlas/atlas_planes_O0.npz
index 976ca51..070c074 100644
Binary files a/tests/data/in_small_O1/out/atlas/atlas_planes_O0.npz and b/tests/data/in_small_O1/out/atlas/atlas_planes_O0.npz differ
diff --git a/tox.ini b/tox.ini
index 2dee8de..495ffd9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,7 +5,6 @@ files = src/{[base]name} src/{[base]morphval} tests docs/source/conf.py setup.py
 
 [tox]
 envlist =
-    check-version
     check-packaging
     lint
     docs
@@ -19,8 +18,6 @@ minversion = 3.18
 extras = test
 setenv =
     COVERAGE_FILE = {env:COVERAGE_FILE:.coverage-{envname}}
-    PIP_INDEX_URL = {env:PIP_INDEX_URL:https://bbpteam.epfl.ch/repository/devpi/simple}
-    PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://pypi.python.org/simple}
 commands =
     pytest \
         -n {env:PYTEST_NPROCS:3} \
@@ -38,6 +35,8 @@
         --junit-xml=reports/pytest-{envname}.xml \
         --self-contained-html \
         --dcd-export-formatted-data \
+        --durations 10 \
+        --durations-min=2.0 \
         {posargs}
 
 [testenv:coverage]
@@ -49,13 +48,6 @@
     coverage xml
     coverage report
 
-[testenv:check-version]
-skip_install = true
-deps = bbp-nse-ci>=0.2.5
-commands =
-    do_release.py -p . check-version
-    do_release.py -p . check-changelog --release-only --path CHANGELOG.md
-
 [testenv:check-packaging]
 skip_install = true
 deps =
@@ -94,7 +86,6 @@
 commands = pre-commit run --all-files
 
 [testenv:docs]
-basepython = python3.9
 changedir = docs
 extras = docs
 allowlist_externals =
@@ -104,3 +95,10 @@
 commands =
     make clean
     make html SPHINXOPTS=-W
+
+[gh-actions]
+python =
+    3.9: py39, lint
+    3.10: py310, check-packaging
+    3.11: py311, docs
+    3.12: py312
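
A minimal usage sketch of the new annotation helpers that this patch adds to `src/synthesis_workflow/utils.py` (replacing the private `placement_algorithm.app.compact_annotations._collect_annotations` import dropped from `tasks/synthesis.py`). Illustrative only, not part of the patch; the `annotations/` directory and output filename are hypothetical:

```python
import json

from synthesis_workflow.utils import collect_annotations

# With morphdb_path=None, every "<morph>.xml" placement-annotation file in the
# directory is parsed; passing an (ext)neuronDB.dat path instead restricts
# parsing to the morphologies listed in that file.
annotations = collect_annotations("annotations/", morphdb_path=None)

# Same dump as the CreateAnnotationsFile task in the patch above.
with open("annotations.json", "w", encoding="utf-8") as f:
    json.dump(annotations, f, indent=4, sort_keys=True)
```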