diff --git a/README.md b/README.md index 90952c826..abb00b6de 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,10 @@ # iblrig -iblrig is using gitflow and semantic versioning conventions. Click on the following links for more information on [gitflow](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow) or [semantic versioning](https://semver.org/). +This repository is using [semantic versioning](https://semver.org/) and [gitflow](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow) conventions: +![](README_semver.png) +![](README_gitflow_workflow.png) + +Please review these conventions to more easily contribute to the project. --- ## How to work with this repository @@ -17,7 +21,7 @@ iblrig is using gitflow and semantic versioning conventions. Click on the follow ### Hotfix branches: - a `hotfix` or `maintenance` branch is forked from `master` -- once the fix has been thoroughly tested, it will get merged back into `master`, `develop`, `rc` +- once the fix has been thoroughly tested, it will get merged back into `master` and `develop` - the `hotfix` branch will eventually be deleted --- @@ -27,7 +31,7 @@ In order to install iblrig on a Windows machine please ensure that the following - [Git](https://git-scm.com) - [Anaconda](https://anaconda.com) -### Installation Instructions: +### Instructions for automated installation from scratch: - Ensure Git, Anaconda, and your favorite text editor are already installed - Please also ensure a stable internet connection is present as the installer pulls from various servers throughout the installation process - Clone the latest version of this repository to the root of the `C:\` drive @@ -36,11 +40,43 @@ In order to install iblrig on a Windows machine please ensure that the following - The installer will take over for a while and ensure the rest of the requisite software is present - The installer will prompt you to install ONE (yes/no) - If you decide to install ONE, various 
prompts will assist you in the default configuration - - _TODO: Add and document better error catching/handling for when these settings are incorrect_ - The installer will prompt you to install Bonsai (yes/no) - Installation complete ### Running pybpod -- Navigate your Anaconda Prompt to `C:\iblrig` +- Navigate your Anaconda Prompt to the iblrig folder: `cd C:\iblrig` +- Ensure the `iblrig` anaconda environment is activated: `conda activate iblrig` - At the prompt, run: `.\pybpod.bat` -- _TODO: More instruction on how to work with the software? Other options?_ \ No newline at end of file + +### Instructions for manual installation from scratch: +The following commands to be run from the Windows command prompt (not tested in powershell). Please ensure that your git and +anaconda environment are up-to-date. +```commandline +cd C:\ +git clone https://github.com/int-brain-lab/iblrig +cd C:\iblrig +conda create --name iblrig python=3.7.13 --yes +conda activate iblrig +pip install --editable . +mkdir C:\iblrig_params +python setup_pybpod.py C:\iblrig_params +cd C:\iblrig\Bonsai +setup.bat +cd C:\iblrig +conda create --name ibllib python=3.8.13 --yes +conda activate ibllib +pip install ibllib +python -c "from one.api import ONE; ONE()" # several prompts will require interaction to configure ONE +conda activate iblrig +pybpod.bat +``` + +### Instructions for manual update from 6.6.2 to 6.6.3: +The following commands to be run from the Windows command prompt (not tested in powershell). Please ensure that your git and +anaconda environment are up-to-date. 
**Backup any custom tasks or modifications before performing the following** ```commandline cd C:\iblrig git reset --hard git fetch git pull ``` \ No newline at end of file diff --git a/README_gitflow_workflow.png b/README_gitflow_workflow.png new file mode 100644 index 000000000..7d7139123 Binary files /dev/null and b/README_gitflow_workflow.png differ diff --git a/README_semver.png b/README_semver.png new file mode 100644 index 000000000..154c979ee Binary files /dev/null and b/README_semver.png differ diff --git a/iblrig/__init__.py b/iblrig/__init__.py index 36cc54d8b..3306fbf16 100644 --- a/iblrig/__init__.py +++ b/iblrig/__init__.py @@ -1,9 +1,4 @@ -__version__ = "6.6.2" -# !/usr/bin/env python -# @Author: Niccolò Bonacchi -# @Creation_Date: Friday, January 11th 2019, 2:04:42 pm -# @Editor: Michele Fabbri -# @Edit_Date: 2022-02-01 +__version__ = "6.6.3" import logging import colorlog diff --git a/iblrig/frame2TTL.py b/iblrig/frame2TTL.py index d02622e45..ef0046283 100644 --- a/iblrig/frame2TTL.py +++ b/iblrig/frame2TTL.py @@ -25,6 +25,7 @@ def Frame2TTL(serial_port: str, version: int = 2) -> object: object: Instance of the v1/v2 class """ f2ttl = None + log.debug(f"serial_port from Frame2TTL: {serial_port}") if version == 2: try: f2ttl = Frame2TTLv2(serial_port) @@ -32,7 +33,7 @@ def Frame2TTL(serial_port: str, version: int = 2) -> object: if iblrig.params.load_params_file().get("F2TTL_HW_VERSION", None) != 2: iblrig.params.update_params_file(data={"F2TTL_HW_VERSION": 2}) return f2ttl - except BaseException as e: + except (serial.SerialException, AssertionError) as e: log.warning(f"Cannot connect assuming F2TTLv2 device, continuing with v1: {e}") elif version == 1: try: @@ -41,13 +42,16 @@ def Frame2TTL(serial_port: str, version: int = 2) -> object: if iblrig.params.load_params_file().get("F2TTL_HW_VERSION", None) != 1: iblrig.params.update_params_file(data={"F2TTL_HW_VERSION": 1}) return f2ttl - except BaseException as e: + except AssertionError as e:
log.error( f"Couldn't connect to F2TTLv1: {str(e)}\nDisconnecting and then " f"reconnecting the Frame2TTL cable may resolve this issue." ) - elif version == 0: - return None + raise e + except FileNotFoundError as e: + raise e + else: + raise ValueError("Unsupported version " + str(version)) return Frame2TTL(serial_port, version=version - 1) @@ -296,7 +300,7 @@ def connect(self) -> serial.Serial: # 1 byte response expected (unsigned) self.hw_version = int.from_bytes(ser.read(1), byteorder="little", signed=False) if self.hw_version != 2: - self.ser.close() + ser.close() raise serial.SerialException("Error: Frame2TTLv2 requires hardware version 2.") return ser diff --git a/iblrig/iotasks.py b/iblrig/iotasks.py index 583f2e4e0..c5014137d 100644 --- a/iblrig/iotasks.py +++ b/iblrig/iotasks.py @@ -20,6 +20,7 @@ import iblrig.raw_data_loaders as raw log = logging.getLogger("iblrig") +N_PREGENERATED_SESSIONS = 12 class ComplexEncoder(json.JSONEncoder): @@ -145,7 +146,7 @@ def load_session_order_idx(last_settings_data: dict) -> tuple: session_idx = 0 elif "SESSION_ORDER" in last_settings_data.keys(): session_order = last_settings_data["SESSION_ORDER"] - session_idx = last_settings_data["SESSION_IDX"] + 1 + session_idx = (last_settings_data["SESSION_IDX"] + 1) % N_PREGENERATED_SESSIONS return session_order, session_idx diff --git a/iblrig/params.py b/iblrig/params.py index 4b3af369a..57f70dc00 100644 --- a/iblrig/params.py +++ b/iblrig/params.py @@ -194,6 +194,7 @@ def load_params_file(silent=True) -> dict: """ iblrig_params = Path(ph.get_iblrig_params_folder()) fpath = iblrig_params / ".iblrig_params.json" + log.debug(f"fpath from load_params_file: {fpath}") if fpath.exists(): with open(fpath, "r") as f: out = json.load(f) diff --git a/iblrig/session_creator.py b/iblrig/session_creator.py index f08ab741e..e0fbb18fe 100644 --- a/iblrig/session_creator.py +++ b/iblrig/session_creator.py @@ -19,9 +19,11 @@ # EPHYS CHOICE WORLD -def make_ephysCW_pc(): +def 
make_ephysCW_pc(prob_type='biased'): """make_ephysCW_pc Makes positions, contrasts and block lengths for ephysCW Generates ~2000 trias + :prob_type: (str) 'biased': 0 contrast half as likely to be drawn, 'uniform': 0 contrast as + likely as other contrasts :return: pc :rtype: [type] """ @@ -38,7 +40,7 @@ def make_ephysCW_pc(): len_block.append(blocks.get_block_len(60, min_=20, max_=100)) for x in range(len_block[-1]): p = blocks.draw_position([-35, 35], prob_left) - c = misc.draw_contrast(contrasts, prob_type="uniform") + c = misc.draw_contrast(contrasts, prob_type=prob_type) pc = np.append(pc, np.array([[p, c, prob_left]]), axis=0) # do this in PC space prob_left = np.round(np.abs(1 - prob_left), 1) diff --git a/release_notes.md b/release_notes.md index dbb28cf8d..32887bf73 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,5 +1,12 @@ # **Release notes** +## **Release Notes 6.6.3** + +- additional f2ttl error catching and messaging +- purge_rig_data.py script now functional +- corrected datetime import for `tasks/_iblrig_misc_bpod_ttl_test/trial_params.py` +- manual installation and update instructions added to README.md to simplify troubleshooting efforts + ## **Release Notes 6.6.2** - Update procedure will now ignore the reinstall flag if version increments only micro/patch version diff --git a/requirements.txt b/requirements.txt index 34997d3c6..f08ceac6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,8 +5,9 @@ Cython==0.29.6 globus-sdk==1.7.1 numpy<1.21,>=1.17 opencv-python==3.4.5.20 +ONE-api packaging==20.8 -pandas==0.25.3 +pandas pybpod==1.8.2 pyOpenSSL==19.0.0 PySocks==1.6.8 diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/ibllib/__init__.py b/scripts/ibllib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/ibllib/purge_rig_data.py b/scripts/ibllib/purge_rig_data.py index c4404dc97..535292e16 100644 ---
a/scripts/ibllib/purge_rig_data.py +++ b/scripts/ibllib/purge_rig_data.py @@ -1,87 +1,82 @@ -#!/usr/bin/env python -# @Author: Niccolò Bonacchi -# @Creation_Date: Thursday, March 28th 2019, 7:53:44 pm -# @Editor: Michele Fabbri -# @Edit_Date: 2022-02-01 """ Purge data from RIG -- Find all files by rglob -- Find all sessions of the found files -- Check Alyx if corresponding datasetTypes have been registered as existing -sessions and files on Flatiron -- Delete local raw file if found on Flatiron +- looks for datasets matching filename pattern +- datasets that exist in ONE cache are removed """ import argparse import logging +from fnmatch import fnmatch from pathlib import Path +import one from one.alf.files import get_session_path from one.api import ONE -log = logging.getLogger("iblrig") +log = logging.getLogger('iblrig') +try: # Verify ONE-api is at v1.13.0 or greater + assert(tuple(map(int, one.__version__.split('.'))) >= (1, 13, 0)) + from one.alf.cache import iter_datasets, iter_sessions +except (AssertionError, ImportError) as e: + if e is AssertionError: + log.error("The found version of ONE needs to be updated to run this script, please run a 'pip install -U ONE-api' from " + "the appropriate anaconda environment") + raise -def session_name(path) -> str: - """Returns the session name (subject/date/number) string for any filepath - using session_path""" - return "/".join(get_session_path(path).parts[-3:]) +def session_name(path, lab=None) -> str: + """ + Returns the session name (subject/date/number) string for a given session path. If lab is given + returns lab/Subjects/subject/date/number. 
+ """ + lab = f'{lab}/Subjects/' if lab else '' + return lab + '/'.join(get_session_path(path).parts[-3:]) -def purge_local_data(local_folder, file_name, lab=None, dry=False): - # Figure out datasetType from file_name or file path - file_name = Path(file_name).name - alf_parts = file_name.split(".") - dstype = ".".join(alf_parts[:2]) - print(f"Looking for file <{file_name}> in folder <{local_folder}>") - # Get all paths for file_name in local folder + +def local_alf_paths(root_dir, filename): + """Yield session path and relative paths of ALFs that match filename pattern""" + for session_path in iter_sessions(root_dir): + for dataset in iter_datasets(session_path): + if fnmatch(dataset, filename): + yield session_path, dataset + + +def purge_local_data(local_folder, filename='*', lab=None, dry=False, one=None): + # Figure out datasetType from filename or file path local_folder = Path(local_folder) - files = list(local_folder.rglob(f"*{file_name}")) - print(f"Found {len(files)} files") - print(f"Checking on Flatiron for datsetType: {dstype}...") - # Get all sessions and details from Alyx that have the dstype - one = ONE(cache_rest=None) - if lab is None: - eid, det = one.search(dataset_types=[dstype], details=True) - else: - eid, det = one.search(dataset_types=[dstype], lab=lab, details=True) - urls = [] - for d in det: - urls.extend( - [ - x["data_url"] - for x in d["data_dataset_session_related"] - if x["dataset_type"] == dstype - ] - ) - # Remove None answers when session is registered but dstype not htere yet - urls = [u for u in urls if u is not None] - print(f"Found files on Flatiron: {len(urls)}") + + # Get matching files that exist in ONE cache to_remove = [] - for f in files: - sess_name = session_name(f) - for u in urls: - if sess_name in u: - to_remove.append(f) - print(f"Local files to remove: {len(to_remove)}") - for f in to_remove: - print(f) - if dry: + one = one or ONE() + for session_path, dataset in local_alf_paths(local_folder, filename): + session 
= session_name(session_path, lab=lab) + eid = one.to_eid(session) + if not eid: + continue + matching = one.list_datasets(eid, dataset.as_posix()) + if not matching: continue - else: + assert len(matching) == 1 + to_remove.append(local_folder.joinpath(session_path, dataset)) + + log.info(f'Local files to remove: {len(to_remove)}') + for f in to_remove: + log.info(f'DELETE: {f}') + if not dry: f.unlink() - return + return to_remove -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Delete files from rig") - parser.add_argument("folder", help="Local iblrig_data folder") - parser.add_argument("file", help="File name to search and destroy for every session") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Delete files from rig') + parser.add_argument('folder', help='Local iblrig_data folder') + parser.add_argument('file', help='File name to search and destroy for every session') parser.add_argument( - "-lab", required=False, default=None, help="Lab name, search on Alyx faster. default: None", + '-lab', required=False, default=None, help='Lab name, in case sessions conflict between labs. default: None', ) parser.add_argument( - "--dry", required=False, default=False, action="store_true", help="Dry run? default: False", + '--dry', required=False, default=False, action='store_true', help='Dry run? 
default: False', ) args = parser.parse_args() purge_local_data(args.folder, args.file, lab=args.lab, dry=args.dry) - print("Done\n") + print('Done\n') diff --git a/tasks/_iblrig_misc_bpod_ttl_test/trial_params.py b/tasks/_iblrig_misc_bpod_ttl_test/trial_params.py index 8edcfc901..aaa4bc5e3 100644 --- a/tasks/_iblrig_misc_bpod_ttl_test/trial_params.py +++ b/tasks/_iblrig_misc_bpod_ttl_test/trial_params.py @@ -1,7 +1,4 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Author: Niccolò Bonacchi -# @Date: 2018-02-02 14:06:34 +import datetime import json import logging @@ -92,7 +89,6 @@ def next_trial(self): import time import task_settings as _task_settings import scratch._user_settings as _user_settings - import datetime # noqa dt = datetime.datetime.now() dt = [ diff --git a/test_iblrig/__init__.py b/test_iblrig/__init__.py index e69de29bb..24424908b 100644 --- a/test_iblrig/__init__.py +++ b/test_iblrig/__init__.py @@ -0,0 +1,6 @@ +OPENALYX_PARAMETERS = { + "base_url": "https://openalyx.internationalbrainlab.org", + "username": "intbrainlab", + "password": "international", + "silent": True +} \ No newline at end of file diff --git a/test_iblrig/test_scripts.py b/test_iblrig/test_scripts.py index f22de949f..7f3a38e86 100644 --- a/test_iblrig/test_scripts.py +++ b/test_iblrig/test_scripts.py @@ -1,17 +1,74 @@ -import json -import os -import pathlib import tempfile import unittest +from pathlib import Path + +from one.api import ONE + +from test_iblrig import OPENALYX_PARAMETERS + +try: # If an AssertionError is thrown on import, ONE-api is less than v1.13.0 + from scripts.ibllib.purge_rig_data import purge_local_data, session_name +except AssertionError: + SKIP_PURGE_TEST = True -# TODO: Flesh out script testing in the future class TestScripts(unittest.TestCase): + def setUp(self) -> None: + self.tempdir = tempfile.TemporaryDirectory() + self.addCleanup(self.tempdir.cleanup) + + @unittest.skipIf(SKIP_PURGE_TEST, "Skipping test_purge_rig_data, ONE-api is lower than 
v1.13.0") + def test_purge_rig_data(self): + # Setup out test + root = Path(self.tempdir.name) + local_data = root.joinpath('iblrig_data', 'Subjects') + local_data.mkdir(parents=True) + # Need to add a username/password to the ONE call for the test to function + one = ONE(**OPENALYX_PARAMETERS) + # Find a session with at least 5 or so datasets and touch those files + sessions = one.search(lab='cortex') + session = next(x for x in sessions if len(one.list_datasets(x, collection='raw*')) > 5) + session_path = local_data.joinpath(session_name(one.eid2path(session))) + datasets = one.list_datasets(session_path, collection='raw*') + for rel_path in datasets: + session_path.joinpath(rel_path).parent.mkdir(parents=True, exist_ok=True) + session_path.joinpath(rel_path).touch() + # Touch some files that don't exist in the cache + session_path.joinpath('raw_foobar_data').mkdir() + for i in range(5): + session_path.joinpath('raw_foobar_data', f'_test_foo.bar{i}.npy').touch() + + # Test filename filter + filename = '*' + '.'.join(datasets[0].split('.', 2)[:-1]) + '.*' # e.g. 
*foo/bar.baz.* + assert any(session_path.rglob(filename)) # Files matching pattern should exist + # Dry run first, no files should be effected + removed = purge_local_data(str(local_data), filename, one=one, dry=True) + self.assertTrue( + all(session_path.rglob(filename)), 'files matching pattern deleted on dry run' + ) + self.assertFalse( + any(not x.exists() for x in removed), 'files matching pattern deleted on dry run' + ) + removed = purge_local_data(str(local_data), filename, one=one, dry=False) + self.assertFalse( + any(session_path.rglob(filename)), 'files matching pattern were not removed' + ) + self.assertFalse(any(x.exists() for x in removed), 'some returned files were not unlinked') + # Other registered datasets should still exist + self.assertTrue(any(x for x in session_path.rglob('*.*') if 'foobar' not in str(x))) + + # Test purge all + removed = purge_local_data(str(local_data), one=one) + self.assertFalse(any(x.exists() for x in removed), 'some returned files were not unlinked') + self.assertFalse( + any('foobar' in x for x in map(str, removed)), "files deleted that weren't in cache" + ) + def test_transfer_rig_data(self): # Ensure transfer_rig_data.py exists in the location we expect it - current_path = pathlib.Path(__file__).parent.absolute() - transfer_rig_data_script_loc = current_path.parent / "scripts" / "transfer_rig_data.py" - self.assertTrue(os.path.exists(transfer_rig_data_script_loc)) + current_path = Path(__file__).parent.absolute() + transfer_rig_data_script_loc = current_path.parent / 'scripts' / 'transfer_rig_data.py' + self.assertTrue(transfer_rig_data_script_loc.exists()) # Tests below will only pass if the call to 'move_ephys.py' script is commented out in # 'transfer_rig_data.py' main diff --git a/test_iblrig/test_task.py b/test_iblrig/test_task.py new file mode 100644 index 000000000..20e13b08f --- /dev/null +++ b/test_iblrig/test_task.py @@ -0,0 +1,38 @@ +""" +Unit tests for task logic functions +""" +import unittest +from iblrig 
import session_creator +import pandas as pd +import numpy as np +pc, lb = session_creator.make_ephysCW_pc() + + +class TestsBiasedBlocksGeneration(unittest.TestCase): + + @staticmethod + def count_contrasts(pc): + df = pd.DataFrame(data=pc, columns=['angle', 'contrast', 'proba']) + df['signed_contrasts'] = df['contrast'] * np.sign(df['angle']) + c = df.groupby('signed_contrasts')['signed_contrasts'].count() / pc.shape[0] + return c.values + + def test_default(self): + np.random.seed(7816) + # the default generation has a bias on the 0-contrast + pc, lb = session_creator.make_ephysCW_pc() + c = self.count_contrasts(pc) + assert np.all(np.abs(1 - c * 9) <= 0.2) + + def test_biased(self): + # test biased, signed contrasts are uniform + pc, lb = session_creator.make_ephysCW_pc(prob_type='biased') + c = self.count_contrasts(pc) + assert np.all(np.abs(1 - c * 9) <= 0.2) + + def test_uniform(self): + # test uniform: signed contrasts are twice as likely for the 0 sample + pc, lb = session_creator.make_ephysCW_pc(prob_type='uniform') + c = self.count_contrasts(pc) + c[4] /= 2 + assert np.all(np.abs(1 - c * 10) <= 0.2)