Commit

Merge branch 'develop' into feature/sleepless
bimac committed Nov 13, 2023
2 parents c04749e + 94fdbda commit bac2dc4
Showing 25 changed files with 2,103 additions and 390 deletions.
2 changes: 1 addition & 1 deletion .flake8
@@ -1,6 +1,6 @@
[flake8]
max-line-length = 130
-ignore = W504, W503, E266
+ignore = W504, W503, E266, D, BLK
exclude =
.git,
__pycache__,
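Note: the two added ignore families are flake8 plugin codes. `D` covers the flake8-docstrings (pydocstyle) checks and `BLK` covers flake8-black, so this change silences docstring-style and Black-formatting warnings across the repository.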
664 changes: 517 additions & 147 deletions brainbox/behavior/training.py

Large diffs are not rendered by default.

28 changes: 21 additions & 7 deletions brainbox/io/one.py
@@ -12,10 +12,10 @@
import matplotlib.pyplot as plt

from one.api import ONE, One
-import one.alf.io as alfio
from one.alf.files import get_alf_path
from one.alf.exceptions import ALFObjectNotFound
from one.alf import cache
+import one.alf.io as alfio
from neuropixel import TIP_SIZE_UM, trace_header
import spikeglx

@@ -830,6 +830,20 @@ def __post_init__(self):
self.atlas = AllenAtlas()
self.files = {}

def _load_object(self, *args, **kwargs):
"""
        This function is a wrapper around alfio.load_object that removes the UUID
        suffixes (present in SDSC filenames) from the keys of the loaded object.
"""
remove_uuids = getattr(self.one, 'uuid_filenames', False)
d = alfio.load_object(*args, **kwargs)
if remove_uuids:
            # strip the trailing '.<36-char UUID>' from each key name
keys = list(d.keys())
for k in keys:
d[k[:-37]] = d.pop(k)
return d

@staticmethod
def _get_attributes(dataset_types):
"""returns attributes to load for spikes and clusters objects"""
@@ -865,7 +879,7 @@ def load_spike_sorting_object(self, obj, *args, **kwargs):
:return:
"""
self.download_spike_sorting_object(obj, *args, **kwargs)
-        return alfio.load_object(self.files[obj])
+        return self._load_object(self.files[obj])

def download_spike_sorting_object(self, obj, spike_sorter='pykilosort', dataset_types=None, collection=None,
missing='raise', **kwargs):
@@ -922,10 +936,10 @@ def load_channels(self, **kwargs):
# we do not specify the spike sorter on purpose here: the electrode sites do not depend on the spike sorting
self.download_spike_sorting_object(obj='electrodeSites', collection=f'alf/{self.pname}', missing='ignore')
if 'electrodeSites' in self.files:
-            channels = alfio.load_object(self.files['electrodeSites'], wildcards=self.one.wildcards)
+            channels = self._load_object(self.files['electrodeSites'], wildcards=self.one.wildcards)
else: # otherwise, we try to load the channel object from the spike sorting folder - this may not contain histology
self.download_spike_sorting_object(obj='channels', **kwargs)
-            channels = alfio.load_object(self.files['channels'], wildcards=self.one.wildcards)
+            channels = self._load_object(self.files['channels'], wildcards=self.one.wildcards)
if 'brainLocationIds_ccf_2017' not in channels:
_logger.debug(f"loading channels from alyx for {self.files['channels']}")
_channels, self.histology = _load_channel_locations_traj(
@@ -960,8 +974,8 @@ def load_spike_sorting(self, spike_sorter='pykilosort', **kwargs):
self.spike_sorter = spike_sorter
self.download_spike_sorting(spike_sorter=spike_sorter, **kwargs)
channels = self.load_channels(spike_sorter=spike_sorter, **kwargs)
-        clusters = alfio.load_object(self.files['clusters'], wildcards=self.one.wildcards)
-        spikes = alfio.load_object(self.files['spikes'], wildcards=self.one.wildcards)
+        clusters = self._load_object(self.files['clusters'], wildcards=self.one.wildcards)
+        spikes = self._load_object(self.files['spikes'], wildcards=self.one.wildcards)

return spikes, clusters, channels

@@ -1090,7 +1104,7 @@ def raster(self, spikes, channels, save_dir=None, br=None, label='raster', time_

self.download_spike_sorting_object('drift', self.spike_sorter, missing='ignore')
if 'drift' in self.files:
-            drift = alfio.load_object(self.files['drift'], wildcards=self.one.wildcards)
+            drift = self._load_object(self.files['drift'], wildcards=self.one.wildcards)
axs[0, 0].plot(drift['times'], drift['um'], 'k', alpha=.5)

if save_dir is not None:
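For context, here is a minimal sketch, not taken from the commit, of the key renaming that `_load_object` performs. The committed code simply slices off the last 37 characters (a dot plus a 36-character UUID); this variant checks the suffix first:

    import uuid

    def strip_uuid_keys(d):
        """Return a copy of d with any trailing '.<uuid>' removed from the keys."""
        out = {}
        for k, v in d.items():
            stem, _, suffix = k.rpartition('.')
            try:
                uuid.UUID(suffix)  # only strip when the suffix really is a UUID
                out[stem] = v
            except ValueError:
                out[k] = v
        return out

    # e.g. {'times.4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a': ...} becomes {'times': ...}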
168 changes: 168 additions & 0 deletions examples/loading_data/loading_raw_audio_data.ipynb
@@ -0,0 +1,168 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "5683982d",
"metadata": {},
"source": [
"# Loading Raw Audio Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b2485da",
"metadata": {
"nbsphinx": "hidden"
},
"outputs": [],
"source": [
"# Turn off logging, this is a hidden cell on docs page\n",
"import logging\n",
"logger = logging.getLogger('ibllib')\n",
"logger.setLevel(logging.CRITICAL)"
]
},
{
"cell_type": "markdown",
"id": "16345774",
"metadata": {},
"source": [
"The audio file is saved from the microphone. It is useful to look at it to plot a spectrogram and confirm the sounds played during the task are indeed audible."
]
},
{
"cell_type": "markdown",
"id": "8d62c890",
"metadata": {},
"source": [
"## Relevant datasets\n",
"* _iblrig_micData.raw.flac\n"
]
},
{
"cell_type": "markdown",
"id": "bc23fdf7",
"metadata": {},
"source": [
"## Loading"
]
},
{
"cell_type": "markdown",
"id": "9103084d",
"metadata": {},
"source": [
"### Loading raw audio file"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b807296",
"metadata": {
"ibl_execute": false
},
"outputs": [],
"source": [
"from one.api import ONE\n",
"import soundfile as sf\n",
"\n",
"one = ONE()\n",
"eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'\n",
"\n",
"# -- Get raw data\n",
"filename = one.load_dataset(eid, '_iblrig_micData.raw.flac', download_only=True)\n",
"with open(filename, 'rb') as f:\n",
" wav, fs = sf.read(f)"
]
},
{
"cell_type": "markdown",
"id": "203d23c1",
"metadata": {},
"source": [
"## Plot the spectrogram"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "811e3533",
"metadata": {
"ibl_execute": false
},
"outputs": [],
"source": [
"from ibllib.io.extractors.training_audio import welchogram\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"# -- Compute spectrogram over first 2 minutes\n",
"t_idx = 120 * fs\n",
"tscale, fscale, W, detect = welchogram(fs, wav[:t_idx])\n",
"\n",
"# -- Put data into single variable\n",
"TF = {}\n",
"\n",
"TF['power'] = W.astype(np.single)\n",
"TF['frequencies'] = fscale[None, :].astype(np.single)\n",
"TF['onset_times'] = detect\n",
"TF['times_mic'] = tscale[:, None].astype(np.single)\n",
"\n",
"# # -- Plot spectrogram\n",
"tlims = TF['times_mic'][[0, -1]].flatten()\n",
"flims = TF['frequencies'][0, [0, -1]].flatten()\n",
"fig = plt.figure(figsize=[16, 7])\n",
"ax = plt.axes()\n",
"im = ax.imshow(20 * np.log10(TF['power'].T), aspect='auto', cmap=plt.get_cmap('magma'),\n",
" extent=np.concatenate((tlims, flims)),\n",
" origin='lower')\n",
"ax.set_xlabel(r'Time (s)')\n",
"ax.set_ylabel(r'Frequency (Hz)')\n",
"plt.colorbar(im)\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"id": "bef6702e",
"metadata": {},
"source": [
"## More details\n",
"* [Description of audio datasets](https://docs.google.com/document/d/1OqIqqakPakHXRAwceYLwFY9gOrm8_P62XIfCTnHwstg/edit#heading=h.n61f0vdcplxp)"
]
},
{
"cell_type": "markdown",
"id": "4e9dd4b9",
"metadata": {},
"source": [
"## Useful modules\n",
"* [ibllib.io.extractors.training_audio](https://int-brain-lab.github.io/iblenv/_autosummary/ibllib.io.extractors.training_audio.html#module-ibllib.io.extractors.training_audio)"
]
}
],
"metadata": {
"celltoolbar": "Edit Metadata",
"kernelspec": {
"display_name": "Python [conda env:iblenv] *",
"language": "python",
"name": "conda-env-iblenv-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
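As a quick follow-up to the notebook above, a minimal sketch, not part of the commit, for exporting a short clip to listen to. It assumes the `wav` array and `fs` sample rate from the loading cell, and uses `soundfile.write(file, data, samplerate)`:

    import soundfile as sf

    # Save the first 5 seconds of the microphone recording as a WAV file
    sf.write('mic_clip.wav', wav[:5 * fs], fs)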
58 changes: 57 additions & 1 deletion examples/loading_data/loading_raw_ephys_data.ipynb
@@ -152,7 +152,7 @@
"\n",
"# Use spikeglx reader to read in the whole raw data\n",
"sr = spikeglx.Reader(bin_file)\n",
"sr.shape\n"
"print(sr.shape)"
]
},
{
@@ -326,6 +326,62 @@
"destriped = destripe(raw, fs=sr.fs)"
]
},
{
"cell_type": "markdown",
"source": [
"## Get the probe geometry"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Using the `eid` and `probe` information"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"from brainbox.io.one import load_channel_locations\n",
"channels = load_channel_locations(eid, probe)\n",
"print(channels[probe].keys())\n",
"# Use the axial and lateral coordinates ; Print example first 4 channels\n",
"print(channels[probe][\"axial_um\"][0:4])\n",
"print(channels[probe][\"lateral_um\"][0:4])"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Using the reader and the `.cbin` file"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# You would have loaded the bin file as per the loading example above\n",
"# sr = spikeglx.Reader(bin_file)\n",
"sr.geometry"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"id": "9851b10d",
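Building on the `sr.geometry` cell above, a hedged sketch of plotting the channel layout. It assumes the geometry dict exposes per-channel 'x' and 'y' coordinate arrays, as `neuropixel.trace_header` provides:

    import matplotlib.pyplot as plt

    geom = sr.geometry  # dict of per-channel geometry arrays
    plt.scatter(geom['x'], geom['y'], s=4)
    plt.xlabel('x (um)')
    plt.ylabel('y (um)')
    plt.title('Probe channel layout')
    plt.show()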
5 changes: 2 additions & 3 deletions ibllib/__init__.py
@@ -2,13 +2,12 @@
import logging
import warnings

-__version__ = '2.26'
+__version__ = '2.27'
warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib')

# if this becomes a full-blown library we should leave the logging configuration to the discretion of the dev
# who uses the library. However, since it can also be provided as an app, the end-users should be provided
-# with an useful default logging in standard output without messing with the complex python logging system
-# -*- coding:utf-8 -*-
+# with a useful default logging in standard output without messing with the complex python logging system
USE_LOGGING = True
#%(asctime)s,%(msecs)d
if USE_LOGGING:
23 changes: 21 additions & 2 deletions ibllib/io/extractors/camera.py
@@ -16,6 +16,7 @@
from ibllib.io.extractors.base import get_session_extractor_type
from ibllib.io.extractors.ephys_fpga import get_sync_fronts, get_sync_and_chn_map
import ibllib.io.raw_data_loaders as raw
import ibllib.io.extractors.video_motion as vmotion
from ibllib.io.extractors.base import (
BaseBpodTrialsExtractor,
BaseExtractor,
@@ -148,12 +149,30 @@ def _extract(self, sync=None, chmap=None, video_path=None, sync_label='audio',
except AssertionError as ex:
_logger.critical('Failed to extract using %s: %s', sync_label, ex)

-        # If you reach here extracting using sync TTLs was not possible
-        _logger.warning('Alignment by wheel data not yet implemented')
+        # If you reach here, extracting using sync TTLs was not possible, so we attempt to align using wheel motion energy
+        _logger.warning('Attempting to align using wheel')

try:
if self.label not in ['left', 'right']:
# Can only use wheel alignment for left and right cameras
raise ValueError(f'Wheel alignment not supported for {self.label} camera')

motion_class = vmotion.MotionAlignmentFullSession(self.session_path, self.label, sync='nidq', upload=True)
new_times = motion_class.process()
if not motion_class.qc_outcome:
raise ValueError(f'Wheel alignment for {self.label} camera failed to pass qc: {motion_class.qc}')
else:
_logger.warning(f'Wheel alignment for {self.label} camera successful, qc: {motion_class.qc}')
return new_times

except Exception as err:
_logger.critical(f'Failed to align with wheel for {self.label} camera: {err}')

if length < raw_ts.size:
df = raw_ts.size - length
_logger.info(f'Discarding first {df} pulses')
raw_ts = raw_ts[df:]

return raw_ts


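For reference, a hedged usage sketch of the wheel-based alignment introduced above. The class, arguments and attributes mirror the call in camera.py; `session_path` is a placeholder for a local session folder:

    from ibllib.io.extractors.video_motion import MotionAlignmentFullSession

    # session_path: pathlib.Path to the session folder (placeholder)
    aligner = MotionAlignmentFullSession(session_path, 'left', sync='nidq', upload=True)
    new_times = aligner.process()
    if not aligner.qc_outcome:
        print(f'Wheel alignment failed QC: {aligner.qc}')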
