From d85d424b38ca58a3c6e6ae416ff18030a60658a4 Mon Sep 17 00:00:00 2001 From: owinter Date: Thu, 14 Mar 2024 10:39:35 -0700 Subject: [PATCH 1/3] support for online spikeglx reader --- release_notes.md | 4 + setup.py | 12 +- src/ibldsp/cadzow.py | 36 +- src/ibldsp/cuda_tools.py | 24 +- src/ibldsp/destripe_gpu.py | 26 +- src/ibldsp/filter_gpu.py | 39 +- src/ibldsp/fourier.py | 83 ++- src/ibldsp/smooth.py | 40 +- src/ibldsp/utils.py | 44 +- src/ibldsp/voltage.py | 337 +++++++---- src/ibldsp/waveforms.py | 290 +++++---- src/neuropixel.py | 561 ++++++++++-------- src/neurowaveforms/model.py | 251 +++++--- src/spikeglx.py | 527 ++++++++++------ src/tests/integration/cpu/csd_experiments.py | 48 +- src/tests/integration/cpu/test_destripe.py | 89 ++- ...4shanks_while_acquiring_incomplete.ap.meta | 47 ++ src/tests/unit/cpu/test_dsp.py | 7 +- src/tests/unit/cpu/test_neuropixel.py | 6 +- src/tests/unit/cpu/test_spikeglx.py | 455 ++++++++------ src/tests/unit/cpu/test_waveforms.py | 175 +++--- src/tests/unit/gpu/test_filter_gpu.py | 8 +- src/tests/unit/gpu/test_fourier.py | 1 - 23 files changed, 1944 insertions(+), 1166 deletions(-) create mode 100644 src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta diff --git a/release_notes.md b/release_notes.md index 099e3f4..8f0aeb2 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,3 +1,7 @@ +# 0.10.0 +## 0.10.0 2024-03-14 +- add support for online spikeglx reader + # 0.9.0 ## 0.9.2 2024-02-08 - `neurodsp` is now `ibldsp`. Drop-in replacement of the package name is all that is required to update. The `neurodsp` name will disappear on 01-Sep-2024; until then both names will work. diff --git a/setup.py b/setup.py index d7b3761..b42c42e 100644 --- a/setup.py +++ b/setup.py @@ -3,12 +3,12 @@ with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() -with open('requirements.txt') as f: - require = [x.strip() for x in f.readlines() if not x.startswith('git+')] +with open("requirements.txt") as f: + require = [x.strip() for x in f.readlines() if not x.startswith("git+")] setuptools.setup( name="ibl-neuropixel", - version="0.9.2", + version="0.10.0", author="The International Brain Laboratory", description="Collection of tools for Neuropixel 1.0 and 2.0 probes data", long_description=long_description, @@ -23,9 +23,9 @@ "Operating System :: OS Independent", ], install_requires=require, - package_dir={'': 'src'}, - packages=setuptools.find_packages(where="src", exclude=['tests']), + package_dir={"": "src"}, + packages=setuptools.find_packages(where="src", exclude=["tests"]), include_package_data=True, - py_modules=['spikeglx', 'neuropixel'], + py_modules=["spikeglx", "neuropixel"], python_requires=">=3.8", ) diff --git a/src/ibldsp/cadzow.py b/src/ibldsp/cadzow.py index eb97b86..f2184cd 100644 --- a/src/ibldsp/cadzow.py +++ b/src/ibldsp/cadzow.py @@ -99,7 +99,17 @@ def denoise(WAV, x, y, r, imax=None, niter=1): return WAV_ -def cadzow_np1(wav, fs=30000, rank=5, niter=1, fmax=7500, h=None, ovx=int(16), nswx=int(32), npad=int(0)): +def cadzow_np1( + wav, + fs=30000, + rank=5, + niter=1, + fmax=7500, + h=None, + ovx=int(16), + nswx=int(32), + npad=int(0), +): """ Apply Fxy rank-denoiser to a full recording of Neuropixel 1 probe geometry ntr - nswx has to be a multiple of (nswx - ovx) @@ -124,11 +134,19 @@ def cadzow_np1(wav, fs=30000, rank=5, niter=1, fmax=7500, h=None, ovx=int(16), n imax = np.searchsorted(fscale, fmax) WAV = scipy.fft.rfft(wav[:, :]) padgain = scipy.signal.windows.hann(npad * 2)[:npad] - WAV = 
np.r_[np.flipud(WAV[1:npad + 1, :]) * padgain[:, np.newaxis], - WAV, - np.flipud(WAV[-npad - 2: - 1, :]) * np.flipud(np.r_[padgain, 1])[:, np.newaxis]] # apply padding - x = np.r_[np.flipud(h['x'][1:npad + 1]), h['x'], np.flipud(h['x'][-npad - 2: - 1])] - y = np.r_[np.flipud(h['y'][1:npad + 1]) - 120, h['y'], np.flipud(h['y'][-npad - 2: - 1]) + 120] + WAV = np.r_[ + np.flipud(WAV[1 : npad + 1, :]) * padgain[:, np.newaxis], + WAV, + np.flipud(WAV[-npad - 2 : -1, :]) * np.flipud(np.r_[padgain, 1])[:, np.newaxis], + ] # apply padding + x = np.r_[ + np.flipud(h["x"][1 : npad + 1]), h["x"], np.flipud(h["x"][-npad - 2 : -1]) + ] + y = np.r_[ + np.flipud(h["y"][1 : npad + 1]) - 120, + h["y"], + np.flipud(h["y"][-npad - 2 : -1]) + 120, + ] WAV_ = np.zeros_like(WAV) gain = np.zeros(ntr + npad * 2 + 1) hanning = scipy.signal.windows.hann(ovx * 2 - 1)[0:ovx] @@ -144,9 +162,11 @@ def cadzow_np1(wav, fs=30000, rank=5, niter=1, fmax=7500, h=None, ovx=int(16), n gw = gain_window gain[firstx:lastx] += gw array = WAV[firstx:lastx, :] - array = denoise(array, x=x[firstx:lastx], y=y[firstx:lastx], r=rank, imax=imax, niter=niter) + array = denoise( + array, x=x[firstx:lastx], y=y[firstx:lastx], r=rank, imax=imax, niter=niter + ) WAV_[firstx:lastx, :] += array * gw[:, np.newaxis] - WAV_ = WAV_[npad:-npad - 1] # remove padding + WAV_ = WAV_[npad : -npad - 1] # remove padding wav_ = scipy.fft.irfft(WAV_) return wav_ diff --git a/src/ibldsp/cuda_tools.py b/src/ibldsp/cuda_tools.py index 5b26051..0b97f5a 100644 --- a/src/ibldsp/cuda_tools.py +++ b/src/ibldsp/cuda_tools.py @@ -4,7 +4,7 @@ from iblutil.util import Bunch -REGEX_PATTERN = r'const int\s+\S+\s+=\s+\S+.+' +REGEX_PATTERN = r"const int\s+\S+\s+=\s+\S+.+" def get_cuda(fn, **constants): @@ -15,10 +15,10 @@ def get_cuda(fn, **constants): :return: code: String constants: Bunch """ - path = Path(__file__).parent / (fn + '.cu') + path = Path(__file__).parent / (fn + ".cu") assert path.exists code = path.read_text() - code = code.replace('__global__ void', 'extern "C" __global__ void') + code = code.replace("__global__ void", 'extern "C" __global__ void') if not constants: return code, Bunch(extract_constants_from_cuda(code)) return change_cuda_constants(code, constants) @@ -33,9 +33,9 @@ def extract_constants_from_cuda(code): r = re.compile(REGEX_PATTERN) m = r.search(code) if m: - constants = m.group(0).replace('const int', '').replace(';', '').split(',') + constants = m.group(0).replace("const int", "").replace(";", "").split(",") for const in constants: - a, b = const.strip().split('=') + a, b = const.strip().split("=") yield a.strip(), int(b.strip()) @@ -49,14 +49,16 @@ def change_cuda_constants(code, constants): """ r = re.compile(REGEX_PATTERN) m = r.match(code) - assert m, 'No constants found in CUDA code' + assert m, "No constants found in CUDA code" pattern_length = m.span(0)[1] - 1 - default_constants_string = m.group(0).replace('const int', '').replace(';', '').split(',') + default_constants_string = ( + m.group(0).replace("const int", "").replace(";", "").split(",") + ) code_constants = {} # Find default constants in CUDA code for default_constants_string in default_constants_string: - name, value = default_constants_string.split('=') + name, value = default_constants_string.split("=") code_constants[name.strip()] = int(value.strip()) # Replace default constants with the new user constants @@ -65,9 +67,9 @@ def change_cuda_constants(code, constants): new_strings = [] for name, value in code_constants.items(): - new_strings.append(f'{name} = {value}') 
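For context, a minimal sketch of what the constant patching above does, assuming `ibldsp.cuda_tools` is importable as shown in this diff; the two-constant kernel source is made up for illustration:

```python
from ibldsp.cuda_tools import change_cuda_constants

# hypothetical kernel source: only the leading "const int ..." declaration is parsed
code = 'const int n_signals = 384, n_samples = 3000;\nextern "C" __global__ void sosfilt() {}'
new_code, consts = change_cuda_constants(code, {"n_samples": 30000})
# consts -> {'n_signals': 384, 'n_samples': 30000}
# new_code now starts with 'const int n_signals = 384, n_samples = 30000;'
```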
- new_constants_string = ', '.join(new_strings) + new_strings.append(f"{name} = {value}") + new_constants_string = ", ".join(new_strings) - new_code = f'const int {new_constants_string}{code[pattern_length:]}' + new_code = f"const int {new_constants_string}{code[pattern_length:]}" return new_code, Bunch(code_constants) diff --git a/src/ibldsp/destripe_gpu.py b/src/ibldsp/destripe_gpu.py index ee545d3..688a163 100644 --- a/src/ibldsp/destripe_gpu.py +++ b/src/ibldsp/destripe_gpu.py @@ -7,8 +7,16 @@ from .voltage import _get_destripe_parameters, interpolate_bad_channels, kfilt -def destripe_array(data, fs=30000, fshigh=300., taper_size=64, sample_shifts=None, channel_labels=None, - channel_xcoords=None, channel_ycoords=None): +def destripe_array( + data, + fs=30000, + fshigh=300.0, + taper_size=64, + sample_shifts=None, + channel_labels=None, + channel_xcoords=None, + channel_ycoords=None, +): """ Applies de-striping to a cupy array :param data: float32 cupy array, shape (n_channels, n_times) @@ -39,7 +47,7 @@ def destripe_array(data, fs=30000, fshigh=300., taper_size=64, sample_shifts=Non # align channels if the time shifts are provided if sample_shifts is not None: - sample_shifts = cp.array(sample_shifts, dtype='float32') + sample_shifts = cp.array(sample_shifts, dtype="float32") data = channel_shift(data, sample_shifts) # apply spatial filter @@ -47,9 +55,13 @@ def destripe_array(data, fs=30000, fshigh=300., taper_size=64, sample_shifts=Non kfilt_kwargs = _get_destripe_parameters(fs, None, None, True)[1] if channel_labels is not None: - data = interpolate_bad_channels(data, channel_labels, channel_xcoords, channel_ycoords, gpu=True) + data = interpolate_bad_channels( + data, channel_labels, channel_xcoords, channel_ycoords, gpu=True + ) inside_brain = cp.where(channel_labels != 3)[0] - data[inside_brain, :] = kfilt(data[inside_brain, :], gpu=True, **kfilt_kwargs) # apply the k-filter / CAR + data[inside_brain, :] = kfilt( + data[inside_brain, :], gpu=True, **kfilt_kwargs + ) # apply the k-filter / CAR else: data = kfilt(data, gpu=True, **kfilt_kwargs) # apply the k-filter / CAR @@ -65,6 +77,6 @@ def get_sos(fs, fshigh, fslow=None): :return: sos, second-order sections """ if fslow and fslow < fs / 2: - return butter(3, (2 * fshigh / fs, 2 * fslow / fs), 'bandpass', output='sos') + return butter(3, (2 * fshigh / fs, 2 * fslow / fs), "bandpass", output="sos") else: - return butter(3, 2 * fshigh / fs, 'high', output='sos') + return butter(3, 2 * fshigh / fs, "high", output="sos") diff --git a/src/ibldsp/filter_gpu.py b/src/ibldsp/filter_gpu.py index 4ee03a3..3328830 100644 --- a/src/ibldsp/filter_gpu.py +++ b/src/ibldsp/filter_gpu.py @@ -22,7 +22,7 @@ def sosfiltfilt_gpu(sos, x, axis=-1): n_sections, m = sos.shape if m != 6: - raise ValueError('sos array must be shape (n_sections, 6)') + raise ValueError("sos array must be shape (n_sections, 6)") ntaps = 2 * n_sections + 1 ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) @@ -64,20 +64,20 @@ def sosfilt_gpu(sos, x, axis, zi): n_sections, m = sos.shape if m != 6: - raise ValueError('sos array must be shape (n_sections, 6)') + raise ValueError("sos array must be shape (n_sections, 6)") x_zi_shape = list(x.shape) x_zi_shape[axis] = 2 x_zi_shape = tuple([n_sections] + x_zi_shape) if zi is not None: - assert zi.shape == x_zi_shape, f'zi has shape {zi.shape}, expected {x_zi_shape}' - zi = cp.array(zi, dtype='float32') + assert zi.shape == x_zi_shape, f"zi has shape {zi.shape}, expected {x_zi_shape}" + zi = cp.array(zi, dtype="float32") 
else: - zi = cp.zeros(x_zi_shape, dtype='float32') + zi = cp.zeros(x_zi_shape, dtype="float32") - sos = cp.array(sos, dtype='float32') - assert x.dtype == 'float32', f'Expected float32 data, got {x.dtype}' + sos = cp.array(sos, dtype="float32") + assert x.dtype == "float32", f"Expected float32 data, got {x.dtype}" axis = axis % x.ndim x = cp.ascontiguousarray(cp.moveaxis(x, axis, -1)) @@ -91,15 +91,15 @@ def sosfilt_gpu(sos, x, axis, zi): def _cuda_sosfilt(sos, x, zi): - n_signals, n_samples = x.shape n_sections = sos.shape[0] n_blocks, n_threads = sosfilt_kernel_params(n_signals) - code, consts = get_cuda('sosfilt', n_signals=n_signals, n_samples=n_samples, - n_sections=n_sections) - kernel = cp.RawKernel(code, 'sosfilt') + code, consts = get_cuda( + "sosfilt", n_signals=n_signals, n_samples=n_samples, n_sections=n_sections + ) + kernel = cp.RawKernel(code, "sosfilt") kernel((n_blocks,), (n_threads,), (sos, x, zi)) @@ -152,17 +152,20 @@ def odd_ext(x, n, axis=-1): if n < 1: return x if n > x.shape[axis] - 1: - raise ValueError(("The extension length n (%d) is too big. " + - "It must not exceed x.shape[axis]-1, which is %d.") - % (n, x.shape[axis] - 1)) + raise ValueError( + ( + "The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d." + ) + % (n, x.shape[axis] - 1) + ) left_end = axis_slice(x, start=0, stop=1, axis=axis) left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) right_end = axis_slice(x, start=-1, axis=axis) right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) - ext = cp.concatenate((2 * left_end - left_ext, - x, - 2 * right_end - right_ext), - axis=axis) + ext = cp.concatenate( + (2 * left_end - left_ext, x, 2 * right_end - right_ext), axis=axis + ) return ext diff --git a/src/ibldsp/fourier.py b/src/ibldsp/fourier.py index 1529529..3ee34bc 100644 --- a/src/ibldsp/fourier.py +++ b/src/ibldsp/fourier.py @@ -19,16 +19,16 @@ def channel_shift(data, sample_shifts): n_channels, n_times = data.shape - dephas = cp.tile(- 2 * pi / n_times * cp.arange(n_times), (n_channels, 1)) - dephas += 2 * pi * (dephas < - pi) # angles in the range (-pi,pi) + dephas = cp.tile(-2 * pi / n_times * cp.arange(n_times), (n_channels, 1)) + dephas += 2 * pi * (dephas < -pi) # angles in the range (-pi,pi) dephas = cp.exp(1j * dephas * sample_shifts[:, cp.newaxis]) data_shifted = cp.real(cp.fft.ifft(cp.fft.fft(data) * dephas)) - return cp.array(data_shifted, dtype='float32') + return cp.array(data_shifted, dtype="float32") -def convolve(x, w, mode='full', gpu=False): +def convolve(x, w, mode="full", gpu=False): """ Frequency domain convolution along the last dimension (2d arrays) Will broadcast if a matrix is convolved with a vector @@ -46,13 +46,19 @@ def convolve(x, w, mode='full', gpu=False): nsx = x.shape[-1] nsw = w.shape[-1] ns = ns_optim_fft(nsx + nsw) - x_ = gp.concatenate((x, gp.zeros([*x.shape[:-1], ns - nsx], dtype=x.dtype)), axis=-1) - w_ = gp.concatenate((w, gp.zeros([*w.shape[:-1], ns - nsw], dtype=w.dtype)), axis=-1) - xw = gp.real(gp.fft.irfft(gp.fft.rfft(x_, axis=-1) * gp.fft.rfft(w_, axis=-1), axis=-1)) - xw = xw[..., :(nsx + nsw)] # remove 0 padding - if mode == 'full': + x_ = gp.concatenate( + (x, gp.zeros([*x.shape[:-1], ns - nsx], dtype=x.dtype)), axis=-1 + ) + w_ = gp.concatenate( + (w, gp.zeros([*w.shape[:-1], ns - nsw], dtype=w.dtype)), axis=-1 + ) + xw = gp.real( + gp.fft.irfft(gp.fft.rfft(x_, axis=-1) * gp.fft.rfft(w_, axis=-1), axis=-1) + ) + xw = xw[..., : (nsx + nsw)] # remove 0 padding + if mode == "full": 
return xw - elif mode == 'same': + elif mode == "same": first = int(gp.floor(nsw / 2)) - ((nsw + 1) % 2) last = int(gp.ceil(nsw / 2)) + ((nsw + 1) % 2) return xw[..., first:-last] @@ -78,7 +84,7 @@ def dephas(w, phase, axis=-1): :return: """ ns = w.shape[axis] - W = freduce(np.fft.fft(w, axis=axis), axis=axis) * np.exp(- 1j * phase / 180 * np.pi) + W = freduce(np.fft.fft(w, axis=axis), axis=axis) * np.exp(-1j * phase / 180 * np.pi) return np.real(np.fft.ifft(fexpand(W, ns=ns, axis=axis), axis=axis)) @@ -142,7 +148,7 @@ def bp(ts, si, b, axis=None): :param axis: axis along which to perform reduction (last axis by default) :return: filtered time serie """ - return _freq_filter(ts, si, b, axis=axis, typ='bp') + return _freq_filter(ts, si, b, axis=axis, typ="bp") def lp(ts, si, b, axis=None): @@ -155,7 +161,7 @@ def lp(ts, si, b, axis=None): :param axis: axis along which to perform reduction (last axis by default) :return: filtered time serie """ - return _freq_filter(ts, si, b, axis=axis, typ='lp') + return _freq_filter(ts, si, b, axis=axis, typ="lp") def hp(ts, si, b, axis=None): @@ -168,38 +174,40 @@ def hp(ts, si, b, axis=None): :param axis: axis along which to perform reduction (last axis by default) :return: filtered time serie """ - return _freq_filter(ts, si, b, axis=axis, typ='hp') + return _freq_filter(ts, si, b, axis=axis, typ="hp") -def _freq_filter(ts, si, b, axis=None, typ='lp'): +def _freq_filter(ts, si, b, axis=None, typ="lp"): """ - Wrapper for hp/lp/bp filters + Wrapper for hp/lp/bp filters """ if axis is None: axis = ts.ndim - 1 ns = ts.shape[axis] f = fscale(ns, si=si, one_sided=True) - if typ == 'bp': - filc = _freq_vector(f, b[0:2], typ='hp') * _freq_vector(f, b[2:4], typ='lp') + if typ == "bp": + filc = _freq_vector(f, b[0:2], typ="hp") * _freq_vector(f, b[2:4], typ="lp") else: filc = _freq_vector(f, b, typ=typ) if axis < (ts.ndim - 1): filc = filc[:, np.newaxis] - return np.real(np.fft.ifft(np.fft.fft(ts, axis=axis) * fexpand(filc, ns, axis=0), axis=axis)) + return np.real( + np.fft.ifft(np.fft.fft(ts, axis=axis) * fexpand(filc, ns, axis=0), axis=axis) + ) -def _freq_vector(f, b, typ='lp'): +def _freq_vector(f, b, typ="lp"): """ - Returns a frequency modulated vector for filtering + Returns a frequency modulated vector for filtering - :param f: frequency vector, uniform and monotonic - :param b: 2 bounds array - :return: amplitude modulated frequency vector + :param f: frequency vector, uniform and monotonic + :param b: 2 bounds array + :return: amplitude modulated frequency vector """ filc = fcn_cosine(b)(f) - if typ.lower() in ['hp', 'highpass']: + if typ.lower() in ["hp", "highpass"]: return filc - elif typ.lower() in ['lp', 'lowpass']: + elif typ.lower() in ["lp", "lowpass"]: return 1 - filc @@ -256,8 +264,13 @@ def fit_phase(w, si=1, fmin=0, fmax=None, axis=-1): freqs = freduce(fscale(ns, si=si)) phi = np.unwrap(np.angle(freduce(np.fft.fft(w, axis=axis), axis=axis))) indf = np.logical_and(fmin < freqs, freqs < fmax) - dt = - np.polyfit(freqs[indf], - np.swapaxes(phi.compress(indf, axis=axis), axis, 0), 1)[0] / np.pi / 2 + dt = ( + -np.polyfit( + freqs[indf], np.swapaxes(phi.compress(indf, axis=axis), axis, 0), 1 + )[0] + / np.pi + / 2 + ) return dt @@ -284,7 +297,7 @@ def dft(x, xscale=None, axis=-1, kscale=None): shape = np.array(x.shape) x = np.reshape(x, (ns, int(np.prod(x.shape) / ns))) # compute fourier coefficients - exp = np.exp(- 1j * 2 * np.pi / ns * xscale * kscale[:, np.newaxis]) + exp = np.exp(-1j * 2 * np.pi / ns * xscale * kscale[:, np.newaxis]) X = 
np.matmul(exp, x) shape[0] = int(nk) X = X.reshape(shape) @@ -306,8 +319,14 @@ def dft2(x, r, c, nk, nl): # it would be interesting to compare performance with numba straight loops (easier to write) # GPU/C implementation should implement straight loops nt = x.shape[-1] - k, h = [v.flatten() for v in np.meshgrid(np.arange(nk), np.arange(nl), indexing='ij')] + k, h = [ + v.flatten() for v in np.meshgrid(np.arange(nk), np.arange(nl), indexing="ij") + ] # exp has dimension (kh, rc) - exp = np.exp(- 1j * 2 * np.pi * (r[np.newaxis] * k[:, np.newaxis] + - c[np.newaxis] * h[:, np.newaxis])) + exp = np.exp( + -1j + * 2 + * np.pi + * (r[np.newaxis] * k[:, np.newaxis] + c[np.newaxis] * h[:, np.newaxis]) + ) return np.matmul(exp, x).reshape((nk, nl, nt)) diff --git a/src/ibldsp/smooth.py b/src/ibldsp/smooth.py index 494d40f..8dd698f 100644 --- a/src/ibldsp/smooth.py +++ b/src/ibldsp/smooth.py @@ -17,12 +17,12 @@ def lp(ts, fac, pad=0.2): """ # keep at least two periods for the padding lpad = int(np.ceil(ts.shape[0] * pad)) - ts_ = np.pad(ts, lpad, mode='edge') + ts_ = np.pad(ts, lpad, mode="edge") ts_ = ft.lp(ts_, 1, np.array(fac) / 2) return ts_[lpad:-lpad] -def rolling_window(x, window_len=11, window='blackman'): +def rolling_window(x, window_len=11, window="blackman"): """ Smooth the data using a window with requested size. @@ -62,19 +62,21 @@ def rolling_window(x, window_len=11, window='blackman'): if window_len < 3: return x - if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: - raise ValueError("Window is not one of 'flat', 'hanning', 'hamming',\ -'bartlett', 'blackman'") + if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: + raise ValueError( + "Window is not one of 'flat', 'hanning', 'hamming',\ +'bartlett', 'blackman'" + ) - s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]] + s = np.r_[x[window_len - 1 : 0 : -1], x, x[-1:-window_len:-1]] # print(len(s)) - if window == 'flat': # moving average - w = np.ones(window_len, 'd') + if window == "flat": # moving average + w = np.ones(window_len, "d") else: - w = eval('np.' + window + '(window_len)') + w = eval("np." + window + "(window_len)") - y = np.convolve(w / w.sum(), s, mode='valid') - return y[round((window_len / 2 - 1)):round(-(window_len / 2))] + y = np.convolve(w / w.sum(), s, mode="valid") + return y[round((window_len / 2 - 1)) : round(-(window_len / 2))] def non_uniform_savgol(x, y, window, polynom): @@ -102,7 +104,7 @@ def non_uniform_savgol(x, y, window, polynom): if len(x) != len(y): raise ValueError('"x" and "y" must be of the same size') if len(x) < window: - raise ValueError('The data size must be larger than the window size') + raise ValueError("The data size must be larger than the window size") if type(window) is not int: raise TypeError('"window" must be an integer') if window % 2 == 0: @@ -177,7 +179,7 @@ def non_uniform_savgol(x, y, window, polynom): return y_smoothed -def smooth_interpolate_savgol(signal, window=31, order=3, interp_kind='cubic'): +def smooth_interpolate_savgol(signal, window=31, order=3, interp_kind="cubic"): """Run savitzy-golay filter on signal, interpolate through nan points. 
Parameters @@ -201,12 +203,20 @@ def smooth_interpolate_savgol(signal, window=31, order=3, interp_kind='cubic'): good_idxs = np.where(~np.isnan(signal_noisy_w_nans))[0] # perform savitzky-golay filtering on non-nan points signal_smooth_nonans = non_uniform_savgol( - timestamps[good_idxs], signal_noisy_w_nans[good_idxs], window=window, polynom=order) + timestamps[good_idxs], + signal_noisy_w_nans[good_idxs], + window=window, + polynom=order, + ) signal_smooth_w_nans = np.copy(signal_noisy_w_nans) signal_smooth_w_nans[good_idxs] = signal_smooth_nonans # interpolate nan points interpolater = interp1d( - timestamps[good_idxs], signal_smooth_nonans, kind=interp_kind, fill_value='extrapolate') + timestamps[good_idxs], + signal_smooth_nonans, + kind=interp_kind, + fill_value="extrapolate", + ) signal = interpolater(timestamps) return signal diff --git a/src/ibldsp/utils.py b/src/ibldsp/utils.py index 4de7bff..55962bf 100644 --- a/src/ibldsp/utils.py +++ b/src/ibldsp/utils.py @@ -22,8 +22,12 @@ def sync_timestamps(tsa, tsb, tbin=0.1, return_indices=False): def _interp_fcn(tsa, tsb, ib): # now compute the bpod/fpga drift and precise time shift - drift_ppm = np.polyfit(tsa[ib >= 0], tsb[ib[ib >= 0]] - tsa[ib >= 0], 1)[0] * 1e6 - fcn_a2b = scipy.interpolate.interp1d(tsa[ib >= 0], tsb[ib[ib >= 0]], fill_value="extrapolate") + drift_ppm = ( + np.polyfit(tsa[ib >= 0], tsb[ib[ib >= 0]] - tsa[ib >= 0], 1)[0] * 1e6 + ) + fcn_a2b = scipy.interpolate.interp1d( + tsa[ib >= 0], tsb[ib[ib >= 0]], fill_value="extrapolate" + ) return fcn_a2b, drift_ppm # assert sorted inputs @@ -34,7 +38,9 @@ def _interp_fcn(tsa, tsb, ib): y = np.zeros_like(x) x[np.int32(np.floor((tsa - tmin) / tbin))] = 1 y[np.int32(np.floor((tsb - tmin) / tbin))] = 1 - delta_t = (parabolic_max(scipy.signal.correlate(x, y, mode='full'))[0] - x.shape[0] + 1) * tbin + delta_t = ( + parabolic_max(scipy.signal.correlate(x, y, mode="full"))[0] - x.shape[0] + 1 + ) * tbin # do a first assignment at a DT threshold ib = np.zeros(tsa.shape, dtype=np.int32) - 1 threshold = tbin @@ -85,12 +91,16 @@ def parabolic_max(x): v010 = x[np.maximum(np.minimum(imax + np.array([-1, 0, 1]), ns - 1), 0)] v010 = v010[:, np.newaxis] else: - v010 = np.vstack((x[..., np.arange(x.shape[0]), np.maximum(imax - 1, 0)], - x[..., np.arange(x.shape[0]), imax], - x[..., np.arange(x.shape[0]), np.minimum(imax + 1, ns - 1)])) - poly = np.matmul(.5 * np.array([[1, -2, 1], [-1, 0, 1], [0, 2, 0]]), v010) - ipeak = - poly[1] / (poly[0] + np.double(poly[0] == 0)) / 2 - maxi = poly[2] + ipeak * poly[1] + ipeak ** 2. 
* poly[0] + v010 = np.vstack( + ( + x[..., np.arange(x.shape[0]), np.maximum(imax - 1, 0)], + x[..., np.arange(x.shape[0]), imax], + x[..., np.arange(x.shape[0]), np.minimum(imax + 1, ns - 1)], + ) + ) + poly = np.matmul(0.5 * np.array([[1, -2, 1], [-1, 0, 1], [0, 2, 0]]), v010) + ipeak = -poly[1] / (poly[0] + np.double(poly[0] == 0)) / 2 + maxi = poly[2] + ipeak * poly[1] + ipeak**2.0 * poly[0] ipeak += imax # handle edges iedges = np.logical_or(imax == 0, imax == ns - 1) @@ -133,6 +143,7 @@ def fcn_cosine(bounds, gpu=False): def _cos(x): return (1 - gp.cos((x - bounds[0]) / (bounds[1] - bounds[0]) * gp.pi)) / 2 + func = lambda x: _fcn_extrap(x, _cos, bounds) # noqa return func @@ -200,11 +211,13 @@ def rms(x, axis=-1): :param axis: (optional, -1) :return: numpy array """ - return np.sqrt(np.mean(x ** 2, axis=axis)) + return np.sqrt(np.mean(x**2, axis=axis)) -def make_channel_index(geom, radius=200., pad_val=384): - neighbors = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(geom)) < radius +def make_channel_index(geom, radius=200.0, pad_val=384): + neighbors = ( + scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(geom)) < radius + ) n_nbors = np.max(np.sum(neighbors, 0)) nc = geom.shape[0] @@ -213,7 +226,7 @@ def make_channel_index(geom, radius=200., pad_val=384): channel_idx = np.full((nc, n_nbors), pad_val, dtype=int) for c in range(nc): ch_idx = np.flatnonzero(neighbors[c, :]) - channel_idx[c, :ch_idx.shape[0]] = ch_idx + channel_idx[c, : ch_idx.shape[0]] = ch_idx return channel_idx @@ -227,6 +240,7 @@ class WindowGenerator(object): Example of implementations in test_dsp.py. """ + def __init__(self, ns, nswin, overlap): """ :param ns: number of sample of the signal along the direction to be windowed @@ -285,4 +299,6 @@ def tscale(self, fs): :param fs: sampling frequency (Hz) :return: time axis scale """ - return np.array([(first + (last - first - 1) / 2) / fs for first, last in self.firstlast]) + return np.array( + [(first + (last - first - 1) / 2) / fs for first, last in self.firstlast] + ) diff --git a/src/ibldsp/voltage.py b/src/ibldsp/voltage.py index 882ad0a..851f332 100644 --- a/src/ibldsp/voltage.py +++ b/src/ibldsp/voltage.py @@ -17,7 +17,7 @@ import ibldsp.utils as utils -def agc(x, wl=.5, si=.002, epsilon=1e-8, gpu=False): +def agc(x, wl=0.5, si=0.002, epsilon=1e-8, gpu=False): """ Automatic gain control w_agc, gain = agc(w, wl=.5, si=.002, epsilon=1e-8) @@ -36,18 +36,28 @@ def agc(x, wl=.5, si=.002, epsilon=1e-8, gpu=False): ns_win = int(gp.round(wl / si / 2) * 2 + 1) w = gp.hanning(ns_win) w /= gp.sum(w) - gain = fourier.convolve(gp.abs(x), w, mode='same', gpu=gpu) + gain = fourier.convolve(gp.abs(x), w, mode="same", gpu=gpu) gain += (gp.sum(gain, axis=1) * epsilon / x.shape[-1])[:, gp.newaxis] dead_channels = np.sum(gain, axis=1) == 0 x[~dead_channels, :] = x[~dead_channels, :] / gain[~dead_channels, :] if gpu: - return (x * gain).astype('float32'), gain.astype('float32') + return (x * gain).astype("float32"), gain.astype("float32") return x, gain -def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None, lagc=.5, - collection=None, kfilt=None): +def fk( + x, + si=0.002, + dx=1, + vbounds=None, + btype="highpass", + ntr_pad=0, + ntr_tap=None, + lagc=0.5, + collection=None, + kfilt=None, +): """Frequency-wavenumber filter: filters apparent plane-waves velocity :param x: the input array to be filtered. dimension, the filtering is considering axis=0: spatial dimension, axis=1 temporal dimension. 
(ntraces, ns) @@ -68,8 +78,16 @@ def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None xout = np.zeros_like(x) for c in np.unique(collection): sel = collection == c - xout[sel, :] = fk(x[sel, :], si=si, dx=dx, vbounds=vbounds, ntr_pad=ntr_pad, - ntr_tap=ntr_tap, lagc=lagc, collection=None) + xout[sel, :] = fk( + x[sel, :], + si=si, + dx=dx, + vbounds=vbounds, + ntr_pad=ntr_pad, + ntr_tap=ntr_tap, + lagc=lagc, + collection=None, + ) return xout assert vbounds @@ -85,14 +103,14 @@ def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None kscale = fourier.fscale(nxp, dx) kscale[0] = 1e-6 v = fscale[np.newaxis, :] / kscale[:, np.newaxis] - if btype.lower() in ['highpass', 'hp']: + if btype.lower() in ["highpass", "hp"]: fk_att = fourier.fcn_cosine(vbounds)(np.abs(v)) - elif btype.lower() in ['lowpass', 'lp']: - fk_att = (1 - fourier.fcn_cosine(vbounds)(np.abs(v))) + elif btype.lower() in ["lowpass", "lp"]: + fk_att = 1 - fourier.fcn_cosine(vbounds)(np.abs(v)) # if a k-filter is also provided, apply it if kfilt is not None: - katt = fourier._freq_vector(np.abs(kscale), kfilt['bounds'], typ=kfilt['btype']) + katt = fourier._freq_vector(np.abs(kscale), kfilt["bounds"], typ=kfilt["btype"]) fk_att *= katt[:, np.newaxis] # import matplotlib.pyplot as plt @@ -112,7 +130,9 @@ def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None xf = np.r_[np.flipud(xf[:ntr_pad]), xf, np.flipud(xf[-ntr_pad:])] if ntr_tap > 0: taper = fourier.fcn_cosine([0, ntr_tap])(np.arange(nxp)) # taper up - taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp])(np.arange(nxp)) # taper down + taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp])( + np.arange(nxp) + ) # taper down xf = xf * taper[:, np.newaxis] xf = np.real(np.fft.ifft2(fk_att * np.fft.fft2(xf))) @@ -132,13 +152,18 @@ def car(x, collection=None, lagc=300, butter_kwargs=None, **kwargs): :return: """ if butter_kwargs is None: - butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'} + butter_kwargs = {"N": 3, "Wn": 0.1, "btype": "highpass"} if collection is not None: xout = np.zeros_like(x) for c in np.unique(collection): sel = collection == c - xout[sel, :] = kfilt(x=x[sel, :], ntr_pad=0, ntr_tap=None, collection=None, - butter_kwargs=butter_kwargs) + xout[sel, :] = kfilt( + x=x[sel, :], + ntr_pad=0, + ntr_tap=None, + collection=None, + butter_kwargs=butter_kwargs, + ) return xout # apply agc and keep the gain in handy @@ -152,7 +177,9 @@ def car(x, collection=None, lagc=300, butter_kwargs=None, **kwargs): return xf * gain -def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=None, gpu=False): +def kfilt( + x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=None, gpu=False +): """ Applies a butterworth filter on the 0-axis with tapering / padding :param x: the input array to be filtered. 
dimension, the filtering is considering @@ -171,13 +198,18 @@ def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=N gp = np if butter_kwargs is None: - butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'} + butter_kwargs = {"N": 3, "Wn": 0.1, "btype": "highpass"} if collection is not None: xout = gp.zeros_like(x) for c in gp.unique(collection): sel = collection == c - xout[sel, :] = kfilt(x=x[sel, :], ntr_pad=0, ntr_tap=None, collection=None, - butter_kwargs=butter_kwargs) + xout[sel, :] = kfilt( + x=x[sel, :], + ntr_pad=0, + ntr_tap=None, + collection=None, + butter_kwargs=butter_kwargs, + ) return xout nx, nt = x.shape @@ -197,11 +229,14 @@ def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=N xf = gp.r_[gp.flipud(xf[:ntr_pad]), xf, gp.flipud(xf[-ntr_pad:])] if ntr_tap > 0: taper = fourier.fcn_cosine([0, ntr_tap], gpu=gpu)(gp.arange(nxp)) # taper up - taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp], gpu=gpu)(gp.arange(nxp)) # taper down + taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp], gpu=gpu)( + gp.arange(nxp) + ) # taper down xf = xf * taper[:, gp.newaxis] - sos = scipy.signal.butter(**butter_kwargs, output='sos') + sos = scipy.signal.butter(**butter_kwargs, output="sos") if gpu: from .filter_gpu import sosfiltfilt_gpu + xf = sosfiltfilt_gpu(sos, xf, axis=0) else: xf = scipy.signal.sosfiltfilt(sos, xf, axis=0) @@ -211,7 +246,9 @@ def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=N return xf * gain -def interpolate_bad_channels(data, channel_labels=None, x=None, y=None, p=1.3, kriging_distance_um=20, gpu=False): +def interpolate_bad_channels( + data, channel_labels=None, x=None, y=None, p=1.3, kriging_distance_um=20, gpu=False +): """ Interpolate the channel labeled as bad channels using linear interpolation. The weights applied to neighbouring channels come from an exponential decay function @@ -237,7 +274,7 @@ def interpolate_bad_channels(data, channel_labels=None, x=None, y=None, p=1.3, k for i in bad_channels: # compute the weights to apply to neighbouring traces offset = gp.abs(x - x[i] + 1j * (y - y[i])) - weights = gp.exp(-(offset / kriging_distance_um) ** p) + weights = gp.exp(-((offset / kriging_distance_um) ** p)) weights[bad_channels] = 0 weights[weights < 0.005] = 0 weights = weights / gp.sum(weights) @@ -255,11 +292,15 @@ def _get_destripe_parameters(fs, butter_kwargs, k_kwargs, k_filter): """gets the default params for destripe. 
This is used for both the destripe fcn on a numpy array and the function that actuates on a cbin file""" if butter_kwargs is None: - butter_kwargs = {'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'} + butter_kwargs = {"N": 3, "Wn": 300 / fs * 2, "btype": "highpass"} if k_kwargs is None: lagc = None if fs < 3000 else int(fs / 10) - k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': lagc, - 'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}} + k_kwargs = { + "ntr_pad": 60, + "ntr_tap": 0, + "lagc": lagc, + "butter_kwargs": {"N": 3, "Wn": 0.01, "btype": "highpass"}, + } if k_filter: spatial_fcn = lambda dat: kfilt(dat, **k_kwargs) # noqa else: @@ -267,7 +308,16 @@ def _get_destripe_parameters(fs, butter_kwargs, k_kwargs, k_filter): return butter_kwargs, k_kwargs, spatial_fcn -def destripe(x, fs, h=None, neuropixel_version=1, butter_kwargs=None, k_kwargs=None, channel_labels=None, k_filter=True): +def destripe( + x, + fs, + h=None, + neuropixel_version=1, + butter_kwargs=None, + k_kwargs=None, + channel_labels=None, + k_filter=True, +): """Super Car (super slow also...) - far from being set in stone but a good workflow example :param x: demultiplexed array (nc, ns) :param fs: sampling frequency @@ -288,21 +338,23 @@ def destripe(x, fs, h=None, neuropixel_version=1, butter_kwargs=None, k_kwargs=N :param k_filter (True): applies k-filter by default, otherwise, apply CAR. :return: x, filtered array """ - butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters(fs, butter_kwargs, k_kwargs, k_filter) + butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters( + fs, butter_kwargs, k_kwargs, k_filter + ) if h is None: h = neuropixel.trace_header(version=neuropixel_version) if channel_labels is True: channel_labels, _ = detect_bad_channels(x, fs) # butterworth - sos = scipy.signal.butter(**butter_kwargs, output='sos') + sos = scipy.signal.butter(**butter_kwargs, output="sos") x = scipy.signal.sosfiltfilt(sos, x) # channel interpolation # apply ADC shift if neuropixel_version is not None: - x = fourier.fshift(x, h['sample_shift'], axis=1) + x = fourier.fshift(x, h["sample_shift"], axis=1) # apply spatial filter only on channels that are inside of the brain if channel_labels is not None: - x = interpolate_bad_channels(x, channel_labels, h['x'], h['y']) + x = interpolate_bad_channels(x, channel_labels, h["x"], h["y"]) inside_brain = np.where(channel_labels != 3)[0] x[inside_brain, :] = spatial_fcn(x[inside_brain, :]) # apply the k-filter else: @@ -318,16 +370,34 @@ def destripe_lfp(x, fs, channel_labels=None, **kwargs): :param fs: :return: """ - kwargs['butter_kwargs'] = {'N': 3, 'Wn': 2 / fs * 2, 'btype': 'highpass'} - kwargs['k_filter'] = False + kwargs["butter_kwargs"] = {"N": 3, "Wn": 2 / fs * 2, "btype": "highpass"} + kwargs["k_filter"] = False if channel_labels is True: - kwargs['channel_labels'], _ = detect_bad_channels(x, fs=fs, psd_hf_threshold=1.4) + kwargs["channel_labels"], _ = detect_bad_channels( + x, fs=fs, psd_hf_threshold=1.4 + ) return destripe(x, fs, **kwargs) -def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, append=False, nc_out=None, butter_kwargs=None, - dtype=np.int16, ns2add=0, nbatch=None, nprocesses=None, compute_rms=True, reject_channels=True, - k_kwargs=None, k_filter=True, reader_kwargs=None, output_qc_path=None): +def decompress_destripe_cbin( + sr_file, + output_file=None, + h=None, + wrot=None, + append=False, + nc_out=None, + butter_kwargs=None, + dtype=np.int16, + ns2add=0, + nbatch=None, + nprocesses=None, + 
compute_rms=True, + reject_channels=True, + k_kwargs=None, + k_filter=True, + reader_kwargs=None, + output_qc_path=None, +): """ From a spikeglx Reader object, decompresses and apply ADC. Saves output as a flat binary file in int16 @@ -363,35 +433,41 @@ def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, appen if reject_channels: # get bad channels if option is on channel_labels = detect_bad_channels_cbin(sr) assert isinstance(sr_file, str) or isinstance(sr_file, Path) - butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters(sr.fs, butter_kwargs, k_kwargs, k_filter) + butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters( + sr.fs, butter_kwargs, k_kwargs, k_filter + ) h = sr.geometry if h is None else h - ncv = h['sample_shift'].size # number of channels - output_file = sr.file_bin.with_suffix('.bin') if output_file is None else Path(output_file) + ncv = h["sample_shift"].size # number of channels + output_file = ( + sr.file_bin.with_suffix(".bin") if output_file is None else Path(output_file) + ) assert output_file != sr.file_bin taper = np.r_[0, scipy.signal.windows.cosine((SAMPLES_TAPER - 1) * 2), 0] # create the FFT stencils nc_out = nc_out or sr.nc # compute LP filter coefficients - sos = scipy.signal.butter(**butter_kwargs, output='sos') + sos = scipy.signal.butter(**butter_kwargs, output="sos") nbytes = dtype(1).nbytes nprocesses = nprocesses or int(cpu_count() - cpu_count() / 4) - win = pyfftw.empty_aligned((ncv, NBATCH), dtype='float32') - WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype='complex64') - fft_object = pyfftw.FFTW(win, WIN, axes=(1,), direction='FFTW_FORWARD', threads=4) + win = pyfftw.empty_aligned((ncv, NBATCH), dtype="float32") + WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype="complex64") + fft_object = pyfftw.FFTW(win, WIN, axes=(1,), direction="FFTW_FORWARD", threads=4) dephas = np.zeros((ncv, NBATCH), dtype=np.float32) - dephas[:, 1] = 1. 
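For orientation, a hedged sketch of calling this entry point; the file paths are hypothetical and the keywords simply follow the signature above:

```python
from ibldsp.voltage import decompress_destripe_cbin

decompress_destripe_cbin(
    "imec0/_spikeglx_ephysData_g0_t0.imec0.ap.cbin",  # hypothetical input file
    output_file="imec0/destriped.ap.bin",
    nprocesses=4,          # defaults to roughly 3/4 of the available cores
    compute_rms=True,      # also writes the _iblqc_ephysTimeRmsAP.* QC files
    reject_channels=True,  # detect bad channels, then interpolate them
)
```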
- DEPHAS = np.exp(1j * np.angle(fft_object(dephas)) * h['sample_shift'][:, np.newaxis]) + dephas[:, 1] = 1.0 + DEPHAS = np.exp( + 1j * np.angle(fft_object(dephas)) * h["sample_shift"][:, np.newaxis] + ) # if we want to compute the rms ap across the session if compute_rms: - ap_rms_file = output_file.parent.joinpath('ap_rms.bin') - ap_time_file = output_file.parent.joinpath('ap_time.bin') + ap_rms_file = output_file.parent.joinpath("ap_rms.bin") + ap_time_file = output_file.parent.joinpath("ap_time.bin") rms_nbytes = np.float32(1).nbytes if append: rms_offset = Path(ap_rms_file).stat().st_size time_offset = Path(ap_time_file).stat().st_size - with open(ap_time_file, 'rb') as tid: + with open(ap_time_file, "rb") as tid: t = tid.read() time_data = np.frombuffer(t, dtype=np.float32) t0 = time_data[-1] @@ -399,15 +475,15 @@ def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, appen rms_offset = 0 time_offset = 0 t0 = 0 - open(ap_rms_file, 'wb').close() - open(ap_time_file, 'wb').close() + open(ap_rms_file, "wb").close() + open(ap_time_file, "wb").close() if append: # need to find the end of the file and the offset offset = Path(output_file).stat().st_size else: offset = 0 - open(output_file, 'wb').close() + open(output_file, "wb").close() # chunks to split the file into, dependent on number of parallel processes CHUNK_SIZE = int(sr.ns / nprocesses) @@ -421,20 +497,24 @@ def my_function(i_chunk, n_chunk): # Find the maximum sample for each chunk max_s = _sr.ns if i_chunk == n_chunk - 1 else (i_chunk + 1) * CHUNK_SIZE # need to redefine this here to avoid 4 byte boundary error - win = pyfftw.empty_aligned((ncv, NBATCH), dtype='float32') - WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype='complex64') - fft_object = pyfftw.FFTW(win, WIN, axes=(1,), direction='FFTW_FORWARD', threads=4) - ifft_object = pyfftw.FFTW(WIN, win, axes=(1,), direction='FFTW_BACKWARD', threads=4) - - fid = open(output_file, 'r+b') + win = pyfftw.empty_aligned((ncv, NBATCH), dtype="float32") + WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype="complex64") + fft_object = pyfftw.FFTW( + win, WIN, axes=(1,), direction="FFTW_FORWARD", threads=4 + ) + ifft_object = pyfftw.FFTW( + WIN, win, axes=(1,), direction="FFTW_BACKWARD", threads=4 + ) + + fid = open(output_file, "r+b") if i_chunk == 0: fid.seek(offset) else: fid.seek(offset + ((first_s + SAMPLES_TAPER) * nc_out * nbytes)) if compute_rms: - aid = open(ap_rms_file, 'r+b') - tid = open(ap_time_file, 'r+b') + aid = open(ap_rms_file, "r+b") + tid = open(ap_time_file, "r+b") if i_chunk == 0: aid.seek(rms_offset) tid.seek(time_offset) @@ -454,7 +534,7 @@ def my_function(i_chunk, n_chunk): ind2save = [SAMPLES_TAPER, NBATCH - SAMPLES_TAPER] if last_s == _sr.ns: # for the last batch just use the normal fft as the stencil doesn't fit - chunk = fourier.fshift(chunk, s=h['sample_shift']) + chunk = fourier.fshift(chunk, s=h["sample_shift"]) ind2save[1] = NBATCH else: # apply precomputed fshift of the proper length @@ -465,9 +545,11 @@ def my_function(i_chunk, n_chunk): # interpolate missing traces after the low-cut filter it's important to leave the # channels outside of the brain outside of the computation if reject_channels: - chunk = interpolate_bad_channels(chunk, channel_labels, h['x'], h['y']) + chunk = interpolate_bad_channels(chunk, channel_labels, h["x"], h["y"]) inside_brain = np.where(channel_labels != 3)[0] - chunk[inside_brain, :] = spatial_fcn(chunk[inside_brain, :]) # apply the k-filter / CAR + chunk[inside_brain, :] = 
spatial_fcn( + chunk[inside_brain, :] + ) # apply the k-filter / CAR else: chunk = spatial_fcn(chunk) # apply the k-filter / CAR @@ -493,28 +575,34 @@ def my_function(i_chunk, n_chunk): if last_s >= max_s: if last_s == _sr.ns: if ns2add > 0: - np.tile(chunk[-1, :nc_out].astype(dtype), (ns2add, 1)).tofile(fid) + np.tile(chunk[-1, :nc_out].astype(dtype), (ns2add, 1)).tofile( + fid + ) fid.close() if compute_rms: aid.close() tid.close() break - _ = Parallel(n_jobs=nprocesses)(delayed(my_function)(i, nprocesses) for i in range(nprocesses)) + _ = Parallel(n_jobs=nprocesses)( + delayed(my_function)(i, nprocesses) for i in range(nprocesses) + ) sr.close() # Here convert the ap_rms bin files to the ibl format and save if compute_rms: - with open(ap_rms_file, 'rb') as aid, open(ap_time_file, 'rb') as tid: + with open(ap_rms_file, "rb") as aid, open(ap_time_file, "rb") as tid: rms_data = aid.read() time_data = tid.read() time_data = np.frombuffer(time_data, dtype=np.float32) rms_data = np.frombuffer(rms_data, dtype=np.float32) - assert (rms_data.shape[0] == time_data.shape[0] * ncv) + assert rms_data.shape[0] == time_data.shape[0] * ncv rms_data = rms_data.reshape(time_data.shape[0], ncv) output_qc_path = output_qc_path or output_file.parent - np.save(output_qc_path.joinpath('_iblqc_ephysTimeRmsAP.rms.npy'), rms_data) - np.save(output_qc_path.joinpath('_iblqc_ephysTimeRmsAP.timestamps.npy'), time_data) + np.save(output_qc_path.joinpath("_iblqc_ephysTimeRmsAP.rms.npy"), rms_data) + np.save( + output_qc_path.joinpath("_iblqc_ephysTimeRmsAP.timestamps.npy"), time_data + ) def detect_bad_channels(raw, fs, similarity_threshold=(-0.5, 1), psd_hf_threshold=None): @@ -541,7 +629,7 @@ def rneighbours(raw, n=1): # noqa """ nc = raw.shape[0] mixer = np.triu(np.ones((nc, nc)), 1) - np.triu(np.ones((nc, nc)), 1 + n) - mixer += np.tril(np.ones((nc, nc)), -1) - np.tril(np.ones((nc, nc)), - n - 1) + mixer += np.tril(np.ones((nc, nc)), -1) - np.tril(np.ones((nc, nc)), -n - 1) r = rcoeff(raw, np.matmul(raw.T, mixer).T) r[np.isnan(r)] = 0 return r @@ -568,8 +656,11 @@ def channels_similarity(raw, nmed=0): :param nmed: :return: """ + def fxcor(x, y): - return scipy.fft.irfft(scipy.fft.rfft(x) * np.conj(scipy.fft.rfft(y)), n=raw.shape[-1]) + return scipy.fft.irfft( + scipy.fft.rfft(x) * np.conj(scipy.fft.rfft(y)), n=raw.shape[-1] + ) def nxcor(x, ref): ref = ref - np.mean(ref) @@ -591,24 +682,31 @@ def nxcor(x, ref): if psd_hf_threshold is None: # the LFP band data is obviously much stronger so auto-adjust the default threshold psd_hf_threshold = 1.4 if fs < 5000 else 0.02 - sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}, output='sos') + sos_hp = scipy.signal.butter( + **{"N": 3, "Wn": 300 / fs * 2, "btype": "highpass"}, output="sos" + ) hf = scipy.signal.sosfiltfilt(sos_hp, raw) xcorf = channels_similarity(hf) - xfeats = ({ - 'ind': np.arange(nc), - 'rms_raw': utils.rms(raw), # very similar to the rms avfter butterworth filter - 'xcor_hf': detrend(xcor, 11), - 'xcor_lf': xcorf - detrend(xcorf, 11) - 1, - 'psd_hf': np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1), # 80% nyquists - }) + xfeats = { + "ind": np.arange(nc), + "rms_raw": utils.rms(raw), # very similar to the rms avfter butterworth filter + "xcor_hf": detrend(xcor, 11), + "xcor_lf": xcorf - detrend(xcorf, 11) - 1, + "psd_hf": np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1), # 80% nyquists + } # make recommendation ichannels = np.zeros(nc) - idead = np.where(similarity_threshold[0] > xfeats['xcor_hf'])[0] - inoisy = 
np.where(np.logical_or(xfeats['psd_hf'] > psd_hf_threshold, xfeats['xcor_hf'] > similarity_threshold[1]))[0]
+    idead = np.where(similarity_threshold[0] > xfeats["xcor_hf"])[0]
+    inoisy = np.where(
+        np.logical_or(
+            xfeats["psd_hf"] > psd_hf_threshold,
+            xfeats["xcor_hf"] > similarity_threshold[1],
+        )
+    )[0]
     # the channels outside of the brain are the contiguous channels below the threshold on the trend coherency
-    ioutside = np.where(xfeats['xcor_lf'] < -0.75)[0]
+    ioutside = np.where(xfeats["xcor_lf"] < -0.75)[0]
     if ioutside.size > 0 and ioutside[-1] == (nc - 1):
         a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])
         ioutside = ioutside[a == np.max(a)]
@@ -631,7 +729,9 @@ def detect_bad_channels_cbin(bin_file, n_batches=10, batch_duration=0.3, display
     :param display: if True will return a figure with features and an excerpt of the raw data
     :return: channel_labels: nc int array with 0:ok, 1:dead, 2:high noise, 3:outside of the brain
     """
-    sr = bin_file if isinstance(bin_file, spikeglx.Reader) else spikeglx.Reader(bin_file)
+    sr = (
+        bin_file if isinstance(bin_file, spikeglx.Reader) else spikeglx.Reader(bin_file)
+    )
     nc = sr.nc - sr.nsync
     channel_labels = np.zeros((nc, n_batches))
     # loop over the file and take the mode of detections
@@ -649,6 +749,7 @@ def detect_bad_channels_cbin(bin_file, n_batches=10, batch_duration=0.3, display
     if display:
         raw = sr[sl, :nc].T
         from ibllib.plots.figures import ephys_bad_channels
+
         ephys_bad_channels(raw, sr.fs, channel_flags, xfeats_med)
     return channel_flags
 
@@ -667,25 +768,35 @@ def resample_denoise_lfp_cbin(lf_file, RESAMPLE_FACTOR=10, output=None):
     :return: None
     """
-    output = output or Path(lf_file).parent.joinpath('lf_resampled.bin')
+    output = output or Path(lf_file).parent.joinpath("lf_resampled.bin")
     sr = spikeglx.Reader(lf_file)
     wg = utils.WindowGenerator(ns=sr.ns, nswin=65536, overlap=1024)
     cflags = detect_bad_channels_cbin(lf_file)
     c = 0
-    with open(output, 'wb') as f:
+    with open(output, "wb") as f:
         for first, last in wg.firstlast:
-            butter_kwargs = {'N': 3, 'Wn': np.array([2, 200]) / sr.fs * 2, 'btype': 'bandpass'}
-            sos = scipy.signal.butter(**butter_kwargs, output='sos')
-            raw = sr[first:last, :-sr.nsync]
+            butter_kwargs = {
+                "N": 3,
+                "Wn": np.array([2, 200]) / sr.fs * 2,
+                "btype": "bandpass",
+            }
+            sos = scipy.signal.butter(**butter_kwargs, output="sos")
+            raw = sr[first:last, : -sr.nsync]
             raw = scipy.signal.sosfiltfilt(sos, raw, axis=0)
             destripe = destripe_lfp(raw.T, fs=sr.fs, channel_labels=cflags)
             # viewephys(raw.T, fs=sr.fs, title='raw')
             # viewephys(destripe, fs=sr.fs, title='destripe')
-            rsamp = scipy.signal.decimate(destripe, RESAMPLE_FACTOR, axis=1, ftype='fir').T
+            rsamp = scipy.signal.decimate(
+                destripe, RESAMPLE_FACTOR, axis=1, ftype="fir"
+            ).T
             # viewephys(rsamp, fs=sr.fs / RESAMPLE_FACTOR, title='rsamp')
             first_valid = 0 if first == 0 else int(wg.overlap / 2 / RESAMPLE_FACTOR)
-            last_valid = rsamp.shape[0] if last == sr.ns else int(rsamp.shape[0] - wg.overlap / 2 / RESAMPLE_FACTOR)
+            last_valid = (
+                rsamp.shape[0]
+                if last == sr.ns
+                else int(rsamp.shape[0] - wg.overlap / 2 / RESAMPLE_FACTOR)
+            )
             rsamp = rsamp[first_valid:last_valid, :]
             c += rsamp.shape[0]
             print(first, last, last - first, first_valid, last_valid, c)
@@ -718,16 +829,16 @@ def stack(data, word, fcn_agg=np.nanmean, header=None):
     if header is None:
         hstack = fold
     else:
-        header['stack_word'] = word
-        dfh = pd.DataFrame(header).groupby('stack_word')
-        hstack = dfh.aggregate('mean').to_dict(orient='series')
+        header["stack_word"] = word
+        dfh = 
pd.DataFrame(header).groupby("stack_word") + hstack = dfh.aggregate("mean").to_dict(orient="series") hstack = {k: hstack[k].values for k in hstack.keys()} - hstack['fold'] = fold + hstack["fold"] = fold return stack, hstack -def current_source_density(lfp, h, method='diff', sigma=1 / 3): +def current_source_density(lfp, h, method="diff", sigma=1 / 3): """ Compute the current source density (CSD) of a given LFP signal recorded on neuropixel 1 or 2 :param data: LFP signal (n_channels, n_samples) @@ -737,31 +848,37 @@ def current_source_density(lfp, h, method='diff', sigma=1 / 3): :return: """ csd = np.zeros(lfp.shape, dtype=np.float64) * np.NAN - xy = h['x'] + 1j * h['y'] - for col in np.unique(h['col']): - ind = np.where(h['col'] == col)[0] - isort = np.argsort(h['row'][ind]) + xy = h["x"] + 1j * h["y"] + for col in np.unique(h["col"]): + ind = np.where(h["col"] == col)[0] + isort = np.argsort(h["row"][ind]) itr = ind[isort] dx = np.median(np.diff(np.abs(xy[itr]))) - if method == 'diff': - csd[itr[1:-1], :] = np.diff(lfp[itr, :].astype(np.float64), n=2, axis=0) / dx ** 2 * sigma + if method == "diff": + csd[itr[1:-1], :] = ( + np.diff(lfp[itr, :].astype(np.float64), n=2, axis=0) / dx**2 * sigma + ) csd[itr[0], :] = csd[itr[1], :] csd[itr[-1], :] = csd[itr[-2], :] - elif method == 'kcsd': + elif method == "kcsd": from kcsd import KCSD1D + # here we could eventually expose the KCSD kwargs csd[itr, :] = KCSD1D( - h['y'][itr, np.newaxis], + h["y"][itr, np.newaxis], lfp[itr, :], - h=np.median(np.diff(h['y'][ind])), # this seems to work well with the current intertrace + h=np.median( + np.diff(h["y"][ind]) + ), # this seems to work well with the current intertrace sigma=sigma, - xmin=np.min(h['y'][itr]), - xmax=np.max(h['y'][itr]), - gdx=np.ceil((np.max(h['y'][itr]) - np.min(h['y'][itr])) / itr.size), - lambd=0., - R_init=5., + xmin=np.min(h["y"][itr]), + xmax=np.max(h["y"][itr]), + gdx=np.ceil((np.max(h["y"][itr]) - np.min(h["y"][itr])) / itr.size), + lambd=0.0, + R_init=5.0, n_src_init=10000, - src_type='gauss').values('CSD') + src_type="gauss", + ).values("CSD") return csd diff --git a/src/ibldsp/waveforms.py b/src/ibldsp/waveforms.py index c473c97..d08c5a5 100644 --- a/src/ibldsp/waveforms.py +++ b/src/ibldsp/waveforms.py @@ -19,35 +19,37 @@ def _validate_arr_in(arr_in): def get_array_peak(arr_in, df): - ''' + """ Create matrix of just NxT (spikes x time) of the peak waveforms channel (=1 channel) :param arr_in: NxTxC waveform matrix (spikes x time x channel) ; expands to 1xTxC if TxC as input :param df: dataframe of waveform features :return: NxT waveform matrix : spikes x time, only the peak channel - ''' + """ arr_in = _validate_arr_in(arr_in) - arr_peak = arr_in[np.arange(arr_in.shape[0]), :, df['peak_trace_idx'].to_numpy()] + arr_peak = arr_in[np.arange(arr_in.shape[0]), :, df["peak_trace_idx"].to_numpy()] return arr_peak def invert_peak_waveform(arr_peak, df): # Get the sign of the peak - indx_pos = np.where(df['peak_val'].to_numpy() > 0)[0] + indx_pos = np.where(df["peak_val"].to_numpy() > 0)[0] # Flip positive wavs so all are negative if len(indx_pos) > 0: arr_peak[indx_pos, :] = -1 * arr_peak[indx_pos, :] - df['invert_sign_peak'] = np.sign(df['peak_val']) * -1 # Inverted signe peak to multiply point values by + df["invert_sign_peak"] = ( + np.sign(df["peak_val"]) * -1 + ) # Inverted signe peak to multiply point values by return arr_peak, df def arr_pre_post(arr_peak, indx_peak): - ''' + """ :param arr_peak: NxT waveform matrix : spikes x time, only the peak channel :param indx_peak: 
Nx1 matrix : indices of the peak for each channel :return: - ''' + """ # Create zero mask with 1 at peak, cumsum arr_mask = np.zeros(arr_peak.shape) arr_mask[np.arange(0, arr_mask.shape[0], 1), indx_peak] = 1 @@ -61,12 +63,16 @@ def arr_pre_post(arr_peak, indx_peak): del arr_mask arr_pre = arr_peak.copy() - arr_pre = arr_pre.astype('float') - arr_pre[indx_postpeak] = np.nan # Array with values pre-, nans post- peak (from peak to end) + arr_pre = arr_pre.astype("float") + arr_pre[ + indx_postpeak + ] = np.nan # Array with values pre-, nans post- peak (from peak to end) arr_post = arr_peak.copy() - arr_post = arr_post.astype('float') - arr_post[indx_prepeak] = np.nan # Array with values post-, nans pre- peak (from start to peak-1) + arr_post = arr_post.astype("float") + arr_post[ + indx_prepeak + ] = np.nan # Array with values post-, nans pre- peak (from start to peak-1) return arr_pre, arr_post @@ -114,9 +120,9 @@ def find_peak(arr_in): # Create dict / pd df df = pd.DataFrame() - df['peak_trace_idx'] = indx_trace - df['peak_time_idx'] = indx_peak - df['peak_val'] = val_peak + df["peak_trace_idx"] = indx_trace + df["peak_time_idx"] = indx_peak + df["peak_val"] = val_peak return df @@ -124,16 +130,19 @@ def find_trough(arr_peak, df): # Find tip (at peak waveform) # Create masks pre/post - arr_pre, arr_post = arr_pre_post(arr_peak, df['peak_time_idx'].to_numpy()) + arr_pre, arr_post = arr_pre_post(arr_peak, df["peak_time_idx"].to_numpy()) # Find trough # indx_trough = np.nanargmin(arr_post * np.sign(val_peak)[:, np.newaxis], axis=1) indx_trough = np.nanargmax(arr_post, axis=1) - val_trough = arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_trough] * df['invert_sign_peak'].to_numpy() + val_trough = ( + arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_trough] + * df["invert_sign_peak"].to_numpy() + ) # Put values into df - df['trough_time_idx'] = indx_trough - df['trough_val'] = val_trough + df["trough_time_idx"] = indx_trough + df["trough_val"] = val_trough return df @@ -142,10 +151,10 @@ def find_tip(arr_peak, df): # Find tip (at peak waveform) # Create masks pre/post - arr_pre, arr_post = arr_pre_post(arr_peak, df['peak_time_idx'].to_numpy()) + arr_pre, arr_post = arr_pre_post(arr_peak, df["peak_time_idx"].to_numpy()) # Find tip - ''' + """ # 02-06-2023 ; Decided not to use the inflection point but rather maximum # Leaving code for now commented as legacy example @@ -158,24 +167,27 @@ def find_tip(arr_peak, df): indx_tip = np.argmax(np.cumsum(arr_cs, axis=1), axis=1) + 1 val_tip = arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_tip] * df['invert_sign_peak'].to_numpy() del arr_cs - ''' + """ # Maximum indx_tip = np.nanargmax(arr_pre, axis=1) - val_tip = arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_tip] * df['invert_sign_peak'].to_numpy() + val_tip = ( + arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_tip] + * df["invert_sign_peak"].to_numpy() + ) # Put values into df - df['tip_time_idx'] = indx_tip - df['tip_val'] = val_tip + df["tip_time_idx"] = indx_tip + df["tip_val"] = val_tip return df def find_tip_trough(arr_peak, arr_peak_real, df): - ''' + """ :param arr_in: inverted :param df: :return: - ''' + """ # 2. Find trough and tip (at peak waveform) # Find trough @@ -186,18 +198,18 @@ def find_tip_trough(arr_peak, arr_peak_real, df): # Call the function again to compute trough etc. 
with new peak assigned # Find df rows to be changed - df_index = df.index[(df['peak_val'] > 0) & (df['peak_to_trough_ratio'] <= 1.5)] + df_index = df.index[(df["peak_val"] > 0) & (df["peak_to_trough_ratio"] <= 1.5)] df_rows = df.iloc[df_index] if len(df_index) > 0: # New peak - Swap peak for trough values - df_rows = df_rows.drop(['peak_val', 'peak_time_idx'], axis=1) - df_rows['peak_val'] = df_rows['trough_val'] - df_rows['peak_time_idx'] = df_rows['trough_time_idx'] + df_rows = df_rows.drop(["peak_val", "peak_time_idx"], axis=1) + df_rows["peak_val"] = df_rows["trough_val"] + df_rows["peak_time_idx"] = df_rows["trough_time_idx"] # df_trials.loc[iss, f] = predicted[f].values # Drop trough columns - df_rows = df_rows.drop(['trough_time_idx', 'trough_val'], axis=1) + df_rows = df_rows.drop(["trough_time_idx", "trough_val"], axis=1) # Create mini arr_peak for those rows uniquely (take the real waveforms value in, not inverted ones) arr_peak_rows = arr_peak_real[df_index, :] # Place into "inverted" array peak for return @@ -229,7 +241,8 @@ def plot_wiggle(wav, ax=None, scalar=0.3, clip=1.5, **axkwargs): nc, ns = wav.shape vals = np.c_[wav, wav[:, :1] * np.nan].ravel() # flat view of the 2d array. vect = np.arange(vals.size).astype( - np.float32) # flat index array, for correctly locating zero crossings in the flat view + np.float32 + ) # flat index array, for correctly locating zero crossings in the flat view crossing = np.where(np.diff(np.signbit(vals)))[0] # index before zero crossing # use linear interpolation to find the zero crossing, i.e. y = mx + c. x1 = vals[crossing] @@ -246,11 +259,11 @@ def plot_wiggle(wav, ax=None, scalar=0.3, clip=1.5, **axkwargs): order = np.argsort(y) # shift from amplitudes to plotting coordinates x_shift, y = y[order].__divmod__(ns + 1) - ax.plot(y, x[order] + x_shift + 1, 'k', linewidth=.5) + ax.plot(y, x[order] + x_shift + 1, "k", linewidth=0.5) x[x > 0] = np.nan x = x[order] + x_shift + 1 - ax.fill(y, x, 'k', aa=True) - ax.set(xlim=[0, ns], ylim=[0, nc], xlabel='sample', ylabel='trace') + ax.fill(y, x, "k", aa=True) + ax.set(xlim=[0, ns], ylim=[0, nc], xlabel="sample", ylabel="trace") plt.tight_layout() return ax @@ -263,46 +276,66 @@ def plot_peaktiptrough(df, arr, ax, nth_wav=0, plot_grey=True, fs=30000): if ax is None: fig, ax = plt.subplots() if plot_grey: - ax.plot(tscale, arr[nth_wav], c='gray', alpha=0.5) + ax.plot(tscale, arr[nth_wav], c="gray", alpha=0.5) # Peak channel - ax.plot(tscale, arr[nth_wav][:, int(df.iloc[nth_wav].peak_trace_idx)], marker=".", c='blue') + ax.plot( + tscale, + arr[nth_wav][:, int(df.iloc[nth_wav].peak_trace_idx)], + marker=".", + c="blue", + ) # Peak point - ax.plot(tscale[df.iloc[nth_wav].peak_time_idx], df.iloc[nth_wav].peak_val, 'r*') + ax.plot(tscale[df.iloc[nth_wav].peak_time_idx], df.iloc[nth_wav].peak_val, "r*") # Trough point - ax.plot(tscale[df.iloc[nth_wav].trough_time_idx], df.iloc[nth_wav].trough_val, 'g*') + ax.plot(tscale[df.iloc[nth_wav].trough_time_idx], df.iloc[nth_wav].trough_val, "g*") # Tip point - ax.plot(tscale[df.iloc[nth_wav].tip_time_idx], df.iloc[nth_wav].tip_val, 'k*') + ax.plot(tscale[df.iloc[nth_wav].tip_time_idx], df.iloc[nth_wav].tip_val, "k*") # Half peak points - ax.plot(tscale[df.iloc[nth_wav].half_peak_post_time_idx], df.iloc[nth_wav].half_peak_post_val, 'c*') - ax.plot(tscale[df.iloc[nth_wav].half_peak_pre_time_idx], df.iloc[nth_wav].half_peak_pre_val, 'c*') + ax.plot( + tscale[df.iloc[nth_wav].half_peak_post_time_idx], + df.iloc[nth_wav].half_peak_post_val, + "c*", + ) + ax.plot( + 
tscale[df.iloc[nth_wav].half_peak_pre_time_idx],
+        df.iloc[nth_wav].half_peak_pre_val,
+        "c*",
+    )
     # Line for half peak boundary
     # ax.plot((0, arr.shape[1]), np.array((1, 1)) * df.iloc[nth_wav].peak_val / 2, '-k')
-    ax.plot((tscale[0], tscale[-1]), np.array((1, 1)) * df.iloc[nth_wav].peak_val / 2, '-k')
+    ax.plot(
+        (tscale[0], tscale[-1]), np.array((1, 1)) * df.iloc[nth_wav].peak_val / 2, "-k"
+    )
     # Recovery point
-    ax.plot(tscale[df.iloc[nth_wav].recovery_time_idx], df.iloc[nth_wav].recovery_val, 'y*')
+    ax.plot(
+        tscale[df.iloc[nth_wav].recovery_time_idx], df.iloc[nth_wav].recovery_val, "y*"
+    )
     # Axis labels
-    ax.set_ylabel('(Volt)')
-    ax.set_xlabel('Time (ms)')
+    ax.set_ylabel("(Volt)")
+    ax.set_xlabel("Time (ms)")
 
 
 def half_peak_point(arr_peak, df):
-    '''
+    """
     Compute the two intersection points at half-maximum of the peak
     :param: arr_peak: NxT waveform matrix : spikes x time, only the peak channel (inverted for positive wavs)
     :return: df with columns containing indices of intersection points and values, length of N wav
-    '''
+    """
     # TODO Review: is df.to_numpy() necessary ?
     # Compute half max value, repmat and subtract it
-    half_max = (df['peak_val'].to_numpy() / 2) * df['invert_sign_peak'].to_numpy()
+    half_max = (df["peak_val"].to_numpy() / 2) * df["invert_sign_peak"].to_numpy()
     half_max_rep = np.tile(half_max, (arr_peak.shape[1], 1)).transpose()
     # Note on the above: using np.tile because np.repeat does not work with axis=1
     # todo rewrite with np.repeat and np.newaxis
     arr_sub = arr_peak - half_max_rep
     # Create masks pre/post
-    arr_pre, arr_post = arr_pre_post(arr_sub, df['peak_time_idx'].to_numpy())
+    arr_pre, arr_post = arr_pre_post(arr_sub, df["peak_time_idx"].to_numpy())
     # POST: Find first time it crosses 0 (from negative -> positive values)
     indx_post = np.argmax(arr_post > 0, axis=1)
-    val_post = arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_post] * df['invert_sign_peak'].to_numpy()
+    val_post = (
+        arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_post]
+        * df["invert_sign_peak"].to_numpy()
+    )
     # PRE:
     # Invert matrix (flip L-R) to find first point crossing threshold before peak
     arr_pre_flip = np.fliplr(arr_pre)
@@ -314,75 +347,82 @@ def half_peak_point(arr_peak, df):
     arr_pre_ones = np.fliplr(arr_zeros)
     # Find the index of the first 1
     indx_pre = np.argmax(arr_pre_ones > 0, axis=1)
-    val_pre = arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_pre] * df['invert_sign_peak'].to_numpy()
+    val_pre = (
+        arr_peak[np.arange(0, arr_peak.shape[0], 1), indx_pre]
+        * df["invert_sign_peak"].to_numpy()
+    )
     # Add columns to DF and return
-    df['half_peak_post_time_idx'] = indx_post
-    df['half_peak_pre_time_idx'] = indx_pre
-    df['half_peak_post_val'] = val_post
-    df['half_peak_pre_val'] = val_pre
+    df["half_peak_post_time_idx"] = indx_post
+    df["half_peak_pre_time_idx"] = indx_pre
+    df["half_peak_post_val"] = val_post
+    df["half_peak_pre_val"] = val_pre
     return df
 
 
 def half_peak_duration(df, fs=30000):
-    '''
+    """
     Compute the half peak duration (in seconds)
     :param df: dataframe of waveforms features, with the half peak intersection points computed
     :param fs: sampling rate (Hz)
     :return: dataframe with added column
-    '''
-    df['half_peak_duration'] = (df['half_peak_post_time_idx'] - df['half_peak_pre_time_idx']) / fs
+    """
+    df["half_peak_duration"] = (
+        df["half_peak_post_time_idx"] - df["half_peak_pre_time_idx"]
+    ) / fs
     return df
 
 
 def peak_to_trough_duration(df, fs=30000):
-    '''
+    """
     Compute the duration (seconds) of the peak-to-trough
     :param df: dataframe of waveforms features
     :param fs: sampling
rate (Hz) :return: df - ''' + """ # Duration - df['peak_to_trough_duration'] = (df['trough_time_idx'] - df['peak_time_idx']) / fs + df["peak_to_trough_duration"] = (df["trough_time_idx"] - df["peak_time_idx"]) / fs return df def peak_to_trough_ratio(df): - ''' + """ Compute the ratio of the peak-to-trough :param df: dataframe of waveforms features :param fs: sampling rate (Hz) :return: - ''' + """ # Ratio - df['peak_to_trough_ratio'] = np.abs(df['peak_val'] / df['trough_val']) # Division by 0 returns NaN + df["peak_to_trough_ratio"] = np.abs( + df["peak_val"] / df["trough_val"] + ) # Division by 0 returns NaN # Ratio log-scale - df['peak_to_trough_ratio_log'] = np.log(df['peak_to_trough_ratio']) + df["peak_to_trough_ratio_log"] = np.log(df["peak_to_trough_ratio"]) return df def polarisation_slopes(df, fs=30000): - ''' + """ Computes the depolarisation and repolarisation slopes as the difference between tip-peak and peak-trough respectively. :param df: dataframe of waveforms features :param fs: sampling frequency (Hz) :return: dataframe with added columns - ''' + """ # Depolarisation: slope before the peak (between tip and peak) - depolarise_duration = (df['peak_time_idx'] - df['tip_time_idx']) / fs - depolarise_volt = df['peak_val'] - df['tip_val'] - df['depolarisation_slope'] = depolarise_volt / depolarise_duration + depolarise_duration = (df["peak_time_idx"] - df["tip_time_idx"]) / fs + depolarise_volt = df["peak_val"] - df["tip_val"] + df["depolarisation_slope"] = depolarise_volt / depolarise_duration # Repolarisation: slope after the peak (between peak and trough) - repolarise_duration = (df['trough_time_idx'] - df['peak_time_idx']) / fs - repolarise_volt = df['trough_val'] - df['peak_val'] - df['repolarisation_slope'] = repolarise_volt / repolarise_duration + repolarise_duration = (df["trough_time_idx"] - df["peak_time_idx"]) / fs + repolarise_volt = df["trough_val"] - df["peak_val"] + df["repolarisation_slope"] = repolarise_volt / repolarise_duration return df def recovery_point(arr_peak, df, idx_from_trough=5): - ''' + """ Compute the single recovery secondary point (selected by a fixed increment from the trough). If the fixed increment from the trough is outside the matrix boundary, the last value of the waveform is used. @@ -390,55 +430,66 @@ def recovery_point(arr_peak, df, idx_from_trough=5): :param df: dataframe of waveforms features :param idx_from_trough: sample index to be taken into account for the second point ; index from the trough :return: dataframe with added columns - ''' + """ # Check range is not outside of matrix boundary) if idx_from_trough >= (arr_peak.shape[1]): - raise ValueError('Index out of bound: Index larger than waveform array shape') + raise ValueError("Index out of bound: Index larger than waveform array shape") # Check df['peak_time_idx'] + pt_idx is not out of bound - idx_all = df['trough_time_idx'].to_numpy() + idx_from_trough + idx_all = df["trough_time_idx"].to_numpy() + idx_from_trough # Find waveform(s) for which the second point is outside matrix boundary range idx_over = np.where(idx_all > arr_peak.shape[1])[0] if len(idx_over) > 0: # Todo should this raise a warning ? 
idx_all[idx_over] = arr_peak.shape[1] - 1  # Take the last value of the waveform
 
-    df['recovery_time_idx'] = idx_all
-    df['recovery_val'] = arr_peak[np.arange(0, arr_peak.shape[0], 1), idx_all] * df['invert_sign_peak'].to_numpy()
+    df["recovery_time_idx"] = idx_all
+    df["recovery_val"] = (
+        arr_peak[np.arange(0, arr_peak.shape[0], 1), idx_all]
+        * df["invert_sign_peak"].to_numpy()
+    )
     return df
 
 
 def recovery_slope(df, fs=30000):
-    '''
+    """
     Compute the recovery slope, from the trough to the single secondary point.
 
     :param df: dataframe of waveforms features
     :param fs: sampling frequency (Hz)
     :return: dataframe with added columns
-    '''
+    """
     # Note: this could be lumped in with the polarisation_slopes
     # Time, volt and slope values
-    recovery_duration = (df['recovery_time_idx'] - df['trough_time_idx']) / fs  # Diff between second point and peak
-    recovery_volt = df['recovery_val'] - df['trough_val']
-    df['recovery_slope'] = recovery_volt / recovery_duration
+    recovery_duration = (
+        df["recovery_time_idx"] - df["trough_time_idx"]
+    ) / fs  # Diff between recovery point and trough
+    recovery_volt = df["recovery_val"] - df["trough_val"]
+    df["recovery_slope"] = recovery_volt / recovery_duration
    return df
 
 
 def dist_chanel_from_peak(channel_geometry, peak_trace_idx):
-    '''
+    """
     Compute distance for each channel from the peak channel, for each spike
     :param channel_geometry: Matrix N(spikes) * N(channels) * 3 (spatial coordinates x,y,z)
     # Note: computing this to provide it as input will be a pain
     :param peak_trace_idx: index of the highest amplitude channel in the multi-channel waveform
     :return: eu_dist : N(spikes) * N(channels): the Euclidean distance between each channel
     and the peak channel, for each waveform
-    '''
+    """
     # Note: It deals with NaN in entry coordinates (fake padding channels); returns NaN as Eu dist
     # Get peak coordinates (x,y,z)
-    peak_coord = channel_geometry[np.arange(0, channel_geometry.shape[0], 1), peak_trace_idx, :]
+    peak_coord = channel_geometry[
+        np.arange(0, channel_geometry.shape[0], 1), peak_trace_idx, :
+    ]
     # repmat peak coordinates (x,y,z) [Nspikes x Ncoordinates] across channels
-    peak_coord_rep = np.repeat(peak_coord[:, :, np.newaxis], channel_geometry.shape[1], axis=2)  # Todo -1
-    peak_coord_rep = np.swapaxes(peak_coord_rep, 1, 2)  # N spikes x channel x coordinates
+    peak_coord_rep = np.repeat(
+        peak_coord[:, :, np.newaxis], channel_geometry.shape[1], axis=2
+    )  # Todo -1
+    peak_coord_rep = np.swapaxes(
+        peak_coord_rep, 1, 2
+    )  # N spikes x channel x coordinates
 
     # Difference
     diff_ch = peak_coord_rep - channel_geometry
@@ -452,26 +503,28 @@ def dist_chanel_from_peak(channel_geometry, peak_trace_idx):
 
 
 def spatial_spread_weighted(eu_dist, weights):
-    '''
+    """
     Returns the spatial spread defined by the sum(w_i * dist_i) / sum(w_i). The weight is a given
     value per channel (e.g. the absolute peak voltage value)
     :param eu_dist: N(spikes) * N(channels): the Euclidean distance between each channel
     and the peak channel, for each waveform
     :param weights: N(spikes) * N(channels): the weights per channel per spikes
     :return: spatial_spread : N(spikes) * 1 vector
-    '''
+    """
     # Note: possible to have nan entries in eu_dist
-    spatial_spread = np.nansum(np.multiply(eu_dist, weights), axis=1) / np.sum(weights, axis=1)
+    spatial_spread = np.nansum(np.multiply(eu_dist, weights), axis=1) / np.sum(
+        weights, axis=1
+    )
     return spatial_spread
 
 
 def reshape_wav_one_channel(arr):
-    '''
+    """
     Reshape matrix so instead of being like waveforms: (wav, time, trace) i.e.
(nspikes x nsamples x nchannels) it is of size (nspikes * nchannels) x nsamples
 
     :param waveforms: 3D np.array containing multi-channel waveforms,
     3D dimensions have to be (wav, time, trace)
     :return:
-    '''
+    """
     # Swap axis so the matrix is now: wav x channel x time
     arr_ax = np.swapaxes(arr, 1, 2)
     # reshape using the first 2 dimension (multiplied) x time
@@ -481,35 +534,37 @@ def reshape_wav_one_channel(arr):
     return arr_out
 
 
-def weights_spk_ch(arr, weight_type='peak'):
-    '''
+def weights_spk_ch(arr, weight_type="peak"):
+    """
     Compute a value on all channels of a waveform matrix, and return as weights (to be used in spatial spread).
     :param arr: 3D np.array containing multi-channel waveforms, 3D dimensions have to be (wav, time, trace)
     :param weight_type: value to be returned as weight (implemented: peak)
     :return: weights: N(spikes) * N(channels): the weights per channel per spikes
-    '''
+    """
     # Reshape
     arr_resh = reshape_wav_one_channel(arr)
     # Peak
     df = find_peak(arr_resh)
-    if weight_type == 'peak':
-        weights_flat = df['peak_val'].to_numpy()
+    if weight_type == "peak":
+        weights_flat = df["peak_val"].to_numpy()
     else:
-        raise ValueError('weight_type: unknown value attributed')
+        raise ValueError("weight_type: unknown value attributed")
     # Reshape
     # Order in DF: #1-2-3 channel of spike #1, then #1-2-3 channel spike #2 etc
     weights = np.reshape(weights_flat, (arr.shape[0], arr.shape[2]))
     return weights
 
 
-def compute_spatial_spread(arr, df, channel_geometry, weight_type='peak'):
+def compute_spatial_spread(arr, df, channel_geometry, weight_type="peak"):
     eu_dist = dist_chanel_from_peak(channel_geometry, df)
     weights = weights_spk_ch(arr, weight_type)
-    df['spatial_spread'] = spatial_spread_weighted(eu_dist, weights)
+    df["spatial_spread"] = spatial_spread_weighted(eu_dist, weights)
     return df
 
 
-def compute_spike_features(arr_in, fs=30000, recovery_duration_ms=0.16, return_peak_channel=False):
+def compute_spike_features(
+    arr_in, fs=30000, recovery_duration_ms=0.16, return_peak_channel=False
+):
     """
     This is the main function to compute spike features from a set of waveforms
     Current features:
@@ -529,7 +584,9 @@ def compute_spike_features(arr_in, fs=30000, recovery_duration_ms=0.16, return_p
     # Per waveform, keep only trace that contains the peak
     arr_peak_real = get_array_peak(arr_in, df)
     # Invert positive spikes
-    arr_peak, df = invert_peak_waveform(arr_peak_real.copy(), df)  # Copy otherwise overwrite the variable in memory
+    arr_peak, df = invert_peak_waveform(
+        arr_peak_real.copy(), df
+    )  # Copy, otherwise the variable is overwritten in memory
     # Tip-trough (this also computes the peak_to_trough_ratio)
     df, arr_peak = find_tip_trough(arr_peak, arr_peak_real, df)
     # Peak to trough duration
@@ -539,7 +596,9 @@ def compute_spike_features(arr_in, fs=30000, recovery_duration_ms=0.16, return_p
     # Half peak duration
     df = half_peak_duration(df, fs=fs)
     # Recovery point
-    df = recovery_point(arr_peak, df, idx_from_trough=int(round(recovery_duration_ms * fs / 1000)))
+    df = recovery_point(
+        arr_peak, df, idx_from_trough=int(round(recovery_duration_ms * fs / 1000))
+    )
     # Slopes
     df = polarisation_slopes(df, fs=fs)
     df = recovery_slope(df, fs=fs)
@@ -550,7 +609,14 @@ def compute_spike_features(arr_in, fs=30000, recovery_duration_ms=0.16, return_p
     return df
 
 
-def extract_wfs_array(arr, df, channel_neighbors, trough_offset=42, spike_length_samples=121, add_nan_trace=False):
+def extract_wfs_array(
+    arr,
+    df,
+    channel_neighbors,
+    trough_offset=42,
+    spike_length_samples=121,
+    add_nan_trace=False,
+):
     """
     Extract waveforms
at specified samples and peak channels as a stack. @@ -574,8 +640,9 @@ def extract_wfs_array(arr, df, channel_neighbors, trough_offset=42, spike_length # check that the spike window is included in the recording: last_idx = df["sample"].iloc[-1] - assert last_idx + (spike_length_samples - trough_offset) < arr.shape[0], \ - f"Spike index {last_idx} extends past end of recording ({arr.shape[0]} samples)." + assert ( + last_idx + (spike_length_samples - trough_offset) < arr.shape[0] + ), f"Spike index {last_idx} extends past end of recording ({arr.shape[0]} samples)." nwf = len(df) @@ -583,13 +650,16 @@ def extract_wfs_array(arr, df, channel_neighbors, trough_offset=42, spike_length cind = channel_neighbors[df["peak_channel"].to_numpy()] # Get sample indices - sind = df["sample"].to_numpy()[:, np.newaxis] + (np.arange(spike_length_samples) - trough_offset) + sind = df["sample"].to_numpy()[:, np.newaxis] + ( + np.arange(spike_length_samples) - trough_offset + ) nchan = cind.shape[1] wfs = np.zeros((nwf, spike_length_samples, nchan), arr.dtype) try: from tqdm import trange + fun = trange except ImportError: fun = range diff --git a/src/neuropixel.py b/src/neuropixel.py index 0d42436..c6d1f55 100644 --- a/src/neuropixel.py +++ b/src/neuropixel.py @@ -11,44 +11,47 @@ import spikeglx from ibldsp.utils import WindowGenerator -_logger = logging.getLogger('ibllib') +_logger = logging.getLogger("ibllib") # hardware pin to channel mapping -SYNC_PIN_OUT = {'3A': {"pin01": 0, - "pin02": 1, - "pin03": 2, - "pin04": 3, - "pin05": None, - "pin06": 4, - "pin07": 5, - "pin08": 6, - "pin09": 7, - "pin10": None, - "pin11": 8, - "pin12": 9, - "pin13": 10, - "pin14": 11, - "pin15": None, - "pin16": 12, - "pin17": 13, - "pin18": 14, - "pin19": 15, - "pin20": None, - "pin21": None, - "pin22": None, - "pin23": None, - "pin24": None - }, - '3B': {"P0.0": 0, - "P0.1": 1, - "P0.2": 2, - "P0.3": 3, - "P0.4": 4, - "P0.5": 5, - "P0.6": 6, - "P0.7": 7, - } - } +SYNC_PIN_OUT = { + "3A": { + "pin01": 0, + "pin02": 1, + "pin03": 2, + "pin04": 3, + "pin05": None, + "pin06": 4, + "pin07": 5, + "pin08": 6, + "pin09": 7, + "pin10": None, + "pin11": 8, + "pin12": 9, + "pin13": 10, + "pin14": 11, + "pin15": None, + "pin16": 12, + "pin17": 13, + "pin18": 14, + "pin19": 15, + "pin20": None, + "pin21": None, + "pin22": None, + "pin23": None, + "pin24": None, + }, + "3B": { + "P0.0": 0, + "P0.1": 1, + "P0.2": 2, + "P0.3": 3, + "P0.4": 4, + "P0.5": 5, + "P0.6": 6, + "P0.7": 7, + }, +} # sample to volt conversion factors S2V_AP = 2.34375e-06 @@ -60,17 +63,19 @@ def _deprecated_sites_coordinates() -> np.array: # this is used in legacy code - warnings.warn("the SITES_COORDINATES module attribute reflects only 374 channels and is only applicable to old" - "deprecated 3A probes \n Use `neuropixel.trace_header() to get the canonical probe geometries " - "according to the probe versions: see help(neuropixel.trace_header)." - "\n If possible the reommended approach is to directly read the probe geometry" - "from the metadata using spigeglx.Reader") + warnings.warn( + "the SITES_COORDINATES module attribute reflects only 374 channels and is only applicable to old" + "deprecated 3A probes \n Use `neuropixel.trace_header() to get the canonical probe geometries " + "according to the probe versions: see help(neuropixel.trace_header)." 
+ "\n If possible the reommended approach is to directly read the probe geometry" + "from the metadata using spigeglx.Reader" + ) for line in traceback.format_stack(): - if 'ibllib' in line: + if "ibllib" in line: print(line.strip()) refch_3a = np.array([36, 75, 112, 151, 188, 227, 264, 303, 340, 379]) th = trace_header(version=1) - SITES_COORDINATES = np.delete(np.c_[th['x'], th['y']], refch_3a, axis=0) + SITES_COORDINATES = np.delete(np.c_[th["x"], th["y"]], refch_3a, axis=0) return SITES_COORDINATES @@ -96,7 +101,7 @@ def xy2rc(x, y, version=1): elif np.floor(version) == 2: col = x / 32 row = y / 15 - return {'col': col, 'row': row} + return {"col": col, "row": row} def rc2xy(row, col, version=1): @@ -113,7 +118,7 @@ def rc2xy(row, col, version=1): elif np.floor(version) == 2: x = col * 32 y = row * 15 - return {'x': x, 'y': y} + return {"x": x, "y": y} def dense_layout(version=1, nshank=1): @@ -122,24 +127,40 @@ def dense_layout(version=1, nshank=1): :param version: major version number: 1 or 2 or 2.4 :return: dictionary with keys 'ind', 'col', 'row', 'x', 'y' """ - ch = {'ind': np.arange(NC), - 'row': np.floor(np.arange(NC) / 2), - 'shank': np.zeros(NC)} + ch = { + "ind": np.arange(NC), + "row": np.floor(np.arange(NC) / 2), + "shank": np.zeros(NC), + } if version == 1: # version 1 has a dense layout, checkerboard pattern - ch.update({'col': np.tile(np.array([2, 0, 3, 1]), int(NC / 4))}) - elif np.floor(version) == 2 and nshank == 1: # single shank NP1 has 2 columns in a dense patter - ch.update({'col': np.tile(np.array([0, 1]), int(NC / 2))}) - elif np.floor(version) == 2 and nshank == 4: # the 4 shank version default is rather complicated + ch.update({"col": np.tile(np.array([2, 0, 3, 1]), int(NC / 4))}) + elif ( + np.floor(version) == 2 and nshank == 1 + ): # single shank NP1 has 2 columns in a dense patter + ch.update({"col": np.tile(np.array([0, 1]), int(NC / 2))}) + elif ( + np.floor(version) == 2 and nshank == 4 + ): # the 4 shank version default is rather complicated shank_row = np.tile(np.arange(NC / 16), (2, 1)).T[:, np.newaxis].flatten() shank_row = np.tile(shank_row, 8) - shank_row += np.tile(np.array([0, 0, 1, 1, 0, 0, 1, 1])[:, np.newaxis], (1, int(NC / 8))).flatten() * 24 - ch.update({ - 'col': np.tile(np.array([0, 1]), int(NC / 2)), - 'shank': np.tile(np.array([0, 1, 0, 1, 2, 3, 2, 3])[:, np.newaxis], (1, int(NC / 8))).flatten(), - 'row': shank_row}) + shank_row += ( + np.tile( + np.array([0, 0, 1, 1, 0, 0, 1, 1])[:, np.newaxis], (1, int(NC / 8)) + ).flatten() + * 24 + ) + ch.update( + { + "col": np.tile(np.array([0, 1]), int(NC / 2)), + "shank": np.tile( + np.array([0, 1, 0, 1, 2, 3, 2, 3])[:, np.newaxis], (1, int(NC / 8)) + ).flatten(), + "row": shank_row, + } + ) # for all, get coordinates - ch.update(rc2xy(ch['row'], ch['col'], version=version)) + ch.update(rc2xy(ch["row"], ch["col"], version=version)) return ch @@ -200,7 +221,7 @@ def trace_header(version=1, nshank=1): x, y, row, col, ind, adc and sampleshift vectors corresponding to each site """ h = dense_layout(version=version, nshank=nshank) - h['sample_shift'], h['adc'] = adc_shifts(version=version) + h["sample_shift"], h["adc"] = adc_shifts(version=version) return h @@ -211,7 +232,7 @@ def split_trace_header(h, shank=0): :param shank: :return: """ - shank_idx = np.where(h['shank'] == shank)[0] + shank_idx = np.where(h["shank"] == shank)[0] h_shank = {key: h[key][shank_idx] for key in h.keys()} return h_shank @@ -256,26 +277,31 @@ def init_params(self, nsamples=None, nwindow=None, extra=None, nshank=None): 
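
# [Editor's note] Illustrative usage of the geometry helpers above; a minimal
# sketch, not part of the patch, assuming this module is importable as
# `neuropixel`. trace_header() returns the canonical site layout as a dict of
# per-channel vectors.
import numpy as np
import neuropixel

th = neuropixel.trace_header(version=2, nshank=4)  # NP2.4 probe, four shanks
# 'x'/'y' are site coordinates in micrometers; 'sample_shift' and 'adc' carry
# the per-channel ADC sampling offsets computed by adc_shifts()
print(np.unique(th["shank"]))                      # [0. 1. 2. 3.]
h0 = neuropixel.split_trace_header(th, shank=0)    # geometry of shank 0 only
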
self.ratio = int(self.fs_ap / self.fs_lf) self.nsamples = nsamples or self.sr.ns self.samples_window = nwindow or 2 * self.fs_ap - assert np.mod(self.samples_window, self.ratio) == 0, \ - f'nwindow must be a factor or {self.ratio}' + assert ( + np.mod(self.samples_window, self.ratio) == 0 + ), f"nwindow must be a factor or {self.ratio}" self.samples_overlap = 576 - assert np.mod(self.samples_overlap, self.ratio) == 0, \ - f'samples_overlap must be a factor or {self.ratio}' + assert ( + np.mod(self.samples_overlap, self.ratio) == 0 + ), f"samples_overlap must be a factor or {self.ratio}" self.samples_taper = int(self.samples_overlap / 4) - assert np.mod(self.samples_taper, self.ratio) == 0, \ - f'samples_taper must be a factor or {self.ratio}' - self.taper = np.r_[0, scipy.signal.windows.cosine((self.samples_taper - 1) * 2), 0] + assert ( + np.mod(self.samples_taper, self.ratio) == 0 + ), f"samples_taper must be a factor or {self.ratio}" + self.taper = np.r_[ + 0, scipy.signal.windows.cosine((self.samples_taper - 1) * 2), 0 + ] # Low pass filter (acts as both anti-aliasing and LP filter) - butter_lp_kwargs = {'N': 2, 'Wn': 1000 / 2500 / 2, 'btype': 'lowpass'} - self.sos_lp = scipy.signal.butter(**butter_lp_kwargs, output='sos') + butter_lp_kwargs = {"N": 2, "Wn": 1000 / 2500 / 2, "btype": "lowpass"} + self.sos_lp = scipy.signal.butter(**butter_lp_kwargs, output="sos") # Number of ap channels - self.napch = int(self.sr.meta['snsApLfSy'][0]) + self.napch = int(self.sr.meta["snsApLfSy"][0]) # Position of start of sync channels in the raw data - self.idxsyncch = int(self.sr.meta['snsApLfSy'][0]) + self.idxsyncch = int(self.sr.meta["snsApLfSy"][0]) - self.extra = extra or '' + self.extra = extra or "" self.nshank = nshank or None self.check_completed = False @@ -285,7 +311,7 @@ def check_metadata(self): been split into shanks. If we are sets flag and prevents further processing occurring :return: """ - if self.sr.meta.get(f'{self.np_version}_shank', None) is not None: + if self.sr.meta.get(f"{self.np_version}_shank", None) is not None: self.already_processed = True else: self.already_processed = False @@ -297,12 +323,12 @@ def process(self, overwrite=False): :param overwrite: :return: """ - if self.np_version == 'NP2.4': + if self.np_version == "NP2.4": status = self._process_NP24(overwrite=overwrite) - elif self.np_version == 'NP2.1': + elif self.np_version == "NP2.1": status = self._process_NP21(overwrite=overwrite) else: - _logger.warning('Meta file is not of type NP2.1 or NP2.4, cannot process') + _logger.warning("Meta file is not of type NP2.1 or NP2.4, cannot process") status = -1 return status @@ -316,34 +342,43 @@ def _process_NP24(self, overwrite=False): :return: """ if self.already_processed: - _logger.warning('This ap file is an NP2.4 that has already been split into shanks, ' - 'nothing to do here') + _logger.warning( + "This ap file is an NP2.4 that has already been split into shanks, " + "nothing to do here" + ) return 0 self.shank_info = self._prepare_files_NP24(overwrite=overwrite) if self.already_exists: - _logger.warning('One or more of the sub shank folders already exists, ' - 'to force reprocessing set overwrite to True') + _logger.warning( + "One or more of the sub shank folders already exists, " + "to force reprocessing set overwrite to True" + ) return 0 # Initial checks out the way. Let's goooo! 
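
# [Editor's note] A self-contained sketch (not part of the patch) of the LFP
# extraction performed in the loop below: taper the chunk edges, low-pass
# filter, then decimate by the AP/LF sampling ratio. Synthetic data stands in
# for chunks read from spikeglx.Reader; filter parameters mirror init_params().
import numpy as np
import scipy.signal

fs_ap, fs_lf = 30000, 2500
ratio = fs_ap // fs_lf                                  # 12
sos = scipy.signal.butter(N=2, Wn=1000 / 2500 / 2, btype="lowpass", output="sos")
chunk = np.random.randn(4, 2 * fs_ap)                   # 4 channels, 2 s of AP data
lfp = scipy.signal.sosfiltfilt(sos, chunk)[:, ::ratio]  # filter, then decimate
assert lfp.shape == (4, 2 * fs_lf)
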
wg = WindowGenerator(self.nsamples, self.samples_window, self.samples_overlap) for first, last in wg.firstlast: - chunk_ap = self.sr[first:last, :self.napch].T - chunk_ap_sync = self.sr[first:last, self.idxsyncch:].T - chunk_lf = self.extract_lfp(self.sr[first:last, :self.napch].T) - chunk_lf_sync = self.extract_lfp_sync(self.sr[first:last, self.idxsyncch:].T) - - chunk_ap2save = self._ind2save(chunk_ap, chunk_ap_sync, wg, ratio=1, etype='ap') - chunk_lf2save = self._ind2save(chunk_lf, chunk_lf_sync, wg, ratio=self.ratio, - etype='lf') - - self._split2shanks(chunk_ap2save, etype='ap') - self._split2shanks(chunk_lf2save, etype='lf') - - self._closefiles(etype='ap') - self._closefiles(etype='lf') + chunk_ap = self.sr[first:last, : self.napch].T + chunk_ap_sync = self.sr[first:last, self.idxsyncch :].T + chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T) + chunk_lf_sync = self.extract_lfp_sync( + self.sr[first:last, self.idxsyncch :].T + ) + + chunk_ap2save = self._ind2save( + chunk_ap, chunk_ap_sync, wg, ratio=1, etype="ap" + ) + chunk_lf2save = self._ind2save( + chunk_lf, chunk_lf_sync, wg, ratio=self.ratio, etype="lf" + ) + + self._split2shanks(chunk_ap2save, etype="ap") + self._split2shanks(chunk_lf2save, etype="lf") + + self._closefiles(etype="ap") + self._closefiles(etype="lf") self._writemetadata_ap() self._writemetadata_lf() @@ -369,7 +404,7 @@ def _prepare_files_NP24(self, overwrite=False): """ chn_info = spikeglx._map_channels_from_meta(self.sr.meta) - n_shanks = self.nshank or np.unique(chn_info['shank']).astype(np.int16) + n_shanks = self.nshank or np.unique(chn_info["shank"]).astype(np.int16) label = self.ap_file.parent.parts[-1] shank_info = {} self.already_exists = False @@ -377,29 +412,35 @@ def _prepare_files_NP24(self, overwrite=False): for sh in n_shanks: _shank_info = {} # channels for individual shank + sync channel - _shank_info['chns'] = np.r_[np.where(chn_info['shank'] == sh)[0], - np.array(spikeglx._get_sync_trace_indices_from_meta( - self.sr.meta))] + _shank_info["chns"] = np.r_[ + np.where(chn_info["shank"] == sh)[0], + np.array(spikeglx._get_sync_trace_indices_from_meta(self.sr.meta)), + ] - probe_path = self.ap_file.parent.parent.joinpath(label + chr(97 + int(sh)) + self.extra) + probe_path = self.ap_file.parent.parent.joinpath( + label + chr(97 + int(sh)) + self.extra + ) if not probe_path.exists() or overwrite: if self.sr.is_mtscomp: - ap_file_bin = self.ap_file.with_suffix('.bin').name + ap_file_bin = self.ap_file.with_suffix(".bin").name else: ap_file_bin = self.ap_file.name probe_path.mkdir(parents=True, exist_ok=True) - _shank_info['ap_file'] = probe_path.joinpath(ap_file_bin) - _shank_info['ap_open_file'] = open(_shank_info['ap_file'], 'wb') - _shank_info['lf_file'] = probe_path.joinpath( - ap_file_bin.replace('ap', 'lf')) - _shank_info['lf_open_file'] = open(_shank_info['lf_file'], 'wb') - - shank_info[f'shank{sh}'] = _shank_info + _shank_info["ap_file"] = probe_path.joinpath(ap_file_bin) + _shank_info["ap_open_file"] = open(_shank_info["ap_file"], "wb") + _shank_info["lf_file"] = probe_path.joinpath( + ap_file_bin.replace("ap", "lf") + ) + _shank_info["lf_open_file"] = open(_shank_info["lf_file"], "wb") + + shank_info[f"shank{sh}"] = _shank_info else: self.already_exists = True - _logger.warning('One or more of the sub shank folders already exists, ' - 'to force reprocessing set overwrite to True') + _logger.warning( + "One or more of the sub shank folders already exists, " + "to force reprocessing set overwrite to True" + ) return 
shank_info @@ -415,26 +456,30 @@ def _process_NP21(self, overwrite=False, offset=0, **kwargs): self.shank_info = self._prepare_files_NP21(overwrite=overwrite, **kwargs) if self.already_exists: - _logger.warning('This ap file is an NP2.1 that already has lfp extracted, ' - 'nothing to do here') + _logger.warning( + "This ap file is an NP2.1 that already has lfp extracted, " + "nothing to do here" + ) return 0 wg = WindowGenerator(self.nsamples, self.samples_window, self.samples_overlap) for first, last in wg.firstlast: - first = first + offset last = last + offset - chunk_lf = self.extract_lfp(self.sr[first:last, :self.napch].T) - chunk_lf_sync = self.extract_lfp_sync(self.sr[first:last, self.idxsyncch:].T) + chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T) + chunk_lf_sync = self.extract_lfp_sync( + self.sr[first:last, self.idxsyncch :].T + ) - chunk_lf2save = self._ind2save(chunk_lf, chunk_lf_sync, wg, ratio=self.ratio, - etype='lf') + chunk_lf2save = self._ind2save( + chunk_lf, chunk_lf_sync, wg, ratio=self.ratio, etype="lf" + ) - self._split2shanks(chunk_lf2save, etype='lf') + self._split2shanks(chunk_lf2save, etype="lf") - self._closefiles(etype='lf') + self._closefiles(etype="lf") self._writemetadata_lf() @@ -455,34 +500,41 @@ def _prepare_files_NP21(self, overwrite=False, assert_shanks=True): chn_info = spikeglx._map_channels_from_meta(self.sr.meta) if assert_shanks: - n_shanks = np.unique(chn_info['shank']).astype(np.int16) - assert (len(n_shanks) == 1) + n_shanks = np.unique(chn_info["shank"]).astype(np.int16) + assert len(n_shanks) == 1 else: n_shanks = np.array([0]) shank_info = {} self.already_exists = False - lf_file = self.ap_file.parent.joinpath(self.ap_file.name.replace('ap', 'lf')).with_suffix('.bin') - lf_cbin_file = lf_file.with_suffix('.cbin') + lf_file = self.ap_file.parent.joinpath( + self.ap_file.name.replace("ap", "lf") + ).with_suffix(".bin") + lf_cbin_file = lf_file.with_suffix(".cbin") if not (lf_file.exists() or lf_cbin_file.exists()) or overwrite: for sh in n_shanks: _shank_info = {} # channels for individual shank + sync channel if assert_shanks: - _shank_info['chns'] = np.r_[np.where(chn_info['shank'] == sh)[0], - np.array(spikeglx._get_sync_trace_indices_from_meta( - self.sr.meta))] + _shank_info["chns"] = np.r_[ + np.where(chn_info["shank"] == sh)[0], + np.array( + spikeglx._get_sync_trace_indices_from_meta(self.sr.meta) + ), + ] else: - _shank_info['chns'] = np.arange(self.sr.nc) + _shank_info["chns"] = np.arange(self.sr.nc) - _shank_info['lf_file'] = lf_file - _shank_info['lf_open_file'] = open(_shank_info['lf_file'], 'wb') + _shank_info["lf_file"] = lf_file + _shank_info["lf_open_file"] = open(_shank_info["lf_file"], "wb") - shank_info[f'shank{sh}'] = _shank_info + shank_info[f"shank{sh}"] = _shank_info else: self.already_exists = True - _logger.warning('LF file for this probe already exists, ' - 'to force reprocessing set overwrite to True') + _logger.warning( + "LF file for this probe already exists, " + "to force reprocessing set overwrite to True" + ) return shank_info @@ -494,7 +546,7 @@ def check_NP24(self): :return: """ for sh in self.shank_info.keys(): - self.shank_info[sh]['sr'] = spikeglx.Reader(self.shank_info[sh]['ap_file']) + self.shank_info[sh]["sr"] = spikeglx.Reader(self.shank_info[sh]["ap_file"]) wg = WindowGenerator(self.nsamples, self.samples_window, 0) for first, last in wg.firstlast: @@ -502,16 +554,20 @@ def check_NP24(self): chunk = np.zeros_like(expected) for ish, sh in enumerate(self.shank_info.keys()): if ish == 0: 
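
# [Editor's note] The split/rebuild bookkeeping checked below, in miniature
# (not part of the patch). Every shank file carries the sync trace as its last
# channel, so only the first shank contributes it when the full-channel chunk
# is rebuilt. The contiguous channel blocks are illustrative; the real mapping
# comes from the .meta file.
import numpy as np

nc, ns = 385, 1000                              # 384 AP channels + 1 sync
original = np.random.randn(ns, nc).astype(np.float32)
chns = {sh: np.r_[np.arange(sh * 96, (sh + 1) * 96), nc - 1] for sh in range(4)}
per_shank = {sh: original[:, chns[sh]] for sh in range(4)}

rebuilt = np.zeros_like(original)
for ish, sh in enumerate(per_shank):
    if ish == 0:                                # shank 0 keeps its sync trace
        rebuilt[:, chns[sh]] = per_shank[sh]
    else:                                       # drop the duplicated sync trace
        rebuilt[:, chns[sh][:-1]] = per_shank[sh][:, :-1]
assert np.array_equal(original, rebuilt)
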
- chunk[:, self.shank_info[sh]['chns']] = self.shank_info[sh]['sr'][first:last, :] + chunk[:, self.shank_info[sh]["chns"]] = self.shank_info[sh]["sr"][ + first:last, : + ] else: - chunk[:, self.shank_info[sh]['chns'][:-1]] = \ - self.shank_info[sh]['sr'][first:last, :-1] - assert np.array_equal(expected, chunk), \ - 'data in original file and split files do no match' + chunk[:, self.shank_info[sh]["chns"][:-1]] = self.shank_info[sh][ + "sr" + ][first:last, :-1] + assert np.array_equal( + expected, chunk + ), "data in original file and split files do no match" # close the sglx instances once we are done checking for sh in self.shank_info.keys(): - sr = self.shank_info[sh].pop('sr') + sr = self.shank_info[sh].pop("sr") sr.close() self.check_completed = True @@ -522,26 +578,26 @@ def compress_NP24(self, overwrite=False, **kwargs): :return: """ for sh in self.shank_info.keys(): - bin_file = self.shank_info[sh]['ap_file'] + bin_file = self.shank_info[sh]["ap_file"] if overwrite: - cbin_file = bin_file.with_suffix('.cbin') + cbin_file = bin_file.with_suffix(".cbin") cbin_file.unlink() sr_ap = spikeglx.Reader(bin_file) cbin_file = sr_ap.compress_file(**kwargs) sr_ap.close() bin_file.unlink() - self.shank_info[sh]['ap_file'] = cbin_file + self.shank_info[sh]["ap_file"] = cbin_file - bin_file = self.shank_info[sh]['lf_file'] + bin_file = self.shank_info[sh]["lf_file"] if overwrite: - cbin_file = bin_file.with_suffix('.cbin') + cbin_file = bin_file.with_suffix(".cbin") cbin_file.unlink() sr_lf = spikeglx.Reader(bin_file) cbin_file = sr_lf.compress_file(**kwargs) sr_lf.close() bin_file.unlink() - self.shank_info[sh]['lf_file'] = cbin_file + self.shank_info[sh]["lf_file"] = cbin_file def compress_NP21(self, overwrite=False): """ @@ -556,15 +612,15 @@ def compress_NP21(self, overwrite=False): self.ap_file = cbin_file self.sr = spikeglx.Reader(self.ap_file) - bin_file = self.shank_info[sh]['lf_file'] + bin_file = self.shank_info[sh]["lf_file"] if overwrite: - cbin_file = bin_file.with_suffix('.cbin') + cbin_file = bin_file.with_suffix(".cbin") cbin_file.unlink() sr_lf = spikeglx.Reader(bin_file) cbin_file = sr_lf.compress_file() sr_lf.close() bin_file.unlink() - self.shank_info[sh]['lf_file'] = cbin_file + self.shank_info[sh]["lf_file"] = cbin_file def delete_NP24(self): """ @@ -573,12 +629,12 @@ def delete_NP24(self): :return: """ if self.check_completed and self.delete_original: - _logger.info(f'Removing original file in folder {self.ap_file}') + _logger.info(f"Removing original file in folder {self.ap_file}") self.sr.close() self.ap_file.unlink() # shutil.rmtree(self.ap_file.parent) # should we remove the whole folder? - def _split2shanks(self, chunk, etype='ap'): + def _split2shanks(self, chunk, etype="ap"): """ Splits the signal on the 384 channels into the individual shanks and saves to file @@ -588,10 +644,10 @@ def _split2shanks(self, chunk, etype='ap'): """ for sh in self.shank_info.keys(): - open = self.shank_info[sh][f'{etype}_open_file'] - (chunk[:, self.shank_info[sh]['chns']]).tofile(open) + open = self.shank_info[sh][f"{etype}_open_file"] + (chunk[:, self.shank_info[sh]["chns"]]).tofile(open) - def _ind2save(self, chunk, chunk_sync, wg, ratio=1, etype='ap'): + def _ind2save(self, chunk, chunk_sync, wg, ratio=1, etype="ap"): """ Determines the portion of the full chunk to save based on the window and taper used. 
Cuts off beginning and end to get rid of filtering/ decimating artefacts @@ -604,18 +660,23 @@ def _ind2save(self, chunk, chunk_sync, wg, ratio=1, etype='ap'): :return: """ - ind2save = [int(self.samples_taper * 2 / ratio), - int((self.samples_window - self.samples_taper * 2) / ratio)] + ind2save = [ + int(self.samples_taper * 2 / ratio), + int((self.samples_window - self.samples_taper * 2) / ratio), + ] if wg.iw == 0: ind2save[0] = 0 if wg.iw == wg.nwin - 1: ind2save[1] = int(self.samples_window / ratio) - chunk2save = (np.c_[chunk[:, slice(*ind2save)].T / - self.sr.channel_conversion_sample2v[etype][:self.napch], - chunk_sync[:, slice(*ind2save)].T / - self.sr.channel_conversion_sample2v[etype][self.idxsyncch:]]).\ - astype(np.int16) + chunk2save = ( + np.c_[ + chunk[:, slice(*ind2save)].T + / self.sr.channel_conversion_sample2v[etype][: self.napch], + chunk_sync[:, slice(*ind2save)].T + / self.sr.channel_conversion_sample2v[etype][self.idxsyncch :], + ] + ).astype(np.int16) return chunk2save @@ -628,10 +689,10 @@ def extract_lfp(self, chunk): :return: LFP signal """ - chunk[:, :self.samples_taper] *= self.taper[:self.samples_taper] - chunk[:, -self.samples_taper:] *= self.taper[self.samples_taper:] + chunk[:, : self.samples_taper] *= self.taper[: self.samples_taper] + chunk[:, -self.samples_taper :] *= self.taper[self.samples_taper :] chunk = scipy.signal.sosfiltfilt(self.sos_lp, chunk) - chunk = chunk[:, ::self.ratio] + chunk = chunk[:, :: self.ratio] return chunk def extract_lfp_sync(self, chunk_sync): @@ -642,10 +703,10 @@ def extract_lfp_sync(self, chunk_sync): :return: downsampled sync signal """ - chunk_sync = chunk_sync[:, ::self.ratio] + chunk_sync = chunk_sync[:, :: self.ratio] return chunk_sync - def _closefiles(self, etype='ap'): + def _closefiles(self, etype="ap"): """ Close .bin files that were being written to @@ -654,7 +715,7 @@ def _closefiles(self, etype='ap'): """ for sh in self.shank_info.keys(): - open = self.shank_info[sh].pop(f'{etype}_open_file') + open = self.shank_info[sh].pop(f"{etype}_open_file") open.close() def _writemetadata_ap(self): @@ -667,19 +728,20 @@ def _writemetadata_ap(self): """ for sh in self.shank_info.keys(): - n_chns = len(self.shank_info[sh]['chns']) + n_chns = len(self.shank_info[sh]["chns"]) # First for the ap file meta_shank = copy.deepcopy(self.sr.meta) - meta_shank['acqApLfSy'][0] = n_chns - 1 - meta_shank['snsApLfSy'][0] = n_chns - 1 - meta_shank['nSavedChans'] = n_chns - meta_shank['fileSizeBytes'] = self.shank_info[sh]['ap_file'].stat().st_size - meta_shank['snsSaveChanSubset_orig'] = \ - spikeglx._get_savedChans_subset(self.shank_info[sh]['chns']) - meta_shank['snsSaveChanSubset'] = f'0:{n_chns-1}' - meta_shank['original_meta'] = False - meta_shank[f'{self.np_version}_shank'] = int(sh[-1]) - meta_file = self.shank_info[sh]['ap_file'].with_suffix('.meta') + meta_shank["acqApLfSy"][0] = n_chns - 1 + meta_shank["snsApLfSy"][0] = n_chns - 1 + meta_shank["nSavedChans"] = n_chns + meta_shank["fileSizeBytes"] = self.shank_info[sh]["ap_file"].stat().st_size + meta_shank["snsSaveChanSubset_orig"] = spikeglx._get_savedChans_subset( + self.shank_info[sh]["chns"] + ) + meta_shank["snsSaveChanSubset"] = f"0:{n_chns-1}" + meta_shank["original_meta"] = False + meta_shank[f"{self.np_version}_shank"] = int(sh[-1]) + meta_file = self.shank_info[sh]["ap_file"].with_suffix(".meta") spikeglx.write_meta_data(meta_shank, meta_file) def _writemetadata_lf(self): @@ -692,22 +754,23 @@ def _writemetadata_lf(self): """ for sh in self.shank_info.keys(): - n_chns 
= len(self.shank_info[sh]['chns']) + n_chns = len(self.shank_info[sh]["chns"]) meta_shank = copy.deepcopy(self.sr.meta) - meta_shank['acqApLfSy'][0] = 0 - meta_shank['acqApLfSy'][1] = n_chns - 1 - meta_shank['snsApLfSy'][0] = 0 - meta_shank['snsApLfSy'][1] = n_chns - 1 - meta_shank['fileSizeBytes'] = self.shank_info[sh]['lf_file'].stat().st_size - meta_shank['imSampRate'] = self.fs_lf - if self.np_version == 'NP2.4': - meta_shank['snsSaveChanSubset_orig'] = \ - spikeglx._get_savedChans_subset(self.shank_info[sh]['chns']) - meta_shank['snsSaveChanSubset'] = f'0:{n_chns-1}' - meta_shank['nSavedChans'] = n_chns - meta_shank['original_meta'] = False - meta_shank[f'{self.np_version}_shank'] = int(sh[-1]) - meta_file = self.shank_info[sh]['lf_file'].with_suffix('.meta') + meta_shank["acqApLfSy"][0] = 0 + meta_shank["acqApLfSy"][1] = n_chns - 1 + meta_shank["snsApLfSy"][0] = 0 + meta_shank["snsApLfSy"][1] = n_chns - 1 + meta_shank["fileSizeBytes"] = self.shank_info[sh]["lf_file"].stat().st_size + meta_shank["imSampRate"] = self.fs_lf + if self.np_version == "NP2.4": + meta_shank["snsSaveChanSubset_orig"] = spikeglx._get_savedChans_subset( + self.shank_info[sh]["chns"] + ) + meta_shank["snsSaveChanSubset"] = f"0:{n_chns-1}" + meta_shank["nSavedChans"] = n_chns + meta_shank["original_meta"] = False + meta_shank[f"{self.np_version}_shank"] = int(sh[-1]) + meta_file = self.shank_info[sh]["lf_file"].with_suffix(".meta") spikeglx.write_meta_data(meta_shank, meta_file) def get_processed_files_NP24(self): @@ -717,37 +780,36 @@ def get_processed_files_NP24(self): """ out_files = [] for sh in self.shank_info.keys(): - ap_file = self.shank_info[sh]['ap_file'] + ap_file = self.shank_info[sh]["ap_file"] out_files.append(ap_file) - out_files.append(ap_file.with_suffix('.meta')) + out_files.append(ap_file.with_suffix(".meta")) - if ap_file.suffix == '.cbin': - out_files.append(ap_file.with_suffix('.ch')) + if ap_file.suffix == ".cbin": + out_files.append(ap_file.with_suffix(".ch")) - lf_file = self.shank_info[sh]['lf_file'] + lf_file = self.shank_info[sh]["lf_file"] out_files.append(lf_file) - out_files.append(lf_file.with_suffix('.meta')) + out_files.append(lf_file.with_suffix(".meta")) - if lf_file.suffix == '.cbin': - out_files.append(lf_file.with_suffix('.ch')) + if lf_file.suffix == ".cbin": + out_files.append(lf_file.with_suffix(".ch")) return out_files def get_processed_files_NP21(self): - out_files = [] for sh in self.shank_info.keys(): - lf_file = self.shank_info[sh]['lf_file'] + lf_file = self.shank_info[sh]["lf_file"] out_files.append(lf_file) - out_files.append(lf_file.with_suffix('.meta')) + out_files.append(lf_file.with_suffix(".meta")) - if lf_file.suffix == '.cbin': - out_files.append(lf_file.with_suffix('.ch')) + if lf_file.suffix == ".cbin": + out_files.append(lf_file.with_suffix(".ch")) out_files.append(self.ap_file) - out_files.append(self.ap_file.with_suffix('.meta')) - if self.ap_file.suffix == '.cbin': - out_files.append(self.ap_file.with_suffix('.ch')) + out_files.append(self.ap_file.with_suffix(".meta")) + if self.ap_file.suffix == ".cbin": + out_files.append(self.ap_file.with_suffix(".ch")) return out_files @@ -794,42 +856,50 @@ def _prepare_files(self): :return: """ - folders = list(self.data_path.glob(f'{self.pname}*')) + folders = list(self.data_path.glob(f"{self.pname}*")) # remove any probe00 folder if present - folders = sorted([fold for fold in folders if fold != self.data_path.joinpath(f'{self.pname}')]) + folders = sorted( + [ + fold + for fold in folders + if fold != 
self.data_path.joinpath(f"{self.pname}") + ] + ) # TODO check the meta data - meta_file = next(folders[0].glob('*ap.meta')) + meta_file = next(folders[0].glob("*ap.meta")) meta_info = spikeglx.read_meta_data(meta_file) self.np_version = spikeglx._get_neuropixel_version_from_meta(meta_info) - if self.np_version != 'NP2.4': - _logger.warning('Not Neuropixel 2.4 nothing to do') + if self.np_version != "NP2.4": + _logger.warning("Not Neuropixel 2.4 nothing to do") return - ap_file = next(folders[0].glob('*ap.*bin')) - self.save_file = self.probe_path.joinpath(ap_file.name).with_suffix('.bin') + ap_file = next(folders[0].glob("*ap.*bin")) + self.save_file = self.probe_path.joinpath(ap_file.name).with_suffix(".bin") # note we use the private method here to make sure we get all of the original channels, # as the .geometry() method returns only channels pertaining to the shank chn_info = spikeglx._map_channels_from_meta(meta_info) - expected_shanks = np.unique(chn_info['shank']) + expected_shanks = np.unique(chn_info["shank"]) if len(folders) != len(expected_shanks): - _logger.warning('Number of expected subfolders and number of shanks do not match') + _logger.warning( + "Number of expected subfolders and number of shanks do not match" + ) return shank_info = {} for iF, fold in enumerate(folders): - ap_file = next(fold.glob('*ap.*bin')) + ap_file = next(fold.glob("*ap.*bin")) _shank_info = {} - _shank_info['ap_file'] = ap_file + _shank_info["ap_file"] = ap_file sr = spikeglx.Reader(ap_file) - sh = sr.meta.get(f'{self.np_version}_shank') - _shank_info['sr'] = sr - _shank_info['chns'] = self._get_chans(sr.meta) - assert all(_shank_info['chns'][:-1] == np.where(chn_info['shank'] == sh)[0]) - shank_info[f'shank{iF}'] = _shank_info + sh = sr.meta.get(f"{self.np_version}_shank") + _shank_info["sr"] = sr + _shank_info["chns"] = self._get_chans(sr.meta) + assert all(_shank_info["chns"][:-1] == np.where(chn_info["shank"] == sh)[0]) + shank_info[f"shank{iF}"] = _shank_info return shank_info @@ -839,15 +909,15 @@ def get_params(self): :return: """ self.fs_ap = 30000 - self.nch = np.max(self.shank_info['shank0']['chns']) + 1 - self.nsamples = self.shank_info['shank0']['sr'].ns + self.nch = np.max(self.shank_info["shank0"]["chns"]) + 1 + self.nsamples = self.shank_info["shank0"]["sr"].ns self.samples_window = 2 * self.fs_ap def _get_chans(self, meta): - chn_subset = meta.get('snsSaveChanSubset_orig') - chn_subset = chn_subset.split(',') + chn_subset = meta.get("snsSaveChanSubset_orig") + chn_subset = chn_subset.split(",") for ich, ch_sub in enumerate(chn_subset): - sub = ch_sub.split(':') + sub = ch_sub.split(":") if len(sub) > 1: chns = np.arange(int(sub[0]), int(sub[1]) + 1) else: @@ -866,7 +936,7 @@ def _reconstruct(self): :return: """ - file_out = open(self.save_file, 'wb') + file_out = open(self.save_file, "wb") wg = WindowGenerator(self.nsamples, self.samples_window, 0) for first, last in wg.firstlast: @@ -874,16 +944,19 @@ def _reconstruct(self): chunk = np.zeros((ns, self.nch), dtype=np.int16) for ish, sh in enumerate(self.shank_info.keys()): if ish == 0: - chunk[:, self.shank_info[sh]['chns']] = self.shank_info[sh]['sr']._raw[first:last, :] + chunk[:, self.shank_info[sh]["chns"]] = self.shank_info[sh][ + "sr" + ]._raw[first:last, :] else: - chunk[:, self.shank_info[sh]['chns'][:-1]] = \ - self.shank_info[sh]['sr']._raw[first:last, :-1] + chunk[:, self.shank_info[sh]["chns"][:-1]] = self.shank_info[sh][ + "sr" + ]._raw[first:last, :-1] chunk.tofile(file_out) # close the sglx instances once we are done 
converting for sh in self.shank_info.keys(): - sr = self.shank_info[sh].pop('sr') + sr = self.shank_info[sh].pop("sr") sr.close() file_out.close() @@ -898,22 +971,24 @@ def write_metadata(self): """ # see if the meta file already exists - meta_file = self.save_file.with_suffix('.meta') + meta_file = self.save_file.with_suffix(".meta") if meta_file.exists(): meta_info = spikeglx.read_meta_data(meta_file) - if meta_info['fileSizeBytes'] == self.save_file.stat().st_size: + if meta_info["fileSizeBytes"] == self.save_file.stat().st_size: _logger.info('Meta file already present won"t overwrite') return # First for the ap file - meta_shank = spikeglx.read_meta_data(self.shank_info['shank0']['ap_file'].with_suffix('.meta')) - meta_shank['acqApLfSy'][0] = self.nch - 1 - meta_shank['snsApLfSy'][0] = self.nch - 1 - meta_shank['nSavedChans'] = self.nch - meta_shank['fileSizeBytes'] = self.save_file.stat().st_size - meta_shank['snsSaveChanSubset'] = f'0:{self.nch - 1}' - _ = meta_shank.pop(f'{self.np_version}_shank') - _ = meta_shank.pop('snsSaveChanSubset_orig') + meta_shank = spikeglx.read_meta_data( + self.shank_info["shank0"]["ap_file"].with_suffix(".meta") + ) + meta_shank["acqApLfSy"][0] = self.nch - 1 + meta_shank["snsApLfSy"][0] = self.nch - 1 + meta_shank["nSavedChans"] = self.nch + meta_shank["fileSizeBytes"] = self.save_file.stat().st_size + meta_shank["snsSaveChanSubset"] = f"0:{self.nch - 1}" + _ = meta_shank.pop(f"{self.np_version}_shank") + _ = meta_shank.pop("snsSaveChanSubset_orig") spikeglx.write_meta_data(meta_shank, meta_file) diff --git a/src/neurowaveforms/model.py b/src/neurowaveforms/model.py index c35ded2..6e43e93 100644 --- a/src/neurowaveforms/model.py +++ b/src/neurowaveforms/model.py @@ -2,7 +2,9 @@ from ibldsp.fourier import fshift -def generate_waveform(spike=None, sxy=None, wxy=None, fs=30000, vertical_velocity_mps=3): +def generate_waveform( + spike=None, sxy=None, wxy=None, fs=30000, vertical_velocity_mps=3 +): """ Generate a waveform from a spike and a set of coordinates :param spike: the single trace spike waveform @@ -13,89 +15,186 @@ def generate_waveform(spike=None, sxy=None, wxy=None, fs=30000, vertical_velocit :return: the generated waveform ns by ntraces """ # spike coordinates - sxy = np.array([43., 1940., 0.]) if sxy is None else sxy + sxy = np.array([43.0, 1940.0, 0.0]) if sxy is None else sxy # generated traces coordinates if wxy is None: - wxy = np.array([ - [43., 1740., 0.], - [59., 1760., 0.], - [27., 1760., 0.], - [43., 1780., 0.], - [11., 1780., 0.], - [59., 1800., 0.], - [27., 1800., 0.], - [43., 1820., 0.], - [11., 1820., 0.], - [59., 1840., 0.], - [27., 1840., 0.], - [43., 1860., 0.], - [11., 1860., 0.], - [59., 1880., 0.], - [27., 1880., 0.], - [43., 1900., 0.], - [11., 1900., 0.], - [59., 1920., 0.], - [27., 1920., 0.], - [43., 1940., 0.], - [11., 1940., 0.], - [59., 1960., 0.], - [27., 1960., 0.], - [43., 1980., 0.], - [11., 1980., 0.], - [59., 2000., 0.], - [27., 2000., 0.], - [43., 2020., 0.], - [11., 2020., 0.], - [59., 2040., 0.], - [27., 2040., 0.], - [43., 2060., 0.], - [11., 2060., 0.], - [59., 2080., 0.], - [27., 2080., 0.], - [43., 2100., 0.], - [11., 2100., 0.], - [59., 2120., 0.], - [27., 2120., 0.], - [43., 2140., 0.]]) + wxy = np.array( + [ + [43.0, 1740.0, 0.0], + [59.0, 1760.0, 0.0], + [27.0, 1760.0, 0.0], + [43.0, 1780.0, 0.0], + [11.0, 1780.0, 0.0], + [59.0, 1800.0, 0.0], + [27.0, 1800.0, 0.0], + [43.0, 1820.0, 0.0], + [11.0, 1820.0, 0.0], + [59.0, 1840.0, 0.0], + [27.0, 1840.0, 0.0], + [43.0, 1860.0, 0.0], + [11.0, 1860.0, 0.0], 
+ [59.0, 1880.0, 0.0], + [27.0, 1880.0, 0.0], + [43.0, 1900.0, 0.0], + [11.0, 1900.0, 0.0], + [59.0, 1920.0, 0.0], + [27.0, 1920.0, 0.0], + [43.0, 1940.0, 0.0], + [11.0, 1940.0, 0.0], + [59.0, 1960.0, 0.0], + [27.0, 1960.0, 0.0], + [43.0, 1980.0, 0.0], + [11.0, 1980.0, 0.0], + [59.0, 2000.0, 0.0], + [27.0, 2000.0, 0.0], + [43.0, 2020.0, 0.0], + [11.0, 2020.0, 0.0], + [59.0, 2040.0, 0.0], + [27.0, 2040.0, 0.0], + [43.0, 2060.0, 0.0], + [11.0, 2060.0, 0.0], + [59.0, 2080.0, 0.0], + [27.0, 2080.0, 0.0], + [43.0, 2100.0, 0.0], + [11.0, 2100.0, 0.0], + [59.0, 2120.0, 0.0], + [27.0, 2120.0, 0.0], + [43.0, 2140.0, 0.0], + ] + ) # spike waveform if spike is None: - spike = np.array([ - 1.39729483e-02, 8.35108757e-03, 1.19482297e-02, 8.70722998e-03, - 7.36843236e-03, -1.13049131e-02, -1.55959455e-02, -2.61027571e-02, - -2.17338502e-02, -4.38232124e-02, -5.93647808e-02, -7.47340098e-02, - -9.37892646e-02, -1.18461445e-01, -1.39234722e-01, -1.30095094e-01, - -1.10826254e-01, -8.66197199e-02, -9.47982967e-02, -8.71859193e-02, - 3.44982445e-02, 1.14850193e-01, 1.52717680e-01, 1.71819285e-01, - 1.89162567e-01, 1.66016370e-01, 1.77806437e-01, 1.37514159e-01, - 1.76143244e-01, 1.78440601e-01, 1.70397654e-01, 1.42554864e-01, - 1.80504188e-01, 2.94411600e-01, 4.22272682e-01, 4.51008588e-01, - 3.66263747e-01, 1.88719124e-01, -2.11601019e-01, -9.27919567e-01, - -1.91966367e+00, -2.83236146e+00, -3.26066566e+00, -2.75487232e+00, - -1.70523679e+00, -5.07467031e-01, 4.15351689e-01, 1.14206159e+00, - 1.42395592e+00, 1.47938919e+00, 1.48601341e+00, 1.39127147e+00, - 1.34073710e+00, 1.21446955e+00, 1.07075334e+00, 9.82654214e-01, - 9.20302153e-01, 7.62362599e-01, 6.83112204e-01, 5.65638483e-01, - 4.95534867e-01, 4.45296884e-01, 3.31838697e-01, 1.83607757e-01, - 1.10464394e-01, 7.94331133e-02, 5.71388900e-02, -1.53203905e-02, - -1.13413632e-01, -2.63738900e-01, -2.96958297e-01, -2.99410105e-01, - -3.27556312e-01, -4.34036553e-01, -5.66587210e-01, -5.50931454e-01, - -4.83926237e-01, -3.96159559e-01, -3.59628379e-01, -2.93378174e-01, - -2.23388135e-01, -1.75207376e-01, -1.44064426e-01, -8.60679895e-02, - -5.16730249e-02, -6.04236871e-02, -7.13021904e-02, -5.77894375e-02, - -5.49767427e-02, -5.17059378e-02, -3.11024077e-02, -2.73740329e-02, - -3.09202522e-02, -3.67176980e-02, -3.99643928e-02, -5.43142855e-02, - -6.30898550e-02, -6.07964136e-02, -4.08532396e-02, -2.44005471e-02, - -3.96704227e-02, -1.90648790e-02, 9.41569358e-03, 3.47820818e-02, - 4.08176184e-02, 3.42404768e-02, 3.01315673e-02, 2.90315691e-02, - 2.64853500e-02, 2.18018480e-02, 1.19765718e-02, 4.67543490e-03, - 2.74471682e-03, -2.62711023e-04, 1.84994331e-03, 6.98080519e-03, - 1.11559704e-02, 1.33141074e-02, 1.58480220e-02, 1.66855101e-02, - 1.60783399e-02], dtype=np.float32) + spike = np.array( + [ + 1.39729483e-02, + 8.35108757e-03, + 1.19482297e-02, + 8.70722998e-03, + 7.36843236e-03, + -1.13049131e-02, + -1.55959455e-02, + -2.61027571e-02, + -2.17338502e-02, + -4.38232124e-02, + -5.93647808e-02, + -7.47340098e-02, + -9.37892646e-02, + -1.18461445e-01, + -1.39234722e-01, + -1.30095094e-01, + -1.10826254e-01, + -8.66197199e-02, + -9.47982967e-02, + -8.71859193e-02, + 3.44982445e-02, + 1.14850193e-01, + 1.52717680e-01, + 1.71819285e-01, + 1.89162567e-01, + 1.66016370e-01, + 1.77806437e-01, + 1.37514159e-01, + 1.76143244e-01, + 1.78440601e-01, + 1.70397654e-01, + 1.42554864e-01, + 1.80504188e-01, + 2.94411600e-01, + 4.22272682e-01, + 4.51008588e-01, + 3.66263747e-01, + 1.88719124e-01, + -2.11601019e-01, + -9.27919567e-01, + -1.91966367e00, + 
-2.83236146e00, + -3.26066566e00, + -2.75487232e00, + -1.70523679e00, + -5.07467031e-01, + 4.15351689e-01, + 1.14206159e00, + 1.42395592e00, + 1.47938919e00, + 1.48601341e00, + 1.39127147e00, + 1.34073710e00, + 1.21446955e00, + 1.07075334e00, + 9.82654214e-01, + 9.20302153e-01, + 7.62362599e-01, + 6.83112204e-01, + 5.65638483e-01, + 4.95534867e-01, + 4.45296884e-01, + 3.31838697e-01, + 1.83607757e-01, + 1.10464394e-01, + 7.94331133e-02, + 5.71388900e-02, + -1.53203905e-02, + -1.13413632e-01, + -2.63738900e-01, + -2.96958297e-01, + -2.99410105e-01, + -3.27556312e-01, + -4.34036553e-01, + -5.66587210e-01, + -5.50931454e-01, + -4.83926237e-01, + -3.96159559e-01, + -3.59628379e-01, + -2.93378174e-01, + -2.23388135e-01, + -1.75207376e-01, + -1.44064426e-01, + -8.60679895e-02, + -5.16730249e-02, + -6.04236871e-02, + -7.13021904e-02, + -5.77894375e-02, + -5.49767427e-02, + -5.17059378e-02, + -3.11024077e-02, + -2.73740329e-02, + -3.09202522e-02, + -3.67176980e-02, + -3.99643928e-02, + -5.43142855e-02, + -6.30898550e-02, + -6.07964136e-02, + -4.08532396e-02, + -2.44005471e-02, + -3.96704227e-02, + -1.90648790e-02, + 9.41569358e-03, + 3.47820818e-02, + 4.08176184e-02, + 3.42404768e-02, + 3.01315673e-02, + 2.90315691e-02, + 2.64853500e-02, + 2.18018480e-02, + 1.19765718e-02, + 4.67543490e-03, + 2.74471682e-03, + -2.62711023e-04, + 1.84994331e-03, + 6.98080519e-03, + 1.11559704e-02, + 1.33141074e-02, + 1.58480220e-02, + 1.66855101e-02, + 1.60783399e-02, + ], + dtype=np.float32, + ) r = np.sqrt(np.sum(np.square(sxy - wxy), axis=1)) sample_shift = (wxy[:, 1] - np.mean(wxy[:, 1])) / 1e6 * vertical_velocity_mps * fs # shperical divergence - wav = (spike * 1 / (r[..., np.newaxis] + 50)**2) + wav = spike * 1 / (r[..., np.newaxis] + 50) ** 2 wav = fshift(wav, sample_shift, axis=-1).T return wav diff --git a/src/spikeglx.py b/src/spikeglx.py index d310f98..4382c1f 100644 --- a/src/spikeglx.py +++ b/src/spikeglx.py @@ -13,7 +13,7 @@ SAMPLE_SIZE = 2 # int16 DEFAULT_BATCH_SIZE = 1e6 -_logger = logging.getLogger('ibllib') +_logger = logging.getLogger("ibllib") class Reader: @@ -38,8 +38,20 @@ class Reader: Note: To release system resources the close method must be called """ - def __init__(self, sglx_file, open=True, nc=None, ns=None, fs=None, dtype='int16', s2v=None, - nsync=None, ignore_warnings=False, meta_file=None, ch_file=None): + def __init__( + self, + sglx_file, + open=True, + nc=None, + ns=None, + fs=None, + dtype="int16", + s2v=None, + nsync=None, + ignore_warnings=False, + meta_file=None, + ch_file=None, + ): """ An interface for reading data from a SpikeGLX file :param sglx_file: Path to a SpikeGLX file (compressed or otherwise), or to a meta-data file @@ -47,14 +59,22 @@ def __init__(self, sglx_file, open=True, nc=None, ns=None, fs=None, dtype='int16 """ self.ignore_warnings = ignore_warnings sglx_file = Path(sglx_file) - meta_file = meta_file or sglx_file.with_suffix('.meta') + meta_file = meta_file or sglx_file.with_suffix(".meta") # only used if MTSCOMP compressed self.ch_file = ch_file if meta_file == sglx_file: # if a meta-data file is provided, try to get the binary file - self.file_bin = sglx_file.with_suffix('.cbin') if sglx_file.with_suffix('.cbin').exists() else None - self.file_bin = sglx_file.with_suffix('.bin') if sglx_file.with_suffix('.bin').exists() else None + self.file_bin = ( + sglx_file.with_suffix(".cbin") + if sglx_file.with_suffix(".cbin").exists() + else None + ) + self.file_bin = ( + sglx_file.with_suffix(".bin") + if sglx_file.with_suffix(".bin").exists() + else None + ) 
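
# [Editor's note] Typical Reader usage; a minimal sketch, not part of the
# patch. "sample.ap.bin" is a hypothetical file with its .meta sidecar present.
import spikeglx

with spikeglx.Reader("sample.ap.bin") as sr:
    print(sr.fs, sr.nc, sr.ns)                        # sampling rate, channels, samples
    data = sr.read(nsel=slice(0, 10000), sync=False)  # scaled to volts
    sync = sr.read_sync_digital(slice(0, 10000))      # digital sync trace
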
else: self.file_bin = sglx_file self.nbytes = self.file_bin.stat().st_size if self.file_bin else None @@ -74,7 +94,7 @@ def __init__(self, sglx_file, open=True, nc=None, ns=None, fs=None, dtype='int16 nsync = nsync or 1 err_str = "Instantiating an Reader without meta data requires providing nc, fs and nc parameters" - assert (nc is not None and fs is not None and nc is not None), err_str + assert nc is not None and fs is not None and nc is not None, err_str self.file_meta_data = None self.meta = None self._nc, self._fs, self._ns = (int(nc), int(fs), int(ns)) @@ -82,10 +102,10 @@ def __init__(self, sglx_file, open=True, nc=None, ns=None, fs=None, dtype='int16 # multiple of the file size above to determine if there is a sync or not self._nsync = nsync or 0 if s2v is None: - s2v = neuropixel.S2V_AP if self.dtype == np.dtype('int16') else 1.0 - self.channel_conversion_sample2v = {'samples': np.ones(nc) * s2v} + s2v = neuropixel.S2V_AP if self.dtype == np.dtype("int16") else 1.0 + self.channel_conversion_sample2v = {"samples": np.ones(nc) * s2v} if self._nsync > 0: - self.channel_conversion_sample2v['samples'][-nsync:] = 1 + self.channel_conversion_sample2v["samples"][-nsync:] = 1 else: # normal case we continue reading and interpreting the metadata file self.file_meta_data = meta_file @@ -100,33 +120,44 @@ def open(self): sglx_file = str(self.file_bin) if self.is_mtscomp: self._raw = mtscomp.Reader() - ch_file = self.ch_file or self.file_bin.with_suffix('.ch') + ch_file = self.ch_file or self.file_bin.with_suffix(".ch") self._raw.open(self.file_bin, ch_file) if self._raw.shape != (self.ns, self.nc): ftsec = self._raw.shape[0] / self.fs if not self.ignore_warnings: # avoid the checks for streaming data - _logger.warning(f"{sglx_file} : meta data and compressed chunks dont checkout\n" - f"File duration: expected {self.meta['fileTimeSecs']}," - f" actual {ftsec}\n" - f"Will attempt to fudge the meta-data information.") - self.meta['fileTimeSecs'] = ftsec + _logger.warning( + f"{sglx_file} : meta data and compressed chunks dont checkout\n" + f"File duration: expected {self.meta['fileTimeSecs']}," + f" actual {ftsec}\n" + f"Will attempt to fudge the meta-data information." + ) + self.meta["fileTimeSecs"] = ftsec else: if self.nc * self.ns * self.dtype.itemsize != self.nbytes: - ftsec = self.file_bin.stat().st_size / self.dtype.itemsize / self.nc / self.fs + ftsec = ( + self.file_bin.stat().st_size + / self.dtype.itemsize + / self.nc + / self.fs + ) if self.meta is not None: if not self.ignore_warnings: - _logger.warning(f"{sglx_file} : meta data and filesize do not checkout\n" - f"File size: expected {self.meta['fileSizeBytes']}," - f" actual {self.file_bin.stat().st_size}\n" - f"File duration: expected {self.meta['fileTimeSecs']}," - f" actual {ftsec}\n" - f"Will attempt to fudge the meta-data information.") - self.meta['fileTimeSecs'] = ftsec - self._raw = np.memmap(sglx_file, dtype=self.dtype, mode='r', shape=(self.ns, self.nc)) + _logger.warning( + f"{sglx_file} : meta data and filesize do not checkout\n" + f"File size: expected {self.meta['fileSizeBytes']}," + f" actual {self.file_bin.stat().st_size}\n" + f"File duration: expected {self.meta['fileTimeSecs']}," + f" actual {ftsec}\n" + f"Will attempt to fudge the meta-data information." 
+ ) + self.meta["fileTimeSecs"] = ftsec + self._raw = np.memmap( + sglx_file, dtype=self.dtype, mode="r", shape=(self.ns, self.nc) + ) def close(self): if self.is_open: - getattr(self._raw, '_mmap', self._raw).close() + getattr(self._raw, "_mmap", self._raw).close() def __enter__(self): if not self.is_open: @@ -164,17 +195,23 @@ def is_open(self): @property def is_mtscomp(self): - return 'cbin' in self.file_bin.suffix + return "cbin" in self.file_bin.suffix @property def version(self): """Gets the version string: '3A', '3B2', '3B1', 'NP2.1', 'NP2.4'""" - return None if self.meta is None else _get_neuropixel_version_from_meta(self.meta) + return ( + None if self.meta is None else _get_neuropixel_version_from_meta(self.meta) + ) @property def major_version(self): """Gets the the major version int: 1 or 2""" - return None if self.meta is None else _get_neuropixel_major_version_from_meta(self.meta) + return ( + None + if self.meta is None + else _get_neuropixel_major_version_from_meta(self.meta) + ) @property def rl(self): @@ -182,32 +219,36 @@ def rl(self): @property def type(self): - """:return: ap, lf or nidq. Useful to index dictionaries """ + """:return: ap, lf or nidq. Useful to index dictionaries""" if not self.meta: - return 'samples' + return "samples" return _get_type_from_meta(self.meta) @property def fs(self): - """ :return: sampling frequency (Hz) """ + """:return: sampling frequency (Hz)""" return self._fs if self.meta is None else _get_fs_from_meta(self.meta) @property def nc(self): - """ :return: number of channels """ + """:return: number of channels""" return self._nc if self.meta is None else _get_nchannels_from_meta(self.meta) @property def nsync(self): """:return: number of sync channels""" - return self._nsync if self.meta is None else len(_get_sync_trace_indices_from_meta(self.meta)) + return ( + self._nsync + if self.meta is None + else len(_get_sync_trace_indices_from_meta(self.meta)) + ) @property def ns(self): - """ :return: number of samples """ + """:return: number of samples""" if self.meta is None: return self._ns - return int(np.round(self.meta.get('fileTimeSecs') * self.fs)) + return int(np.round(self.meta.get("fileTimeSecs") * self.fs)) def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True): """ @@ -217,7 +258,7 @@ def read(self, nsel=slice(0, 10000), csel=slice(None), sync=True): :return: float32 array """ if not self.is_open: - raise IOError('Reader not open; call `open` before `read`') + raise IOError("Reader not open; call `open` before `read`") darray = self._raw[nsel, csel].astype(np.float32, copy=True) darray *= self.channel_conversion_sample2v[self.type][csel] if sync: @@ -246,10 +287,12 @@ def read_sync_digital(self, _slice=slice(0, 10000)): >>> sync_samples = sr.read_sync_digital(slice(0,10000)) """ if not self.is_open: - raise IOError('Reader not open; call `open` before `read`') + raise IOError("Reader not open; call `open` before `read`") if not self.meta: - _logger.warning('Sync trace not labeled in metadata. Assuming last trace') - return split_sync(self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)]) + _logger.warning("Sync trace not labeled in metadata. 
Assuming last trace") + return split_sync( + self._raw[_slice, _get_sync_trace_indices_from_meta(self.meta)] + ) def read_sync_analog(self, _slice=slice(0, 10000)): """ @@ -291,16 +334,18 @@ def compress_file(self, keep_original=True, **kwargs): :param kwargs: :return: pathlib.Path of the compressed *.cbin file """ - file_tmp = self.file_bin.with_suffix('.cbin_tmp') + file_tmp = self.file_bin.with_suffix(".cbin_tmp") assert not self.is_mtscomp - mtscomp.compress(self.file_bin, - out=file_tmp, - outmeta=self.file_bin.with_suffix('.ch'), - sample_rate=self.fs, - n_channels=self.nc, - dtype=self.dtype, - **kwargs) - file_out = file_tmp.with_suffix('.cbin') + mtscomp.compress( + self.file_bin, + out=file_tmp, + outmeta=self.file_bin.with_suffix(".ch"), + sample_rate=self.fs, + n_channels=self.nc, + dtype=self.dtype, + **kwargs, + ) + file_out = file_tmp.with_suffix(".cbin") file_tmp.rename(file_out) if not keep_original: self.file_bin.unlink() @@ -315,17 +360,19 @@ def decompress_file(self, keep_original=True, **kwargs): NB: This is not equivalent to overwrite (which replaces the output file) :return: pathlib.Path of the decompressed *.bin file """ - if 'out' not in kwargs: - kwargs['out'] = self.file_bin.with_suffix('.bin') + if "out" not in kwargs: + kwargs["out"] = self.file_bin.with_suffix(".bin") assert self.is_mtscomp - r = mtscomp.decompress(self.file_bin, self.file_bin.with_suffix('.ch'), **kwargs) + r = mtscomp.decompress( + self.file_bin, self.file_bin.with_suffix(".ch"), **kwargs + ) r.close() if not keep_original: self.close() self.file_bin.unlink() - self.file_bin.with_suffix('.ch').unlink() - self.file_bin = kwargs['out'] - return kwargs['out'] + self.file_bin.with_suffix(".ch").unlink() + self.file_bin = kwargs["out"] + return kwargs["out"] def verify_hash(self): """ @@ -333,12 +380,14 @@ def verify_hash(self): :return: boolean """ if self.is_mtscomp: - with open(self.file_bin.with_suffix('.ch')) as fid: + with open(self.file_bin.with_suffix(".ch")) as fid: mtscomp_params = json.load(fid) - sm = mtscomp_params.get('sha1_compressed', None) + sm = mtscomp_params.get("sha1_compressed", None) if sm is None: - _logger.warning("SHA1 hash is not implemented for compressed ephys. To check " - "the spikeglx acquisition hash, uncompress the file first !") + _logger.warning( + "SHA1 hash is not implemented for compressed ephys. To check " + "the spikeglx acquisition hash, uncompress the file first !" + ) return True sm = sm.upper() else: @@ -353,6 +402,12 @@ def verify_hash(self): return sm == sc +class OnlineReader(Reader): + @property + def ns(self): + return int(self.file_bin.stat().st_size / self.dtype.itemsize / self.nc) + + def read(sglx_file, first_sample=0, last_sample=10000): """ Function to read from a spikeglx binary file without instantiating the class. 
@@ -382,17 +437,17 @@ def read_meta_data(md_file): md = fid.read() d = {} for a in md.splitlines(): - k, v = a.split('=', maxsplit=1) + k, v = a.split("=", maxsplit=1) # if all numbers, try to interpret the string - if v and re.fullmatch('[0-9,.]*', v) and v.count('.') < 2: - v = [float(val) for val in v.split(',')] + if v and re.fullmatch("[0-9,.]*", v) and v.count(".") < 2: + v = [float(val) for val in v.split(",")] # scalars should not be nested if len(v) == 1: v = v[0] # tildes in keynames removed - d[k.replace('~', '')] = v - d['neuropixelVersion'] = _get_neuropixel_version_from_meta(d) - d['serial'] = _get_serial_number_from_meta(d) + d[k.replace("~", "")] = v + d["neuropixelVersion"] = _get_neuropixel_version_from_meta(d) + d["serial"] = _get_serial_number_from_meta(d) return Bunch(d) @@ -403,14 +458,14 @@ def write_meta_data(md, md_file): :param md_file: file to save meta data to :return: """ - with open(md_file, 'w') as fid: + with open(md_file, "w") as fid: for key, val in md.items(): if isinstance(val, list): - val = ','.join([str(int(v)) for v in val]) + val = ",".join([str(int(v)) for v in val]) if isinstance(val, float): if val.is_integer(): val = int(val) - fid.write(f'{key}={val}\n') + fid.write(f"{key}={val}\n") def _get_savedChans_subset(chns): @@ -420,11 +475,14 @@ def _get_savedChans_subset(chns): :return: """ chn_grps = np.r_[0, np.where(np.diff(chns) != 1)[0] + 1, len(chns)] - chn_subset = [f'{chns[chn_grps[i]]}:{chns[chn_grps[i + 1] - 1]}' - if chn_grps[i] < len(chns) - 1 else f'{chns[chn_grps[i]]}' - for i in range(len(chn_grps) - 1)] + chn_subset = [ + f"{chns[chn_grps[i]]}:{chns[chn_grps[i + 1] - 1]}" + if chn_grps[i] < len(chns) - 1 + else f"{chns[chn_grps[i]]}" + for i in range(len(chn_grps) - 1) + ] - return ','.join([sub for sub in chn_subset]) + return ",".join([sub for sub in chn_subset]) def _get_serial_number_from_meta(md): @@ -432,13 +490,13 @@ def _get_serial_number_from_meta(md): Get neuropixel serial number from the metadata dictionary """ # imProbeSN for 3A, imDatPrb_sn for 3B2, None for nidq 3B2 - serial = md.get('imProbeSN') or md.get('imDatPrb_sn') + serial = md.get("imProbeSN") or md.get("imDatPrb_sn") if serial: return int(serial) def _get_neuropixel_major_version_from_meta(md): - MAJOR_VERSION = {'3A': 1, '3B2': 1, '3B1': 1, 'NP2.1': 2, 'NP2.4': 2.4} + MAJOR_VERSION = {"3A": 1, "3B2": 1, "3B1": 1, "NP2.1": 2, "NP2.4": 2.4} version = _get_neuropixel_version_from_meta(md) if version is not None: return MAJOR_VERSION[version] @@ -448,21 +506,21 @@ def _get_neuropixel_version_from_meta(md): """ Get neuropixel version tag (3A, 3B1, 3B2) from the metadata dictionary """ - if 'typeEnabled' in md.keys(): - return '3A' - prb_type = md.get('imDatPrb_type') + if "typeEnabled" in md.keys(): + return "3A" + prb_type = md.get("imDatPrb_type") # Neuropixel 1.0 either 3B1 or 3B2 (ask Olivier about 3B1) if prb_type == 0: - if 'imDatPrb_port' in md.keys() and 'imDatPrb_slot' in md.keys(): - return '3B2' + if "imDatPrb_port" in md.keys() and "imDatPrb_slot" in md.keys(): + return "3B2" else: - return '3B1' + return "3B1" # Neuropixel 2.0 single shank if prb_type == 21: - return 'NP2.1' + return "NP2.1" # Neuropixel 2.0 four shank if prb_type == 24: - return 'NP2.4' + return "NP2.4" def _get_sync_trace_indices_from_meta(md): @@ -471,10 +529,10 @@ def _get_sync_trace_indices_from_meta(md): """ typ = _get_type_from_meta(md) ntr = int(_get_nchannels_from_meta(md)) - if typ == 'nidq': - nsync = int(md.get('snsMnMaXaDw')[-1]) - elif typ in ['lf', 'ap']: - nsync = 
int(md.get('snsApLfSy')[2]) + if typ == "nidq": + nsync = int(md.get("snsMnMaXaDw")[-1]) + elif typ in ["lf", "ap"]: + nsync = int(md.get("snsApLfSy")[2]) return list(range(ntr - nsync, ntr)) @@ -483,40 +541,40 @@ def _get_analog_sync_trace_indices_from_meta(md): Returns a list containing indices of the sync traces in the original array """ typ = _get_type_from_meta(md) - if typ != 'nidq': + if typ != "nidq": return [] - tr = md.get('snsMnMaXaDw') + tr = md.get("snsMnMaXaDw") nsa = int(tr[-2]) return list(range(int(sum(tr[0:2])), int(sum(tr[0:2])) + nsa)) def _get_nchannels_from_meta(md): - return int(md.get('nSavedChans')) + return int(md.get("nSavedChans")) def _get_nshanks_from_meta(md): th = _geometry_from_meta(md) - return len(np.unique(th['shank'])) + return len(np.unique(th["shank"])) def _get_fs_from_meta(md): - if md.get('typeThis') == 'imec': - return md.get('imSampRate') + if md.get("typeThis") == "imec": + return md.get("imSampRate") else: - return md.get('niSampRate') + return md.get("niSampRate") def _get_type_from_meta(md): """ Get neuropixel data type (ap, lf or nidq) from metadata """ - snsApLfSy = md.get('snsApLfSy', [-1, -1, -1]) + snsApLfSy = md.get("snsApLfSy", [-1, -1, -1]) if snsApLfSy[0] == 0 and snsApLfSy[1] != 0: - return 'lf' + return "lf" elif snsApLfSy[0] != 0 and snsApLfSy[1] == 0: - return 'ap' - elif snsApLfSy == [-1, -1, -1] and md.get('typeThis', None) == 'nidq': - return 'nidq' + return "ap" + elif snsApLfSy == [-1, -1, -1] and md.get("typeThis", None) == "nidq": + return "nidq" def _split_geometry_into_shanks(th, meta_data): @@ -526,8 +584,8 @@ def _split_geometry_into_shanks(th, meta_data): :param meta_data: :return: """ - if 'NP2.4_shank' in meta_data.keys(): - shank_idx = np.where(th['shank'] == int(meta_data['NP2.4_shank']))[0] + if "NP2.4_shank" in meta_data.keys(): + shank_idx = np.where(th["shank"] == int(meta_data["NP2.4_shank"]))[0] th = {key: th[key][shank_idx] for key in th.keys()} return th @@ -542,25 +600,35 @@ def _geometry_from_meta(meta_data): cm = _map_channels_from_meta(meta_data) major_version = _get_neuropixel_major_version_from_meta(meta_data) if cm is None: - _logger.warning("Meta data doesn't have geometry (snsShankMap/snsGeomMap field), returning defaults") + _logger.warning( + "Meta data doesn't have geometry (snsShankMap/snsGeomMap field), returning defaults" + ) th = neuropixel.trace_header(version=major_version) - th['flag'] = th['x'] * 0 + 1. + th["flag"] = th["x"] * 0 + 1.0 return th th = cm.copy() # as of 2023-04 spikeglx stores only x, y coordinates of sites in UM and no col / row. 
Here # we convert to col / row for consistency with previous versions - if 'x' in cm.keys(): - if major_version == 1: # the spike sorting channel maps have a flipped version of the channel map - th['x'] = 70 - (th['x']) - th['y'] += 20 # there is a 20um offset between the probe tip and the first site in the coordinate conversion - th.update(neuropixel.xy2rc(th['x'], th['y'], version=major_version)) + if "x" in cm.keys(): + if ( + major_version == 1 + ): # the spike sorting channel maps have a flipped version of the channel map + th["x"] = 70 - (th["x"]) + th[ + "y" + ] += 20 # there is a 20um offset between the probe tip and the first site in the coordinate conversion + th.update(neuropixel.xy2rc(th["x"], th["y"], version=major_version)) else: - if major_version == 1: # the spike sorting channel maps have a flipped version of the channel map - th['col'] = - cm['col'] * 2 + 2 + np.mod(cm['row'], 2) - th.update(neuropixel.rc2xy(th['row'], th['col'], version=major_version)) - th['sample_shift'], th['adc'] = neuropixel.adc_shifts(version=major_version, nc=th['col'].size) + if ( + major_version == 1 + ): # the spike sorting channel maps have a flipped version of the channel map + th["col"] = -cm["col"] * 2 + 2 + np.mod(cm["row"], 2) + th.update(neuropixel.rc2xy(th["row"], th["col"], version=major_version)) + th["sample_shift"], th["adc"] = neuropixel.adc_shifts( + version=major_version, nc=th["col"].size + ) th = _split_geometry_into_shanks(th, meta_data) - th['ind'] = np.arange(th['col'].size) + th["ind"] = np.arange(th["col"].size) return th @@ -581,20 +649,20 @@ def _map_channels_from_meta(meta_data): :param meta_data: dictionary output from spikeglx.read_meta_data :return: dictionary of arrays 'shank', 'col', 'row', 'flag', one value per active site """ - if 'snsShankMap' in meta_data.keys(): - chmap = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsShankMap']) - key_names = {'shank': 0, 'col': 1, 'row': 2, 'flag': 3} - elif 'snsGeomMap' in meta_data.keys(): - chmap = re.findall(r'([0-9]*:[0-9]*:[0-9]*:[0-9]*)', meta_data['snsGeomMap']) - key_names = {'shank': 0, 'x': 1, 'y': 2, 'flag': 3} + if "snsShankMap" in meta_data.keys(): + chmap = re.findall(r"([0-9]*:[0-9]*:[0-9]*:[0-9]*)", meta_data["snsShankMap"]) + key_names = {"shank": 0, "col": 1, "row": 2, "flag": 3} + elif "snsGeomMap" in meta_data.keys(): + chmap = re.findall(r"([0-9]*:[0-9]*:[0-9]*:[0-9]*)", meta_data["snsGeomMap"]) + key_names = {"shank": 0, "x": 1, "y": 2, "flag": 3} else: return None # for digital nidq types, the key exists but does not contain any information if not chmap: - return {'shank': None, 'col': None, 'row': None, 'flag': None} + return {"shank": None, "col": None, "row": None, "flag": None} # shank#, col#, row#, drawflag # (nb: drawflag is one should be drawn and considered spatial average) - chmap = np.array([np.float32(cm.split(':')) for cm in chmap]) + chmap = np.array([np.float32(cm.split(":")) for cm in chmap]) return {k: chmap[:, v] for (k, v) in key_names.items()} @@ -611,46 +679,89 @@ def _conversion_sample2v_from_meta(meta_data): """ def int2volts(md): - """ :return: Conversion scalar to Volts. Needs to be combined with channel gains """ - if md.get('typeThis', None) == 'imec': - if 'imMaxInt' in md: - return md.get('imAiRangeMax') / int(md['imMaxInt']) + """:return: Conversion scalar to Volts. 
Needs to be combined with channel gains""" + if md.get("typeThis", None) == "imec": + if "imMaxInt" in md: + return md.get("imAiRangeMax") / int(md["imMaxInt"]) else: - return md.get('imAiRangeMax') / 512 + return md.get("imAiRangeMax") / 512 else: - return md.get('niAiRangeMax') / 32768 + return md.get("niAiRangeMax") / 32768 int2volt = int2volts(meta_data) version = _get_neuropixel_version_from_meta(meta_data) # interprets the gain value from the metadata header: - if 'imroTbl' in meta_data.keys(): # binary from the probes: ap or lf - sy_gain = np.ones(int(meta_data['snsApLfSy'][-1]), dtype=np.float32) + if "imroTbl" in meta_data.keys(): # binary from the probes: ap or lf + sy_gain = np.ones(int(meta_data["snsApLfSy"][-1]), dtype=np.float32) # imroTbl has 384 entries regardless of no of channels saved, so need to index by n_ch # TODO need to look at snsSaveChanMap and index channels to get correct gain - n_chn = _get_nchannels_from_meta(meta_data) - len(_get_sync_trace_indices_from_meta(meta_data)) - if 'NP2' in version: + n_chn = _get_nchannels_from_meta(meta_data) - len( + _get_sync_trace_indices_from_meta(meta_data) + ) + if "NP2" in version: # NP 2.0; APGain = 80 for all AP # return 0 for LFgain (no LF channels) - out = {'lf': np.hstack((int2volt / 80 * np.ones(n_chn).astype(np.float32), sy_gain)), - 'ap': np.hstack((int2volt / 80 * np.ones(n_chn).astype(np.float32), sy_gain))} + out = { + "lf": np.hstack( + (int2volt / 80 * np.ones(n_chn).astype(np.float32), sy_gain) + ), + "ap": np.hstack( + (int2volt / 80 * np.ones(n_chn).astype(np.float32), sy_gain) + ), + } else: # the sync traces are not included in the gain values, so are included for # broadcast ops - gain = re.findall(r'([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)', - meta_data['imroTbl'])[:n_chn] - out = {'lf': np.hstack((np.array([1 / np.float32(g.split(' ')[-1]) for g in gain]) * - int2volt, sy_gain)), - 'ap': np.hstack((np.array([1 / np.float32(g.split(' ')[-2]) for g in gain]) * - int2volt, sy_gain))} + gain = re.findall( + r"([0-9]* [0-9]* [0-9]* [0-9]* [0-9]*)", meta_data["imroTbl"] + )[:n_chn] + out = { + "lf": np.hstack( + ( + np.array([1 / np.float32(g.split(" ")[-1]) for g in gain]) + * int2volt, + sy_gain, + ) + ), + "ap": np.hstack( + ( + np.array([1 / np.float32(g.split(" ")[-2]) for g in gain]) + * int2volt, + sy_gain, + ) + ), + } # nidaq gain can be read in the same way regardless of NP1.0 or NP2.0 - elif 'niMNGain' in meta_data.keys(): # binary from nidq + elif "niMNGain" in meta_data.keys(): # binary from nidq gain = np.r_[ - np.ones(int(meta_data['snsMnMaXaDw'][0], )) / meta_data['niMNGain'] * int2volt, - np.ones(int(meta_data['snsMnMaXaDw'][1], )) / meta_data['niMAGain'] * int2volt, - np.ones(int(meta_data['snsMnMaXaDw'][2], )) * int2volt, # no gain for analog sync - np.ones(int(np.sum(meta_data['snsMnMaXaDw'][3]), ))] # no unit for digital sync - out = {'nidq': gain} + np.ones( + int( + meta_data["snsMnMaXaDw"][0], + ) + ) + / meta_data["niMNGain"] + * int2volt, + np.ones( + int( + meta_data["snsMnMaXaDw"][1], + ) + ) + / meta_data["niMAGain"] + * int2volt, + np.ones( + int( + meta_data["snsMnMaXaDw"][2], + ) + ) + * int2volt, # no gain for analog sync + np.ones( + int( + np.sum(meta_data["snsMnMaXaDw"][3]), + ) + ), + ] # no unit for digital sync + out = {"nidq": gain} return out @@ -670,30 +781,32 @@ def split_sync(sync_tr): def get_neuropixel_version_from_folder(session_path): - ephys_files = glob_ephys_files(session_path, ext='meta') + ephys_files = glob_ephys_files(session_path, ext="meta") return 
get_neuropixel_version_from_files(ephys_files) def get_neuropixel_version_from_files(ephys_files): - if any([ef.get('nidq') for ef in ephys_files]): - return '3B' + if any([ef.get("nidq") for ef in ephys_files]): + return "3B" else: - return '3A' + return "3A" def get_probes_from_folder(session_path): # should glob the ephys files and get out the labels # This assumes the meta files exist on the server (this is the case for now but should it be?) - ephys_files = glob_ephys_files(session_path, ext='meta') + ephys_files = glob_ephys_files(session_path, ext="meta") probes = [] for files in ephys_files: - if files['label']: - probes.append(files['label']) + if files["label"]: + probes.append(files["label"]) return probes -def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bin_exists=True): +def glob_ephys_files( + session_path, suffix=".meta", ext="bin", recursive=True, bin_exists=True +): """ From an arbitrary folder (usually session folder) gets the ap and lf files and labels Associated to the subfolders where they are @@ -721,66 +834,90 @@ def glob_ephys_files(session_path, suffix='.meta', ext='bin', recursive=True, bi :param session_path: folder, string or pathlib.Path :returns: a list of dictionaries with keys 'ap': apfile, 'lf': lffile and 'label' """ + def get_label(raw_ephys_apfile): - if raw_ephys_apfile.parts[-2] != 'raw_ephys_data': + if raw_ephys_apfile.parts[-2] != "raw_ephys_data": return raw_ephys_apfile.parts[-2] else: - return '' + return "" - recurse = '**/' if recursive else '' + recurse = "**/" if recursive else "" ephys_files = [] - for raw_ephys_file in Path(session_path).glob(f'{recurse}*.ap*{suffix}'): - raw_ephys_apfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), None) + for raw_ephys_file in Path(session_path).glob(f"{recurse}*.ap*{suffix}"): + raw_ephys_apfile = next( + raw_ephys_file.parent.glob(raw_ephys_file.stem + f".*{ext}"), None + ) if not raw_ephys_apfile and bin_exists: continue - elif not raw_ephys_apfile and ext != 'bin': + elif not raw_ephys_apfile and ext != "bin": continue - elif not bin_exists and ext == 'bin': - raw_ephys_apfile = raw_ephys_file.with_suffix('.bin') + elif not bin_exists and ext == "bin": + raw_ephys_apfile = raw_ephys_file.with_suffix(".bin") # first get the ap file - ephys_files.extend([Bunch({'label': None, 'ap': None, 'lf': None, 'path': None})]) + ephys_files.extend( + [Bunch({"label": None, "ap": None, "lf": None, "path": None})] + ) ephys_files[-1].ap = raw_ephys_apfile # then get the corresponding lf file if it exists - lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace('.ap.', '.lf.') - ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f'.*{ext}'), None) + lf_file = raw_ephys_apfile.parent / raw_ephys_apfile.name.replace( + ".ap.", ".lf." 
+ ) + ephys_files[-1].lf = next(lf_file.parent.glob(lf_file.stem + f".*{ext}"), None) # finally, the label is the current directory except if it is bare in raw_ephys_data ephys_files[-1].label = get_label(raw_ephys_apfile) ephys_files[-1].path = raw_ephys_apfile.parent # for 3b probes, need also to get the nidq dataset type - for raw_ephys_file in Path(session_path).rglob(f'{recurse}*.nidq*{suffix}'): - raw_ephys_nidqfile = next(raw_ephys_file.parent.glob(raw_ephys_file.stem + f'.*{ext}'), - None) - if not bin_exists and ext == 'bin': - raw_ephys_nidqfile = raw_ephys_file.with_suffix('.bin') - ephys_files.extend([Bunch({'label': get_label(raw_ephys_file), - 'nidq': raw_ephys_nidqfile, - 'path': raw_ephys_file.parent})]) + for raw_ephys_file in Path(session_path).rglob(f"{recurse}*.nidq*{suffix}"): + raw_ephys_nidqfile = next( + raw_ephys_file.parent.glob(raw_ephys_file.stem + f".*{ext}"), None + ) + if not bin_exists and ext == "bin": + raw_ephys_nidqfile = raw_ephys_file.with_suffix(".bin") + ephys_files.extend( + [ + Bunch( + { + "label": get_label(raw_ephys_file), + "nidq": raw_ephys_nidqfile, + "path": raw_ephys_file.parent, + } + ) + ] + ) return ephys_files -def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth, - random=False, int2volts=0.6 / 32768, corrupt=False): +def _mock_spikeglx_file( + mock_bin_file, + meta_file, + ns, + nc, + sync_depth, + random=False, + int2volts=0.6 / 32768, + corrupt=False, +): """ For testing purposes, create a binary file with sync pulses to test reading and extraction """ meta_file = Path(meta_file) mock_path_bin = Path(mock_bin_file) - mock_path_meta = mock_path_bin.with_suffix('.meta') + mock_path_meta = mock_path_bin.with_suffix(".meta") md = read_meta_data(meta_file) assert meta_file != mock_path_meta fs = _get_fs_from_meta(md) fid_source = open(meta_file) - fid_target = open(mock_path_meta, 'w+') + fid_target = open(mock_path_meta, "w+") line = fid_source.readline() while line: line = fid_source.readline() - if line.startswith('fileSizeBytes'): - line = f'fileSizeBytes={ns * nc * 2}\n' - if line.startswith('fileTimeSecs'): + if line.startswith("fileSizeBytes"): + line = f"fileSizeBytes={ns * nc * 2}\n" + if line.startswith("fileTimeSecs"): if corrupt: - line = f'fileTimeSecs={ns / fs + 1.8324}\n' + line = f"fileTimeSecs={ns / fs + 1.8324}\n" else: - line = f'fileTimeSecs={ns / fs}\n' + line = f"fileTimeSecs={ns / fs}\n" fid_target.write(line) fid_source.close() fid_target.close() @@ -792,10 +929,16 @@ def _mock_spikeglx_file(mock_bin_file, meta_file, ns, nc, sync_depth, # the last channel is the sync that we fill with sync = np.int16(2 ** np.float32(np.arange(-1, sync_depth))) D[:, -1] = 0 - D[:sync.size, -1] = sync - with open(mock_path_bin, 'w+') as fid: + D[: sync.size, -1] = sync + with open(mock_path_bin, "w+") as fid: D.tofile(fid) - return {'bin_file': mock_path_bin, 'ns': ns, 'nc': nc, 'sync_depth': sync_depth, 'D': D} + return { + "bin_file": mock_path_bin, + "ns": ns, + "nc": nc, + "sync_depth": sync_depth, + "D": D, + } def get_hardware_config(config_file): @@ -806,7 +949,7 @@ def get_hardware_config(config_file): """ config_file = Path(config_file) if config_file.is_dir(): - config_file = list(config_file.glob('*.wiring*.json')) + config_file = list(config_file.glob("*.wiring*.json")) if config_file: config_file = config_file[0] if not config_file or not config_file.exists(): @@ -821,16 +964,18 @@ def _sync_map_from_hardware_config(hardware_config): :param hardware_config: dictonary from json read of 
neuropixel_wirings.json :return: dictionary where key names refer to object and values to sync channel index """ - if hardware_config['SYSTEM'] == '3A' or hardware_config['SYSTEM'] == '3B': - pin_out = neuropixel.SYNC_PIN_OUT[hardware_config['SYSTEM']] - sync_map = {hardware_config['SYNC_WIRING_DIGITAL'][pin]: pin_out[pin] - for pin in hardware_config['SYNC_WIRING_DIGITAL'] - if pin_out[pin] is not None} + if hardware_config["SYSTEM"] == "3A" or hardware_config["SYSTEM"] == "3B": + pin_out = neuropixel.SYNC_PIN_OUT[hardware_config["SYSTEM"]] + sync_map = { + hardware_config["SYNC_WIRING_DIGITAL"][pin]: pin_out[pin] + for pin in hardware_config["SYNC_WIRING_DIGITAL"] + if pin_out[pin] is not None + } else: - digital = hardware_config.get('SYNC_WIRING_DIGITAL') + digital = hardware_config.get("SYNC_WIRING_DIGITAL") sync_map = {digital[pin]: int(pin[3:]) for pin in digital} - analog = hardware_config.get('SYNC_WIRING_ANALOG') + analog = hardware_config.get("SYNC_WIRING_ANALOG") if analog: sync_map.update({analog[pin]: int(pin[2:]) + 16 for pin in analog}) return sync_map diff --git a/src/tests/integration/cpu/csd_experiments.py b/src/tests/integration/cpu/csd_experiments.py index 744649e..3ddd432 100644 --- a/src/tests/integration/cpu/csd_experiments.py +++ b/src/tests/integration/cpu/csd_experiments.py @@ -17,22 +17,24 @@ h = trace_header(version=1) LFP_RESAMPLE_FACTOR = 10 # 250 Hz data -one = ONE(base_url='https://alyx.internationalbrainlab.org') -pid = 'ce397420-3cd2-4a55-8fd1-5e28321981f4' +one = ONE(base_url="https://alyx.internationalbrainlab.org") +pid = "ce397420-3cd2-4a55-8fd1-5e28321981f4" s0, sample_duration = (546, 30) -sr = Streamer(pid=pid, one=one, remove_cached=False, typ='lf') +sr = Streamer(pid=pid, one=one, remove_cached=False, typ="lf") tsel = slice(int(s0), int(s0) + int(sample_duration * sr.fs)) -raw = sr[tsel, :-sr.nsync].T -destripe = voltage.destripe_lfp(raw, fs=sr.fs, neuropixel_version=1, channel_labels=True) -destripe = scipy.signal.decimate(destripe, LFP_RESAMPLE_FACTOR, axis=1, ftype='fir') +raw = sr[tsel, : -sr.nsync].T +destripe = voltage.destripe_lfp( + raw, fs=sr.fs, neuropixel_version=1, channel_labels=True +) +destripe = scipy.signal.decimate(destripe, LFP_RESAMPLE_FACTOR, axis=1, ftype="fir") fs_out = sr.fs / LFP_RESAMPLE_FACTOR channels = SpikeSortingLoader(pid=pid, one=one).load_channels() -butter_kwargs = {'N': 3, 'Wn': 2 / sr.fs * 2, 'btype': 'highpass'} -sos = scipy.signal.butter(**butter_kwargs, output='sos') +butter_kwargs = {"N": 3, "Wn": 2 / sr.fs * 2, "btype": "highpass"} +sos = scipy.signal.butter(**butter_kwargs, output="sos") butter = scipy.signal.sosfiltfilt(sos, raw) -butter = fourier.fshift(butter, h['sample_shift'], axis=1) -butter = scipy.signal.decimate(butter, LFP_RESAMPLE_FACTOR, axis=1, ftype='fir') +butter = fourier.fshift(butter, h["sample_shift"], axis=1) +butter = scipy.signal.decimate(butter, LFP_RESAMPLE_FACTOR, axis=1, ftype="fir") ## %% @@ -40,14 +42,26 @@ csd = voltage.current_source_density(destripe, h) -eqcs['butter'] = viewephys(butter, fs=250, title='butter', channels=channels, br=br) -eqcs['destripe'] = viewephys(destripe, fs=250, title='destripe', channels=channels, br=br) -eqcs['csd_butter'] = viewephys(voltage.current_source_density(butter, h) * 40 ** 2, - fs=250, title='csd_butter', channels=channels, br=br) -eqcs['csd_destripe'] = viewephys(voltage.current_source_density(destripe, h) * 40 ** 2, - fs=250, title='csd_destripe', channels=channels, br=br) +eqcs["butter"] = viewephys(butter, fs=250, title="butter", 
channels=channels, br=br) +eqcs["destripe"] = viewephys( + destripe, fs=250, title="destripe", channels=channels, br=br +) +eqcs["csd_butter"] = viewephys( + voltage.current_source_density(butter, h) * 40**2, + fs=250, + title="csd_butter", + channels=channels, + br=br, +) +eqcs["csd_destripe"] = viewephys( + voltage.current_source_density(destripe, h) * 40**2, + fs=250, + title="csd_destripe", + channels=channels, + br=br, +) -today = datetime.date.today().strftime('%Y-%m-%d') +today = datetime.date.today().strftime("%Y-%m-%d") out_path = Path("/home/ibladmin/Pictures") for name, eqc in eqcs.items(): eqc.viewBox_seismic.setXRange(8000, 10000) diff --git a/src/tests/integration/cpu/test_destripe.py b/src/tests/integration/cpu/test_destripe.py index 281a427..5a51f3f 100644 --- a/src/tests/integration/cpu/test_destripe.py +++ b/src/tests/integration/cpu/test_destripe.py @@ -15,16 +15,15 @@ class TestEphysSpikeSortingPreProc(unittest.TestCase): - def test_pre_proc(self): - cbin_file = DATA_PATH.joinpath('adc', 'adc_test.ap.cbin') + cbin_file = DATA_PATH.joinpath("adc", "adc_test.ap.cbin") sr = spikeglx.Reader(cbin_file, open=True) - bin_file = cbin_file.with_suffix('.bin') + bin_file = cbin_file.with_suffix(".bin") ts = time.time() voltage.decompress_destripe_cbin(cbin_file, nprocesses=4) te = time.time() - ts - _logger.info(f'Time elapsed: {te}, length file (secs): {sr.ns / sr.fs}') + _logger.info(f"Time elapsed: {te}, length file (secs): {sr.ns / sr.fs}") sr_out = spikeglx.Reader(bin_file) assert sr.shape == sr_out.shape @@ -32,23 +31,38 @@ def test_pre_proc(self): sel_comp = slice(int(65536 * 0.4), int(65536 * 1.6)) h = trace_header(version=1) # create the FFT stencils - ncv = h['x'].size # number of channels - expected = voltage.destripe(sr[sel_comp, :ncv].T, fs=sr.fs, channel_labels=True).T + ncv = h["x"].size # number of channels + expected = voltage.destripe( + sr[sel_comp, :ncv].T, fs=sr.fs, channel_labels=True + ).T diff = expected - sr_out[sel_comp, :ncv] - assert np.min(20 * np.log10(utils.rms(diff[10000:-10000, :], axis=0) - / utils.rms(sr_out[sel_comp, :ncv], axis=0))) < 35 + assert ( + np.min( + 20 + * np.log10( + utils.rms(diff[10000:-10000, :], axis=0) + / utils.rms(sr_out[sel_comp, :ncv], axis=0) + ) + ) + < 35 + ) sr_out.close() bin_file.unlink() class TestEphysSpikeSortingMultiProcess(unittest.TestCase): def setUp(self) -> None: - - file_path = DATA_PATH.joinpath('np2', '_spikeglx_ephysData_g0_t0.imec0.ap.bin') - self.file_path = file_path.parent.parent.joinpath('probe00_temp', file_path.name) + file_path = DATA_PATH.joinpath("np2", "_spikeglx_ephysData_g0_t0.imec0.ap.bin") + self.file_path = file_path.parent.parent.joinpath( + "probe00_temp", file_path.name + ) self.file_path.parent.mkdir(exist_ok=True, parents=True) - meta_file = file_path.parent.joinpath('NP1_meta', '_spikeglx_ephysData_g0_t0.imec0.ap.meta') - self.meta_file = self.file_path.parent.joinpath('_spikeglx_ephysData_g0_t0.imec0.ap.meta') + meta_file = file_path.parent.joinpath( + "NP1_meta", "_spikeglx_ephysData_g0_t0.imec0.ap.meta" + ) + self.meta_file = self.file_path.parent.joinpath( + "_spikeglx_ephysData_g0_t0.imec0.ap.meta" + ) shutil.copy(file_path, self.file_path) shutil.copy(meta_file, self.meta_file) @@ -61,25 +75,30 @@ def tearDown(self): shutil.rmtree(self.file_path.parent) def test_parallel_computation(self): - - out_file = self.file_path.parent.joinpath('one_process.bin') - shutil.copy(self.meta_file, out_file.with_suffix('.meta')) - voltage.decompress_destripe_cbin(self.file_path, out_file, 
nprocesses=1, nbatch=6556) + out_file = self.file_path.parent.joinpath("one_process.bin") + shutil.copy(self.meta_file, out_file.with_suffix(".meta")) + voltage.decompress_destripe_cbin( + self.file_path, out_file, nprocesses=1, nbatch=6556 + ) sr_one = spikeglx.Reader(out_file) self.sglx_instances.append(sr_one) - out_file = self.file_path.parent.joinpath('four_process.bin') - shutil.copy(self.meta_file, out_file.with_suffix('.meta')) - voltage.decompress_destripe_cbin(self.file_path, out_file, nprocesses=4, nbatch=6556) + out_file = self.file_path.parent.joinpath("four_process.bin") + shutil.copy(self.meta_file, out_file.with_suffix(".meta")) + voltage.decompress_destripe_cbin( + self.file_path, out_file, nprocesses=4, nbatch=6556 + ) sr_four = spikeglx.Reader(out_file) self.sglx_instances.append(sr_four) assert np.array_equal(sr_one[:, :], sr_four[:, :]) # Now test the extra samples at the end - out_file = self.file_path.parent.joinpath('four_process_extra.bin') - shutil.copy(self.meta_file, out_file.with_suffix('.meta')) + out_file = self.file_path.parent.joinpath("four_process_extra.bin") + shutil.copy(self.meta_file, out_file.with_suffix(".meta")) ns2add = 100 - voltage.decompress_destripe_cbin(self.file_path, out_file, nprocesses=4, nbatch=6556, ns2add=ns2add) + voltage.decompress_destripe_cbin( + self.file_path, out_file, nprocesses=4, nbatch=6556, ns2add=ns2add + ) sr_four_extra = spikeglx.Reader(out_file) self.sglx_instances.append(sr_four_extra) assert sr_four_extra.ns == sr_four.ns + ns2add @@ -88,20 +107,26 @@ def test_parallel_computation(self): # Now test the whitening matrix wm = np.identity(sr_one.nc - 1) - out_file = self.file_path.parent.joinpath('four_process_whiten.bin') - shutil.copy(self.meta_file, out_file.with_suffix('.meta')) - voltage.decompress_destripe_cbin(self.file_path, out_file, nprocesses=4, nbatch=6556, wrot=wm) + out_file = self.file_path.parent.joinpath("four_process_whiten.bin") + shutil.copy(self.meta_file, out_file.with_suffix(".meta")) + voltage.decompress_destripe_cbin( + self.file_path, out_file, nprocesses=4, nbatch=6556, wrot=wm + ) sr_four_whiten = spikeglx.Reader(out_file) self.sglx_instances.append(sr_four_whiten) assert np.array_equal(sr_four_whiten._raw[:, :-1], sr_four._raw[:, :-1]) # Now test appending on the the end of an existing file - out_file = self.file_path.parent.joinpath('four_process.bin') - shutil.copy(self.meta_file, out_file.with_suffix('.meta')) - voltage.decompress_destripe_cbin(self.file_path, out_file, nprocesses=4, nbatch=6556, append=True) + out_file = self.file_path.parent.joinpath("four_process.bin") + shutil.copy(self.meta_file, out_file.with_suffix(".meta")) + voltage.decompress_destripe_cbin( + self.file_path, out_file, nprocesses=4, nbatch=6556, append=True + ) sr_four_append = spikeglx.Reader(out_file) self.sglx_instances.append(sr_four_append) assert sr_four_append.ns == 2 * sr_four.ns - assert np.array_equal(sr_four_append[sr_four.ns:, :], sr_four_append[:sr_four.ns, :]) - assert np.array_equal(sr_four_append[:sr_four.ns, :], sr_four[:, :]) - assert np.array_equal(sr_four_append[sr_four.ns:, :], sr_four[:, :]) + assert np.array_equal( + sr_four_append[sr_four.ns :, :], sr_four_append[: sr_four.ns, :] + ) + assert np.array_equal(sr_four_append[: sr_four.ns, :], sr_four[:, :]) + assert np.array_equal(sr_four_append[sr_four.ns :, :], sr_four[:, :]) diff --git a/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta 
b/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta new file mode 100644 index 0000000..9b21c5f --- /dev/null +++ b/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta @@ -0,0 +1,47 @@ +acqApLfSy=384,0,1 +appVersion=20201103 +fileCreateTime=2024-03-13T12:16:32 +fileName=D:/iblrigv8_data/steinmetzlab/Subjects/KM_013/2024-03-13/002/raw_ephys_data/_spikeglx_ephysData_g0/_spikeglx_ephysData_g0_imec1/_spikeglx_ephysData_g0_t0.imec1.ap.bin +gateMode=Immediate +imAiRangeMax=0.5 +imAiRangeMin=-0.5 +imCalibrated=true +imDatApi=3.31 +imDatBs_fw=2.0.137 +imDatBsc_fw=3.2.176 +imDatBsc_hw=1.9 +imDatBsc_pn=NP2_QBSC_00 +imDatBsc_sn=113 +imDatFx_hw=1.8 +imDatFx_pn=NPM_FLEX_0 +imDatHs_fw=2.9 +imDatHs_pn=NPM_HS_30 +imDatHs_sn=20431569 +imDatPrb_dock=1 +imDatPrb_pn=NP2010 +imDatPrb_port=2 +imDatPrb_slot=3 +imDatPrb_sn=19122519691 +imDatPrb_type=24 +imLEDEnable=false +imMaxInt=8192 +imRoFile=C:/SpikeGLX/Release_v20201103-phase30/IMRO_Tables/NPtype24_quarterDensity_ref1.imro +imSampRate=30000 +imStdby= +imTrgRising=true +imTrgSource=0 +nDataDirs=1 +nSavedChans=385 +snsApLfSy=384,0,1 +snsSaveChanSubset=0:384 +syncImInputSlot=3 +syncSourceIdx=3 +syncSourcePeriod=1 +trigMode=Immediate +typeImEnabled=3 +typeNiEnabled=1 +typeThis=imec +userNotes= +~imroTbl=(24,384)(0 0 0 1 0)(1 1 0 1 289)(2 2 0 1 98)(3 3 0 1 195)(4 0 0 1 4)(5 1 0 1 293)(6 2 0 1 102)(7 3 0 1 199)(8 0 0 1 8)(9 1 0 1 297)(10 2 0 1 106)(11 3 0 1 203)(12 0 0 1 12)(13 1 0 1 301)(14 2 0 1 110)(15 3 0 1 207)(16 0 0 1 16)(17 1 0 1 305)(18 2 0 1 114)(19 3 0 1 211)(20 0 0 1 20)(21 1 0 1 309)(22 2 0 1 118)(23 3 0 1 215)(24 0 0 1 24)(25 1 0 1 313)(26 2 0 1 122)(27 3 0 1 219)(28 0 0 1 28)(29 1 0 1 317)(30 2 0 1 126)(31 3 0 1 223)(32 0 0 1 32)(33 1 0 1 321)(34 2 0 1 130)(35 3 0 1 227)(36 0 0 1 36)(37 1 0 1 325)(38 2 0 1 134)(39 3 0 1 231)(40 0 0 1 40)(41 1 0 1 329)(42 2 0 1 138)(43 3 0 1 235)(44 0 0 1 44)(45 1 0 1 333)(46 2 0 1 142)(47 3 0 1 239)(48 0 0 1 288)(49 1 0 1 1)(50 2 0 1 194)(51 3 0 1 99)(52 0 0 1 292)(53 1 0 1 5)(54 2 0 1 198)(55 3 0 1 103)(56 0 0 1 296)(57 1 0 1 9)(58 2 0 1 202)(59 3 0 1 107)(60 0 0 1 300)(61 1 0 1 13)(62 2 0 1 206)(63 3 0 1 111)(64 0 0 1 304)(65 1 0 1 17)(66 2 0 1 210)(67 3 0 1 115)(68 0 0 1 308)(69 1 0 1 21)(70 2 0 1 214)(71 3 0 1 119)(72 0 0 1 312)(73 1 0 1 25)(74 2 0 1 218)(75 3 0 1 123)(76 0 0 1 316)(77 1 0 1 29)(78 2 0 1 222)(79 3 0 1 127)(80 0 0 1 320)(81 1 0 1 33)(82 2 0 1 226)(83 3 0 1 131)(84 0 0 1 324)(85 1 0 1 37)(86 2 0 1 230)(87 3 0 1 135)(88 0 0 1 328)(89 1 0 1 41)(90 2 0 1 234)(91 3 0 1 139)(92 0 0 1 332)(93 1 0 1 45)(94 2 0 1 238)(95 3 0 1 143)(96 0 0 1 48)(97 1 0 1 337)(98 2 0 1 146)(99 3 0 1 243)(100 0 0 1 52)(101 1 0 1 341)(102 2 0 1 150)(103 3 0 1 247)(104 0 0 1 56)(105 1 0 1 345)(106 2 0 1 154)(107 3 0 1 251)(108 0 0 1 60)(109 1 0 1 349)(110 2 0 1 158)(111 3 0 1 255)(112 0 0 1 64)(113 1 0 1 353)(114 2 0 1 162)(115 3 0 1 259)(116 0 0 1 68)(117 1 0 1 357)(118 2 0 1 166)(119 3 0 1 263)(120 0 0 1 72)(121 1 0 1 361)(122 2 0 1 170)(123 3 0 1 267)(124 0 0 1 76)(125 1 0 1 365)(126 2 0 1 174)(127 3 0 1 271)(128 0 0 1 80)(129 1 0 1 369)(130 2 0 1 178)(131 3 0 1 275)(132 0 0 1 84)(133 1 0 1 373)(134 2 0 1 182)(135 3 0 1 279)(136 0 0 1 88)(137 1 0 1 377)(138 2 0 1 186)(139 3 0 1 283)(140 0 0 1 92)(141 1 0 1 381)(142 2 0 1 190)(143 3 0 1 287)(144 0 0 1 336)(145 1 0 1 49)(146 2 0 1 242)(147 3 0 1 147)(148 0 0 1 340)(149 1 0 1 53)(150 2 0 1 246)(151 3 0 1 151)(152 0 0 1 344)(153 1 0 1 57)(154 2 0 1 250)(155 3 0 1 155)(156 0 0 1 348)(157 1 0 1 61)(158 2 0 1 254)(159 
3 0 1 159)(160 0 0 1 352)(161 1 0 1 65)(162 2 0 1 258)(163 3 0 1 163)(164 0 0 1 356)(165 1 0 1 69)(166 2 0 1 262)(167 3 0 1 167)(168 0 0 1 360)(169 1 0 1 73)(170 2 0 1 266)(171 3 0 1 171)(172 0 0 1 364)(173 1 0 1 77)(174 2 0 1 270)(175 3 0 1 175)(176 0 0 1 368)(177 1 0 1 81)(178 2 0 1 274)(179 3 0 1 179)(180 0 0 1 372)(181 1 0 1 85)(182 2 0 1 278)(183 3 0 1 183)(184 0 0 1 376)(185 1 0 1 89)(186 2 0 1 282)(187 3 0 1 187)(188 0 0 1 380)(189 1 0 1 93)(190 2 0 1 286)(191 3 0 1 191)(192 0 0 1 96)(193 1 0 1 193)(194 2 0 1 2)(195 3 0 1 291)(196 0 0 1 100)(197 1 0 1 197)(198 2 0 1 6)(199 3 0 1 295)(200 0 0 1 104)(201 1 0 1 201)(202 2 0 1 10)(203 3 0 1 299)(204 0 0 1 108)(205 1 0 1 205)(206 2 0 1 14)(207 3 0 1 303)(208 0 0 1 112)(209 1 0 1 209)(210 2 0 1 18)(211 3 0 1 307)(212 0 0 1 116)(213 1 0 1 213)(214 2 0 1 22)(215 3 0 1 311)(216 0 0 1 120)(217 1 0 1 217)(218 2 0 1 26)(219 3 0 1 315)(220 0 0 1 124)(221 1 0 1 221)(222 2 0 1 30)(223 3 0 1 319)(224 0 0 1 128)(225 1 0 1 225)(226 2 0 1 34)(227 3 0 1 323)(228 0 0 1 132)(229 1 0 1 229)(230 2 0 1 38)(231 3 0 1 327)(232 0 0 1 136)(233 1 0 1 233)(234 2 0 1 42)(235 3 0 1 331)(236 0 0 1 140)(237 1 0 1 237)(238 2 0 1 46)(239 3 0 1 335)(240 0 0 1 192)(241 1 0 1 97)(242 2 0 1 290)(243 3 0 1 3)(244 0 0 1 196)(245 1 0 1 101)(246 2 0 1 294)(247 3 0 1 7)(248 0 0 1 200)(249 1 0 1 105)(250 2 0 1 298)(251 3 0 1 11)(252 0 0 1 204)(253 1 0 1 109)(254 2 0 1 302)(255 3 0 1 15)(256 0 0 1 208)(257 1 0 1 113)(258 2 0 1 306)(259 3 0 1 19)(260 0 0 1 212)(261 1 0 1 117)(262 2 0 1 310)(263 3 0 1 23)(264 0 0 1 216)(265 1 0 1 121)(266 2 0 1 314)(267 3 0 1 27)(268 0 0 1 220)(269 1 0 1 125)(270 2 0 1 318)(271 3 0 1 31)(272 0 0 1 224)(273 1 0 1 129)(274 2 0 1 322)(275 3 0 1 35)(276 0 0 1 228)(277 1 0 1 133)(278 2 0 1 326)(279 3 0 1 39)(280 0 0 1 232)(281 1 0 1 137)(282 2 0 1 330)(283 3 0 1 43)(284 0 0 1 236)(285 1 0 1 141)(286 2 0 1 334)(287 3 0 1 47)(288 0 0 1 144)(289 1 0 1 241)(290 2 0 1 50)(291 3 0 1 339)(292 0 0 1 148)(293 1 0 1 245)(294 2 0 1 54)(295 3 0 1 343)(296 0 0 1 152)(297 1 0 1 249)(298 2 0 1 58)(299 3 0 1 347)(300 0 0 1 156)(301 1 0 1 253)(302 2 0 1 62)(303 3 0 1 351)(304 0 0 1 160)(305 1 0 1 257)(306 2 0 1 66)(307 3 0 1 355)(308 0 0 1 164)(309 1 0 1 261)(310 2 0 1 70)(311 3 0 1 359)(312 0 0 1 168)(313 1 0 1 265)(314 2 0 1 74)(315 3 0 1 363)(316 0 0 1 172)(317 1 0 1 269)(318 2 0 1 78)(319 3 0 1 367)(320 0 0 1 176)(321 1 0 1 273)(322 2 0 1 82)(323 3 0 1 371)(324 0 0 1 180)(325 1 0 1 277)(326 2 0 1 86)(327 3 0 1 375)(328 0 0 1 184)(329 1 0 1 281)(330 2 0 1 90)(331 3 0 1 379)(332 0 0 1 188)(333 1 0 1 285)(334 2 0 1 94)(335 3 0 1 383)(336 0 0 1 240)(337 1 0 1 145)(338 2 0 1 338)(339 3 0 1 51)(340 0 0 1 244)(341 1 0 1 149)(342 2 0 1 342)(343 3 0 1 55)(344 0 0 1 248)(345 1 0 1 153)(346 2 0 1 346)(347 3 0 1 59)(348 0 0 1 252)(349 1 0 1 157)(350 2 0 1 350)(351 3 0 1 63)(352 0 0 1 256)(353 1 0 1 161)(354 2 0 1 354)(355 3 0 1 67)(356 0 0 1 260)(357 1 0 1 165)(358 2 0 1 358)(359 3 0 1 71)(360 0 0 1 264)(361 1 0 1 169)(362 2 0 1 362)(363 3 0 1 75)(364 0 0 1 268)(365 1 0 1 173)(366 2 0 1 366)(367 3 0 1 79)(368 0 0 1 272)(369 1 0 1 177)(370 2 0 1 370)(371 3 0 1 83)(372 0 0 1 276)(373 1 0 1 181)(374 2 0 1 374)(375 3 0 1 87)(376 0 0 1 280)(377 1 0 1 185)(378 2 0 1 378)(379 3 0 1 91)(380 0 0 1 284)(381 1 0 1 189)(382 2 0 1 382)(383 3 0 1 95) 
+~snsChanMap=(384,0,1)(AP0;0:0)(AP1;1:168)(AP2;2:216)(AP3;3:336)(AP4;4:1)(AP5;5:169)(AP6;6:217)(AP7;7:337)(AP8;8:2)(AP9;9:170)(AP10;10:218)(AP11;11:338)(AP12;12:3)(AP13;13:171)(AP14;14:219)(AP15;15:339)(AP16;16:4)(AP17;17:172)(AP18;18:220)(AP19;19:340)(AP20;20:5)(AP21;21:173)(AP22;22:221)(AP23;23:341)(AP24;24:6)(AP25;25:174)(AP26;26:222)(AP27;27:342)(AP28;28:7)(AP29;29:175)(AP30;30:223)(AP31;31:343)(AP32;32:8)(AP33;33:176)(AP34;34:224)(AP35;35:344)(AP36;36:9)(AP37;37:177)(AP38;38:225)(AP39;39:345)(AP40;40:10)(AP41;41:178)(AP42;42:226)(AP43;43:346)(AP44;44:11)(AP45;45:179)(AP46;46:227)(AP47;47:347)(AP48;48:72)(AP49;49:96)(AP50;50:240)(AP51;51:312)(AP52;52:73)(AP53;53:97)(AP54;54:241)(AP55;55:313)(AP56;56:74)(AP57;57:98)(AP58;58:242)(AP59;59:314)(AP60;60:75)(AP61;61:99)(AP62;62:243)(AP63;63:315)(AP64;64:76)(AP65;65:100)(AP66;66:244)(AP67;67:316)(AP68;68:77)(AP69;69:101)(AP70;70:245)(AP71;71:317)(AP72;72:78)(AP73;73:102)(AP74;74:246)(AP75;75:318)(AP76;76:79)(AP77;77:103)(AP78;78:247)(AP79;79:319)(AP80;80:80)(AP81;81:104)(AP82;82:248)(AP83;83:320)(AP84;84:81)(AP85;85:105)(AP86;86:249)(AP87;87:321)(AP88;88:82)(AP89;89:106)(AP90;90:250)(AP91;91:322)(AP92;92:83)(AP93;93:107)(AP94;94:251)(AP95;95:323)(AP96;96:12)(AP97;97:180)(AP98;98:228)(AP99;99:348)(AP100;100:13)(AP101;101:181)(AP102;102:229)(AP103;103:349)(AP104;104:14)(AP105;105:182)(AP106;106:230)(AP107;107:350)(AP108;108:15)(AP109;109:183)(AP110;110:231)(AP111;111:351)(AP112;112:16)(AP113;113:184)(AP114;114:232)(AP115;115:352)(AP116;116:17)(AP117;117:185)(AP118;118:233)(AP119;119:353)(AP120;120:18)(AP121;121:186)(AP122;122:234)(AP123;123:354)(AP124;124:19)(AP125;125:187)(AP126;126:235)(AP127;127:355)(AP128;128:20)(AP129;129:188)(AP130;130:236)(AP131;131:356)(AP132;132:21)(AP133;133:189)(AP134;134:237)(AP135;135:357)(AP136;136:22)(AP137;137:190)(AP138;138:238)(AP139;139:358)(AP140;140:23)(AP141;141:191)(AP142;142:239)(AP143;143:359)(AP144;144:84)(AP145;145:108)(AP146;146:252)(AP147;147:324)(AP148;148:85)(AP149;149:109)(AP150;150:253)(AP151;151:325)(AP152;152:86)(AP153;153:110)(AP154;154:254)(AP155;155:326)(AP156;156:87)(AP157;157:111)(AP158;158:255)(AP159;159:327)(AP160;160:88)(AP161;161:112)(AP162;162:256)(AP163;163:328)(AP164;164:89)(AP165;165:113)(AP166;166:257)(AP167;167:329)(AP168;168:90)(AP169;169:114)(AP170;170:258)(AP171;171:330)(AP172;172:91)(AP173;173:115)(AP174;174:259)(AP175;175:331)(AP176;176:92)(AP177;177:116)(AP178;178:260)(AP179;179:332)(AP180;180:93)(AP181;181:117)(AP182;182:261)(AP183;183:333)(AP184;184:94)(AP185;185:118)(AP186;186:262)(AP187;187:334)(AP188;188:95)(AP189;189:119)(AP190;190:263)(AP191;191:335)(AP192;192:24)(AP193;193:144)(AP194;194:192)(AP195;195:360)(AP196;196:25)(AP197;197:145)(AP198;198:193)(AP199;199:361)(AP200;200:26)(AP201;201:146)(AP202;202:194)(AP203;203:362)(AP204;204:27)(AP205;205:147)(AP206;206:195)(AP207;207:363)(AP208;208:28)(AP209;209:148)(AP210;210:196)(AP211;211:364)(AP212;212:29)(AP213;213:149)(AP214;214:197)(AP215;215:365)(AP216;216:30)(AP217;217:150)(AP218;218:198)(AP219;219:366)(AP220;220:31)(AP221;221:151)(AP222;222:199)(AP223;223:367)(AP224;224:32)(AP225;225:152)(AP226;226:200)(AP227;227:368)(AP228;228:33)(AP229;229:153)(AP230;230:201)(AP231;231:369)(AP232;232:34)(AP233;233:154)(AP234;234:202)(AP235;235:370)(AP236;236:35)(AP237;237:155)(AP238;238:203)(AP239;239:371)(AP240;240:48)(AP241;241:120)(AP242;242:264)(AP243;243:288)(AP244;244:49)(AP245;245:121)(AP246;246:265)(AP247;247:289)(AP248;248:50)(AP249;249:122)(AP250;250:266)(AP251;251:290)(AP252;252:51)(AP253;253:123)(AP254;254:267)(AP255
;255:291)(AP256;256:52)(AP257;257:124)(AP258;258:268)(AP259;259:292)(AP260;260:53)(AP261;261:125)(AP262;262:269)(AP263;263:293)(AP264;264:54)(AP265;265:126)(AP266;266:270)(AP267;267:294)(AP268;268:55)(AP269;269:127)(AP270;270:271)(AP271;271:295)(AP272;272:56)(AP273;273:128)(AP274;274:272)(AP275;275:296)(AP276;276:57)(AP277;277:129)(AP278;278:273)(AP279;279:297)(AP280;280:58)(AP281;281:130)(AP282;282:274)(AP283;283:298)(AP284;284:59)(AP285;285:131)(AP286;286:275)(AP287;287:299)(AP288;288:36)(AP289;289:156)(AP290;290:204)(AP291;291:372)(AP292;292:37)(AP293;293:157)(AP294;294:205)(AP295;295:373)(AP296;296:38)(AP297;297:158)(AP298;298:206)(AP299;299:374)(AP300;300:39)(AP301;301:159)(AP302;302:207)(AP303;303:375)(AP304;304:40)(AP305;305:160)(AP306;306:208)(AP307;307:376)(AP308;308:41)(AP309;309:161)(AP310;310:209)(AP311;311:377)(AP312;312:42)(AP313;313:162)(AP314;314:210)(AP315;315:378)(AP316;316:43)(AP317;317:163)(AP318;318:211)(AP319;319:379)(AP320;320:44)(AP321;321:164)(AP322;322:212)(AP323;323:380)(AP324;324:45)(AP325;325:165)(AP326;326:213)(AP327;327:381)(AP328;328:46)(AP329;329:166)(AP330;330:214)(AP331;331:382)(AP332;332:47)(AP333;333:167)(AP334;334:215)(AP335;335:383)(AP336;336:60)(AP337;337:132)(AP338;338:276)(AP339;339:300)(AP340;340:61)(AP341;341:133)(AP342;342:277)(AP343;343:301)(AP344;344:62)(AP345;345:134)(AP346;346:278)(AP347;347:302)(AP348;348:63)(AP349;349:135)(AP350;350:279)(AP351;351:303)(AP352;352:64)(AP353;353:136)(AP354;354:280)(AP355;355:304)(AP356;356:65)(AP357;357:137)(AP358;358:281)(AP359;359:305)(AP360;360:66)(AP361;361:138)(AP362;362:282)(AP363;363:306)(AP364;364:67)(AP365;365:139)(AP366;366:283)(AP367;367:307)(AP368;368:68)(AP369;369:140)(AP370;370:284)(AP371;371:308)(AP372;372:69)(AP373;373:141)(AP374;374:285)(AP375;375:309)(AP376;376:70)(AP377;377:142)(AP378;378:286)(AP379;379:310)(AP380;380:71)(AP381;381:143)(AP382;382:287)(AP383;383:311)(SY0;384:384) 
+~snsShankMap=(4,2,640)(0:0:0:1)(1:1:144:1)(2:0:49:1)(3:1:97:1)(0:0:2:1)(1:1:146:1)(2:0:51:1)(3:1:99:1)(0:0:4:1)(1:1:148:1)(2:0:53:1)(3:1:101:1)(0:0:6:1)(1:1:150:1)(2:0:55:1)(3:1:103:1)(0:0:8:1)(1:1:152:1)(2:0:57:1)(3:1:105:1)(0:0:10:1)(1:1:154:1)(2:0:59:1)(3:1:107:1)(0:0:12:1)(1:1:156:1)(2:0:61:1)(3:1:109:1)(0:0:14:1)(1:1:158:1)(2:0:63:1)(3:1:111:1)(0:0:16:1)(1:1:160:1)(2:0:65:1)(3:1:113:1)(0:0:18:1)(1:1:162:1)(2:0:67:1)(3:1:115:1)(0:0:20:1)(1:1:164:1)(2:0:69:1)(3:1:117:1)(0:0:22:1)(1:1:166:1)(2:0:71:1)(3:1:119:1)(0:0:144:1)(1:1:0:1)(2:0:97:1)(3:1:49:1)(0:0:146:1)(1:1:2:1)(2:0:99:1)(3:1:51:1)(0:0:148:1)(1:1:4:1)(2:0:101:1)(3:1:53:1)(0:0:150:1)(1:1:6:1)(2:0:103:1)(3:1:55:1)(0:0:152:1)(1:1:8:1)(2:0:105:1)(3:1:57:1)(0:0:154:1)(1:1:10:1)(2:0:107:1)(3:1:59:1)(0:0:156:1)(1:1:12:1)(2:0:109:1)(3:1:61:1)(0:0:158:1)(1:1:14:1)(2:0:111:1)(3:1:63:1)(0:0:160:1)(1:1:16:1)(2:0:113:1)(3:1:65:1)(0:0:162:1)(1:1:18:1)(2:0:115:1)(3:1:67:1)(0:0:164:1)(1:1:20:1)(2:0:117:1)(3:1:69:1)(0:0:166:1)(1:1:22:1)(2:0:119:1)(3:1:71:1)(0:0:24:1)(1:1:168:1)(2:0:73:1)(3:1:121:1)(0:0:26:1)(1:1:170:1)(2:0:75:1)(3:1:123:1)(0:0:28:1)(1:1:172:1)(2:0:77:1)(3:1:125:1)(0:0:30:1)(1:1:174:1)(2:0:79:1)(3:1:127:1)(0:0:32:1)(1:1:176:1)(2:0:81:1)(3:1:129:1)(0:0:34:1)(1:1:178:1)(2:0:83:1)(3:1:131:1)(0:0:36:1)(1:1:180:1)(2:0:85:1)(3:1:133:1)(0:0:38:1)(1:1:182:1)(2:0:87:1)(3:1:135:0)(0:0:40:1)(1:1:184:1)(2:0:89:1)(3:1:137:1)(0:0:42:1)(1:1:186:1)(2:0:91:1)(3:1:139:1)(0:0:44:1)(1:1:188:1)(2:0:93:1)(3:1:141:1)(0:0:46:1)(1:1:190:1)(2:0:95:1)(3:1:143:1)(0:0:168:1)(1:1:24:1)(2:0:121:1)(3:1:73:1)(0:0:170:1)(1:1:26:1)(2:0:123:1)(3:1:75:1)(0:0:172:1)(1:1:28:1)(2:0:125:1)(3:1:77:1)(0:0:174:1)(1:1:30:1)(2:0:127:1)(3:1:79:1)(0:0:176:1)(1:1:32:1)(2:0:129:1)(3:1:81:1)(0:0:178:1)(1:1:34:1)(2:0:131:1)(3:1:83:1)(0:0:180:1)(1:1:36:1)(2:0:133:1)(3:1:85:1)(0:0:182:1)(1:1:38:1)(2:0:135:1)(3:1:87:1)(0:0:184:1)(1:1:40:1)(2:0:137:1)(3:1:89:1)(0:0:186:1)(1:1:42:1)(2:0:139:1)(3:1:91:1)(0:0:188:1)(1:1:44:1)(2:0:141:1)(3:1:93:1)(0:0:190:1)(1:1:46:1)(2:0:143:1)(3:1:95:1)(0:0:48:1)(1:1:96:1)(2:0:1:1)(3:1:145:1)(0:0:50:1)(1:1:98:1)(2:0:3:1)(3:1:147:1)(0:0:52:1)(1:1:100:1)(2:0:5:1)(3:1:149:1)(0:0:54:1)(1:1:102:1)(2:0:7:1)(3:1:151:1)(0:0:56:1)(1:1:104:1)(2:0:9:1)(3:1:153:1)(0:0:58:1)(1:1:106:1)(2:0:11:1)(3:1:155:1)(0:0:60:1)(1:1:108:1)(2:0:13:1)(3:1:157:1)(0:0:62:1)(1:1:110:1)(2:0:15:1)(3:1:159:1)(0:0:64:1)(1:1:112:1)(2:0:17:1)(3:1:161:1)(0:0:66:1)(1:1:114:1)(2:0:19:1)(3:1:163:1)(0:0:68:1)(1:1:116:1)(2:0:21:1)(3:1:165:1)(0:0:70:1)(1:1:118:1)(2:0:23:1)(3:1:167:1)(0:0:96:1)(1:1:48:1)(2:0:145:1)(3:1:1:1)(0:0:98:1)(1:1:50:1)(2:0:147:1)(3:1:3:1)(0:0:100:1)(1:1:52:1)(2:0:149:1)(3:1:5:1)(0:0:102:1)(1:1:54:1)(2:0:151:1)(3:1:7:1)(0:0:104:1)(1:1:56:1)(2:0:153:1)(3:1:9:1)(0:0:106:1)(1:1:58:1)(2:0:155:1)(3:1:11:1)(0:0:108:1)(1:1:60:1)(2:0:157:1)(3:1:13:1)(0:0:110:1)(1:1:62:1)(2:0:159:1)(3:1:15:1)(0:0:112:1)(1:1:64:1)(2:0:161:1)(3:1:17:1)(0:0:114:1)(1:1:66:1)(2:0:163:1)(3:1:19:1)(0:0:116:1)(1:1:68:1)(2:0:165:1)(3:1:21:1)(0:0:118:1)(1:1:70:1)(2:0:167:1)(3:1:23:1)(0:0:72:1)(1:1:120:1)(2:0:25:1)(3:1:169:1)(0:0:74:1)(1:1:122:1)(2:0:27:1)(3:1:171:1)(0:0:76:1)(1:1:124:1)(2:0:29:1)(3:1:173:1)(0:0:78:1)(1:1:126:1)(2:0:31:1)(3:1:175:1)(0:0:80:1)(1:1:128:1)(2:0:33:1)(3:1:177:1)(0:0:82:1)(1:1:130:1)(2:0:35:1)(3:1:179:1)(0:0:84:1)(1:1:132:1)(2:0:37:1)(3:1:181:1)(0:0:86:1)(1:1:134:1)(2:0:39:1)(3:1:183:1)(0:0:88:1)(1:1:136:1)(2:0:41:1)(3:1:185:1)(0:0:90:1)(1:1:138:1)(2:0:43:1)(3:1:187:1)(0:0:92:1)(1:1:140:1)(2:0:45:1)(3:1:189:1)(0:0:94:1)(1:1:142:1)(2:0:47:1)(3:1:191:1)(0:0:120:1)(1:1:72:1)(2:0:169:1)
(3:1:25:1)(0:0:122:1)(1:1:74:1)(2:0:171:1)(3:1:27:1)(0:0:124:1)(1:1:76:1)(2:0:173:1)(3:1:29:1)(0:0:126:1)(1:1:78:1)(2:0:175:1)(3:1:31:1)(0:0:128:1)(1:1:80:1)(2:0:177:1)(3:1:33:1)(0:0:130:1)(1:1:82:1)(2:0:179:1)(3:1:35:1)(0:0:132:1)(1:1:84:1)(2:0:181:1)(3:1:37:1)(0:0:134:1)(1:1:86:1)(2:0:183:1)(3:1:39:1)(0:0:136:1)(1:1:88:1)(2:0:185:1)(3:1:41:1)(0:0:138:1)(1:1:90:1)(2:0:187:1)(3:1:43:1)(0:0:140:1)(1:1:92:1)(2:0:189:1)(3:1:45:1)(0:0:142:1)(1:1:94:1)(2:0:191:1)(3:1:47:1) diff --git a/src/tests/unit/cpu/test_dsp.py b/src/tests/unit/cpu/test_dsp.py index fe59fab..2d775a3 100644 --- a/src/tests/unit/cpu/test_dsp.py +++ b/src/tests/unit/cpu/test_dsp.py @@ -620,6 +620,9 @@ def test_deprecation_countdown(self): # `neurodsp` package at the top level of the ibl-neuropixel # repository import datetime + if datetime.datetime.now() > datetime.datetime(2024, 9, 1): - raise NotImplementedError("neurodsp will not longer be supported. " - "Change all references to ibldsp.") + raise NotImplementedError( + "neurodsp will not longer be supported. " + "Change all references to ibldsp." + ) diff --git a/src/tests/unit/cpu/test_neuropixel.py b/src/tests/unit/cpu/test_neuropixel.py index 1ef9a36..73e136d 100644 --- a/src/tests/unit/cpu/test_neuropixel.py +++ b/src/tests/unit/cpu/test_neuropixel.py @@ -9,9 +9,9 @@ def test_sites_coordinates_deprecated(): def test_adc_shifts(): # test ADC shifts version 1 h1 = neuropixel.trace_header(version=1) - np.testing.assert_equal(np.unique(h1['sample_shift'] * 13), np.arange(12)) + np.testing.assert_equal(np.unique(h1["sample_shift"] * 13), np.arange(12)) # test ADC shifts version 2 h21 = neuropixel.trace_header(version=2.1) h24 = neuropixel.trace_header(version=2.4) - np.testing.assert_equal(h24['sample_shift'], h21['sample_shift']) - np.testing.assert_equal(np.unique(h21['sample_shift'] * 16), np.arange(16)) + np.testing.assert_equal(h24["sample_shift"], h21["sample_shift"]) + np.testing.assert_equal(np.unique(h21["sample_shift"] * 16), np.arange(16)) diff --git a/src/tests/unit/cpu/test_spikeglx.py b/src/tests/unit/cpu/test_spikeglx.py index 13471b4..85d6212 100644 --- a/src/tests/unit/cpu/test_spikeglx.py +++ b/src/tests/unit/cpu/test_spikeglx.py @@ -9,34 +9,37 @@ import neuropixel import spikeglx -TEST_PATH = Path(__file__).parent.joinpath('fixtures') +TEST_PATH = Path(__file__).parent.joinpath("fixtures") class TestSpikeGLX_hardwareInfo(unittest.TestCase): - def setUp(self) -> None: self.workdir = TEST_PATH - self.map3A = {'left_camera': 2, - 'right_camera': 3, - 'body_camera': 4, - 'bpod': 7, - 'frame2ttl': 12, - 'rotary_encoder_0': 13, - 'rotary_encoder_1': 14, - 'audio': 15} - self.map3B = {'left_camera': 0, - 'right_camera': 1, - 'body_camera': 2, - 'imec_sync': 3, - 'frame2ttl': 4, - 'rotary_encoder_0': 5, - 'rotary_encoder_1': 6, - 'audio': 7, - 'bpod': 16, - 'laser': 17, - 'laser_ttl': 18} - self.file3a = self.workdir / 'sample3A_g0_t0.imec.wiring.json' - self.file3b = self.workdir / 'sample3B_g0_t0.nidq.wiring.json' + self.map3A = { + "left_camera": 2, + "right_camera": 3, + "body_camera": 4, + "bpod": 7, + "frame2ttl": 12, + "rotary_encoder_0": 13, + "rotary_encoder_1": 14, + "audio": 15, + } + self.map3B = { + "left_camera": 0, + "right_camera": 1, + "body_camera": 2, + "imec_sync": 3, + "frame2ttl": 4, + "rotary_encoder_0": 5, + "rotary_encoder_1": 6, + "audio": 7, + "bpod": 16, + "laser": 17, + "laser_ttl": 18, + } + self.file3a = self.workdir / "sample3A_g0_t0.imec.wiring.json" + self.file3b = self.workdir / "sample3B_g0_t0.nidq.wiring.json" def 
test_get_wiring(self): # get params providing full file path @@ -54,7 +57,7 @@ def test_get_wiring(self): def test_get_channel_map(self): with tempfile.TemporaryDirectory() as tdir: - self.assertIsNone(spikeglx.get_sync_map(Path(tdir) / 'idontexist.json')) + self.assertIsNone(spikeglx.get_sync_map(Path(tdir) / "idontexist.json")) class TestSpikeGLX_glob_ephys(unittest.TestCase): @@ -76,6 +79,7 @@ class TestSpikeGLX_glob_ephys(unittest.TestCase): ├── sync_testing_g0_t0.imec1.ap.bin └── sync_testing_g0_t0.imec1.lf.bin """ + def setUp(self): def touchfile(p): if isinstance(p, Path): @@ -83,52 +87,74 @@ def touchfile(p): p.parent.mkdir(exist_ok=True, parents=True) p.touch(exist_ok=True) except Exception: - print('tutu') + print("tutu") def create_tree(root_dir, dico): root_dir.mkdir(exist_ok=True, parents=True) for ldir in dico: for k in ldir: - if k == 'path' or k == 'label': + if k == "path" or k == "label": continue touchfile(ldir[k]) - Path(ldir[k]).with_suffix('.meta').touch() + Path(ldir[k]).with_suffix(".meta").touch() - self.tmpdir = Path(tempfile.gettempdir()) / 'test_glob_ephys' + self.tmpdir = Path(tempfile.gettempdir()) / "test_glob_ephys" self.tmpdir.mkdir(exist_ok=True) - self.dir3a = self.tmpdir.joinpath('3A').joinpath('raw_ephys_data') - self.dir3b = self.tmpdir.joinpath('3B').joinpath('raw_ephys_data') - self.dict3a = [{'label': 'imec0', - 'ap': self.dir3a / 'imec0' / 'sync_testing_g0_t0.imec0.ap.bin', - 'lf': self.dir3a / 'imec0' / 'sync_testing_g0_t0.imec0.lf.bin', - 'path': self.dir3a / 'imec0'}, - {'label': 'imec1', - 'ap': self.dir3a / 'imec1' / 'sync_testing_g0_t0.imec1.ap.bin', - 'lf': self.dir3a / 'imec1' / 'sync_testing_g0_t0.imec1.lf.bin', - 'path': self.dir3a / 'imec1'}] + self.dir3a = self.tmpdir.joinpath("3A").joinpath("raw_ephys_data") + self.dir3b = self.tmpdir.joinpath("3B").joinpath("raw_ephys_data") + self.dict3a = [ + { + "label": "imec0", + "ap": self.dir3a / "imec0" / "sync_testing_g0_t0.imec0.ap.bin", + "lf": self.dir3a / "imec0" / "sync_testing_g0_t0.imec0.lf.bin", + "path": self.dir3a / "imec0", + }, + { + "label": "imec1", + "ap": self.dir3a / "imec1" / "sync_testing_g0_t0.imec1.ap.bin", + "lf": self.dir3a / "imec1" / "sync_testing_g0_t0.imec1.lf.bin", + "path": self.dir3a / "imec1", + }, + ] # surprise ! 
one of them happens to be compressed - self.dict3b = [{'label': 'imec0', - 'ap': self.dir3b / 'imec0' / 'sync_testing_g0_t0.imec0.ap.cbin', - 'lf': self.dir3b / 'imec0' / 'sync_testing_g0_t0.imec0.lf.bin', - 'path': self.dir3b / 'imec0'}, - {'label': 'imec1', - 'ap': self.dir3b / 'imec1' / 'sync_testing_g0_t0.imec1.ap.bin', - 'lf': self.dir3b / 'imec1' / 'sync_testing_g0_t0.imec1.lf.bin', - 'path': self.dir3b / 'imec1'}, - {'label': '', - 'nidq': self.dir3b / 'sync_testing_g0_t0.nidq.bin', - 'path': self.dir3b}] - self.dict3b_ch = [{'label': 'imec0', - 'ap': self.dir3b / 'imec0' / 'sync_testing_g0_t0.imec0.ap.ch', - 'lf': self.dir3b / 'imec0' / 'sync_testing_g0_t0.imec0.lf.ch', - 'path': self.dir3b / 'imec0'}, - {'label': 'imec1', - 'ap': self.dir3b / 'imec1' / 'sync_testing_g0_t0.imec1.ap.ch', - 'lf': self.dir3b / 'imec1' / 'sync_testing_g0_t0.imec1.lf.ch', - 'path': self.dir3b / 'imec1'}, - {'label': '', - 'nidq': self.dir3b / 'sync_testing_g0_t0.nidq.ch', - 'path': self.dir3b}] + self.dict3b = [ + { + "label": "imec0", + "ap": self.dir3b / "imec0" / "sync_testing_g0_t0.imec0.ap.cbin", + "lf": self.dir3b / "imec0" / "sync_testing_g0_t0.imec0.lf.bin", + "path": self.dir3b / "imec0", + }, + { + "label": "imec1", + "ap": self.dir3b / "imec1" / "sync_testing_g0_t0.imec1.ap.bin", + "lf": self.dir3b / "imec1" / "sync_testing_g0_t0.imec1.lf.bin", + "path": self.dir3b / "imec1", + }, + { + "label": "", + "nidq": self.dir3b / "sync_testing_g0_t0.nidq.bin", + "path": self.dir3b, + }, + ] + self.dict3b_ch = [ + { + "label": "imec0", + "ap": self.dir3b / "imec0" / "sync_testing_g0_t0.imec0.ap.ch", + "lf": self.dir3b / "imec0" / "sync_testing_g0_t0.imec0.lf.ch", + "path": self.dir3b / "imec0", + }, + { + "label": "imec1", + "ap": self.dir3b / "imec1" / "sync_testing_g0_t0.imec1.ap.ch", + "lf": self.dir3b / "imec1" / "sync_testing_g0_t0.imec1.lf.ch", + "path": self.dir3b / "imec1", + }, + { + "label": "", + "nidq": self.dir3b / "sync_testing_g0_t0.nidq.ch", + "path": self.dir3b, + }, + ] create_tree(self.dir3a, self.dict3a) create_tree(self.dir3b, self.dict3b) create_tree(self.dir3b, self.dict3b_ch) @@ -136,36 +162,41 @@ def create_tree(root_dir, dico): def test_glob_ephys(self): def dict_equals(d1, d2): return all([x in d1 for x in d2]) and all([x in d2 for x in d1]) + ef3b = spikeglx.glob_ephys_files(self.dir3b) ef3a = spikeglx.glob_ephys_files(self.dir3a) - ef3b_ch = spikeglx.glob_ephys_files(self.dir3b, ext='ch') + ef3b_ch = spikeglx.glob_ephys_files(self.dir3b, ext="ch") # test glob self.assertTrue(dict_equals(self.dict3a, ef3a)) self.assertTrue(dict_equals(self.dict3b, ef3b)) self.assertTrue(dict_equals(self.dict3b_ch, ef3b_ch)) # test the version from glob - self.assertTrue(spikeglx.get_neuropixel_version_from_files(ef3a) == '3A') - self.assertTrue(spikeglx.get_neuropixel_version_from_files(ef3b) == '3B') + self.assertTrue(spikeglx.get_neuropixel_version_from_files(ef3a) == "3A") + self.assertTrue(spikeglx.get_neuropixel_version_from_files(ef3b) == "3B") # test the version from paths - self.assertTrue(spikeglx.get_neuropixel_version_from_folder(self.dir3a) == '3A') - self.assertTrue(spikeglx.get_neuropixel_version_from_folder(self.dir3b) == '3B') - self.dir3b.joinpath('imec1', 'sync_testing_g0_t0.imec1.ap.bin').unlink() - self.assertEqual(spikeglx.glob_ephys_files(self.dir3b.joinpath('imec1')), []) + self.assertTrue(spikeglx.get_neuropixel_version_from_folder(self.dir3a) == "3A") + self.assertTrue(spikeglx.get_neuropixel_version_from_folder(self.dir3b) == "3B") + self.dir3b.joinpath("imec1", 
"sync_testing_g0_t0.imec1.ap.bin").unlink() + self.assertEqual(spikeglx.glob_ephys_files(self.dir3b.joinpath("imec1")), []) def tearDown(self): shutil.rmtree(self.tmpdir) class TestsSpikeGLX_compress(unittest.TestCase): - def setUp(self): self._tempdir = tempfile.TemporaryDirectory() # self.addClassCleanup(self._tempdir.cleanup) # py3.8 self.workdir = Path(self._tempdir.name) - file_meta = TEST_PATH.joinpath('sample3A_short_g0_t0.imec.ap.meta') + file_meta = TEST_PATH.joinpath("sample3A_short_g0_t0.imec.ap.meta") self.file_bin = spikeglx._mock_spikeglx_file( - self.workdir.joinpath('sample3A_short_g0_t0.imec.ap.bin'), file_meta, ns=76104, - nc=385, sync_depth=16, random=True)['bin_file'] + self.workdir.joinpath("sample3A_short_g0_t0.imec.ap.bin"), + file_meta, + ns=76104, + nc=385, + sync_depth=16, + random=True, + )["bin_file"] self.sr = spikeglx.Reader(self.file_bin) assert self.sr._raw is not None assert self.sr.is_open @@ -176,7 +207,7 @@ def tearDown(self): def test_read_slices(self): sr = self.sr - s2mv = sr.channel_conversion_sample2v['ap'][0] + s2mv = sr.channel_conversion_sample2v["ap"][0] # test the slicing of reader object self.assertTrue(np.all(np.isclose(sr._raw[5:500, :-1] * s2mv, sr[5:500, :-1]))) self.assertTrue(np.all(np.isclose(sr._raw[5:500, 5] * s2mv, sr[5:500, 5]))) @@ -186,7 +217,6 @@ def test_read_slices(self): self.assertTrue(np.all(np.isclose(sr._raw[5:500] * s2mv, sr[5:500])[:, :-1])) def test_compress(self): - def compare_data(sr0, sr1): # test direct reading through memmap / mtscompreader self.assertTrue(np.all(sr0._raw[1200:1210, 12] == sr1._raw[1200:1210, 12])) @@ -201,10 +231,12 @@ def compare_data(sr0, sr1): self.sr.close() # create a reference file that will serve to compare for inplace operations - ref_file = self.file_bin.parent.joinpath('REF_' + self.file_bin.name) - ref_meta = self.file_bin.parent.joinpath('REF_' + self.file_bin.with_suffix('.meta').name) + ref_file = self.file_bin.parent.joinpath("REF_" + self.file_bin.name) + ref_meta = self.file_bin.parent.joinpath( + "REF_" + self.file_bin.with_suffix(".meta").name + ) shutil.copy(self.file_bin, ref_file) - shutil.copy(self.file_bin.with_suffix('.meta'), ref_meta) + shutil.copy(self.file_bin.with_suffix(".meta"), ref_meta) # test file compression copy with spikeglx.Reader(ref_file, open=False) as sr_ref: @@ -227,11 +259,10 @@ def compare_data(sr0, sr1): class TestsSpikeGLX_Meta(unittest.TestCase): - def setUp(self): self.workdir = TEST_PATH - self.meta_files = list(Path.glob(self.workdir, '*.meta')) - self.tmpdir = Path(tempfile.gettempdir()) / 'test_meta' + self.meta_files = list(Path.glob(self.workdir, "*.meta")) + self.tmpdir = Path(tempfile.gettempdir()) / "test_meta" self.tmpdir.mkdir(exist_ok=True) def tearDown(self) -> None: @@ -239,115 +270,154 @@ def tearDown(self) -> None: def test_fix_meta_file(self): # test the case where the meta file shows a larger amount of samples - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: bin_3a = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sample3A_g0_t0.imec.ap.bin'), - self.workdir / 'sample3A_g0_t0.imec.ap.meta', ns=32, nc=385, sync_depth=16) - with open(bin_3a['bin_file'], 'wb') as fp: + Path(tdir).joinpath("sample3A_g0_t0.imec.ap.bin"), + self.workdir / "sample3A_g0_t0.imec.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) + with open(bin_3a["bin_file"], "wb") as fp: np.random.randint(-20000, 20000, 385 * 22, dtype=np.int16).tofile(fp) - with 
spikeglx.Reader(bin_3a['bin_file'], open=False) as sr: - verifiable = sr.meta['fileTimeSecs'] * 30000 + with spikeglx.Reader(bin_3a["bin_file"], open=False) as sr: + verifiable = sr.meta["fileTimeSecs"] * 30000 self.assertEqual(verifiable, 22) def test_read_corrupt(self): # nidq has 1 analog and 1 digital sync channels - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: int2volts = 5 / 32768 nidq = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sample3B_g0_t0.nidq.bin'), - self.workdir / 'sample3B_g0_t0.nidq.meta', - ns=32, nc=2, sync_depth=8, int2volts=int2volts, corrupt=True) + Path(tdir).joinpath("sample3B_g0_t0.nidq.bin"), + self.workdir / "sample3B_g0_t0.nidq.meta", + ns=32, + nc=2, + sync_depth=8, + int2volts=int2volts, + corrupt=True, + ) self.assert_read_glx(nidq) def test_read_nidq(self): # nidq has 1 analog and 1 digital sync channels - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: int2volts = 5 / 32768 nidq = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sample3B_g0_t0.nidq.bin'), - self.workdir / 'sample3B_g0_t0.nidq.meta', - ns=32, nc=2, sync_depth=8, int2volts=int2volts) + Path(tdir).joinpath("sample3B_g0_t0.nidq.bin"), + self.workdir / "sample3B_g0_t0.nidq.meta", + ns=32, + nc=2, + sync_depth=8, + int2volts=int2volts, + ) self.assert_read_glx(nidq) def test_read_geometry_new_version_2023_04(self): - g_new = spikeglx.read_geometry(Path(TEST_PATH).joinpath('sample3B_version202304.ap.meta')) - g_old = spikeglx.read_geometry(Path(TEST_PATH).joinpath('sample3A_g0_t0.imec.ap.meta')) + g_new = spikeglx.read_geometry( + Path(TEST_PATH).joinpath("sample3B_version202304.ap.meta") + ) + g_old = spikeglx.read_geometry( + Path(TEST_PATH).joinpath("sample3A_g0_t0.imec.ap.meta") + ) for k in g_old.keys(): - if k == 'flag': + if k == "flag": continue np.testing.assert_array_equal(g_new[k], g_old[k]) def test_read_geometry(self): - - g = spikeglx.read_geometry(Path(TEST_PATH).joinpath('sample3A_g0_t0.imec.ap.meta')) + g = spikeglx.read_geometry( + Path(TEST_PATH).joinpath("sample3A_g0_t0.imec.ap.meta") + ) sizes = np.array([g[k].size for k in g]) np.testing.assert_array_equal(sizes, 384) - g = spikeglx.read_geometry(Path(TEST_PATH).joinpath('sample3A_376_channels.ap.meta')) + g = spikeglx.read_geometry( + Path(TEST_PATH).joinpath("sample3A_376_channels.ap.meta") + ) sizes = np.array([g[k].size for k in g]) np.testing.assert_array_equal(sizes, 276) def test_read_3A(self): - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: bin_3a = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sample3A_g0_t0.imec.ap.bin'), - self.workdir / 'sample3A_g0_t0.imec.ap.meta', - ns=32, nc=385, sync_depth=16) + Path(tdir).joinpath("sample3A_g0_t0.imec.ap.bin"), + self.workdir / "sample3A_g0_t0.imec.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) self.assert_read_glx(bin_3a) def test_read_3B(self): - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: bin_3b = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sample3B_g0_t0.imec1.ap.bin'), - self.workdir / 'sample3B_g0_t0.imec1.ap.meta', - ns=32, nc=385, sync_depth=16) + Path(tdir).joinpath("sample3B_g0_t0.imec1.ap.bin"), + self.workdir / "sample3B_g0_t0.imec1.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) self.assert_read_glx(bin_3b) def 
test_read_NP21(self): - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: bin_3b = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sampleNP2.1_g0_t0.imec.ap.bin'), - self.workdir / 'sampleNP2.1_g0_t0.imec.ap.meta', - ns=32, nc=385, sync_depth=16) + Path(tdir).joinpath("sampleNP2.1_g0_t0.imec.ap.bin"), + self.workdir / "sampleNP2.1_g0_t0.imec.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) self.assert_read_glx(bin_3b) def test_read_NP24(self): - with tempfile.TemporaryDirectory(prefix='glx_test') as tdir: + with tempfile.TemporaryDirectory(prefix="glx_test") as tdir: bin_3b = spikeglx._mock_spikeglx_file( - Path(tdir).joinpath('sampleNP2.4_4shanks_g0_t0.imec.ap.bin'), - self.workdir / 'sampleNP2.4_4shanks_g0_t0.imec.ap.meta', - ns=32, nc=385, sync_depth=16) + Path(tdir).joinpath("sampleNP2.4_4shanks_g0_t0.imec.ap.bin"), + self.workdir / "sampleNP2.4_4shanks_g0_t0.imec.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) self.assert_read_glx(bin_3b) def test_check_ephys_file(self): - self.tdir = tempfile.TemporaryDirectory(prefix='glx_test') + self.tdir = tempfile.TemporaryDirectory(prefix="glx_test") self.addCleanup(self.tdir.cleanup) bin_3b = spikeglx._mock_spikeglx_file( - Path(self.tdir.name).joinpath('sample3B_g0_t0.imec1.ap.bin'), - self.workdir / 'sample3B_g0_t0.imec1.ap.meta', - ns=32, nc=385, sync_depth=16) - self.assertEqual(hashfile.md5(bin_3b['bin_file']), "207ba1666b866a091e5bb8b26d19733f") - self.assertEqual(hashfile.sha1(bin_3b['bin_file']), - '1bf3219c35dea15409576f6764dd9152c3f8a89c') - sr = spikeglx.Reader(bin_3b['bin_file'], open=False) + Path(self.tdir.name).joinpath("sample3B_g0_t0.imec1.ap.bin"), + self.workdir / "sample3B_g0_t0.imec1.ap.meta", + ns=32, + nc=385, + sync_depth=16, + ) + self.assertEqual( + hashfile.md5(bin_3b["bin_file"]), "207ba1666b866a091e5bb8b26d19733f" + ) + self.assertEqual( + hashfile.sha1(bin_3b["bin_file"]), + "1bf3219c35dea15409576f6764dd9152c3f8a89c", + ) + sr = spikeglx.Reader(bin_3b["bin_file"], open=False) self.assertTrue(sr.verify_hash()) def assert_read_glx(self, tglx): - with spikeglx.Reader(tglx['bin_file']) as sr: - dexpected = sr.channel_conversion_sample2v[sr.type] * tglx['D'] - d, sync = sr.read_samples(0, tglx['ns']) + with spikeglx.Reader(tglx["bin_file"]) as sr: + dexpected = sr.channel_conversion_sample2v[sr.type] * tglx["D"] + d, sync = sr.read_samples(0, tglx["ns"]) # could be rounding errors with non-integer sampling rates self.assertTrue(sr.nsync == 1) self.assertTrue(sr.rl == sr.ns / sr.fs) - self.assertTrue(sr.nc == tglx['nc']) - self.assertTrue(sr.ns == tglx['ns']) + self.assertTrue(sr.nc == tglx["nc"]) + self.assertTrue(sr.ns == tglx["ns"]) # test the data reading with gain self.assertTrue(np.all(np.isclose(dexpected, d))) # test the sync reading, one front per channel - self.assertTrue(np.sum(sync) == tglx['sync_depth']) - for m in np.arange(tglx['sync_depth']): + self.assertTrue(np.sum(sync) == tglx["sync_depth"]) + for m in np.arange(tglx["sync_depth"]): self.assertTrue(sync[m + 1, m] == 1) - if sr.type in ['ap', 'lf']: # exclude nidq from the slicing circus + if sr.type in ["ap", "lf"]: # exclude nidq from the slicing circus # test reading only one channel d, _ = sr.read(slice(None), 10) self.assertTrue(np.all(np.isclose(d, dexpected[:, 10]))) @@ -389,23 +459,25 @@ def assert_read_glx(self, tglx): # test the channel geometries but skip when meta data doesn't correspond to NP if sr.major_version is not None: th = sr.geometry - h = 
neuropixel.trace_header(sr.major_version, nshank=np.unique(th['shank']).size) + h = neuropixel.trace_header( + sr.major_version, nshank=np.unique(th["shank"]).size + ) for k in h.keys(): - assert (np.all(th[k] == h[k])), print(k) + assert np.all(th[k] == h[k]), print(k) def testGetSerialNumber(self): self.meta_files.sort() high_expectations = { - 'sample3A_g0_t0.imec.ap.meta': 641251510, - 'sample3A_g0_t0.imec.lf.meta': 641251510, - 'sample3A_short_g0_t0.imec.ap.meta': 641251510, - 'sample3B2_exported.imec0.ap.meta': 17216703352, - 'sample3B_g0_t0.imec1.ap.meta': 18005116811, - 'sample3B_g0_t0.imec1.lf.meta': 18005116811, - 'sample3B_g0_t0.nidq.meta': None, - 'sampleNP2.1_g0_t0.imec.ap.meta': 19011116954, - 'sampleNP2.4_1shank_g0_t0.imec.ap.meta': 20403308181, - 'sampleNP2.4_4shanks_g0_t0.imec.ap.meta': 19011110513, + "sample3A_g0_t0.imec.ap.meta": 641251510, + "sample3A_g0_t0.imec.lf.meta": 641251510, + "sample3A_short_g0_t0.imec.ap.meta": 641251510, + "sample3B2_exported.imec0.ap.meta": 17216703352, + "sample3B_g0_t0.imec1.ap.meta": 18005116811, + "sample3B_g0_t0.imec1.lf.meta": 18005116811, + "sample3B_g0_t0.nidq.meta": None, + "sampleNP2.1_g0_t0.imec.ap.meta": 19011116954, + "sampleNP2.4_1shank_g0_t0.imec.ap.meta": 20403308181, + "sampleNP2.4_4shanks_g0_t0.imec.ap.meta": 19011110513, } for meta_data_file in self.meta_files: with self.subTest(meta_data_file=meta_data_file): @@ -416,11 +488,10 @@ def testGetSerialNumber(self): def testGetRevisionAndType(self): for meta_data_file in self.meta_files: - md = spikeglx.read_meta_data(meta_data_file) self.assertTrue(len(md.keys()) >= 37) - if meta_data_file.name.split('.')[-2] in ['lf', 'ap']: + if meta_data_file.name.split(".")[-2] in ["lf", "ap"]: # for ap and lf look for version number # test getting revision revision = meta_data_file.name[6:8] @@ -429,58 +500,66 @@ def testGetRevisionAndType(self): print(revision, minor, major) self.assertEqual(minor, revision) # test the major version - if revision.startswith('3'): + if revision.startswith("3"): assert major == 1 else: assert np.floor(major) == 2 # test getting acquisition type for all ap, lf and nidq - type = meta_data_file.name.split('.')[-2] + type = meta_data_file.name.split(".")[-2] self.assertEqual(spikeglx._get_type_from_meta(md), type) def testReadChannelGainAPLF(self): for meta_data_file in self.meta_files: - if meta_data_file.name.split('.')[-2] not in ['lf', 'ap']: + if meta_data_file.name.split(".")[-2] not in ["lf", "ap"]: continue md = spikeglx.read_meta_data(meta_data_file) cg = spikeglx._conversion_sample2v_from_meta(md) - if 'NP2' in spikeglx._get_neuropixel_version_from_meta(md): - i2v = md.get('imAiRangeMax') / int(md.get('imMaxInt')) - self.assertTrue(np.all(cg['lf'][0:-1] == i2v / 80)) - self.assertTrue(np.all(cg['ap'][0:-1] == i2v / 80)) + if "NP2" in spikeglx._get_neuropixel_version_from_meta(md): + i2v = md.get("imAiRangeMax") / int(md.get("imMaxInt")) + self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 80)) + self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 80)) else: - i2v = md.get('imAiRangeMax') / 512 - self.assertTrue(np.all(cg['lf'][0:-1] == i2v / 250)) - self.assertTrue(np.all(cg['ap'][0:-1] == i2v / 500)) + i2v = md.get("imAiRangeMax") / 512 + self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 250)) + self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 500)) # also test consistent dimension with nchannels nc = spikeglx._get_nchannels_from_meta(md) - self.assertTrue(len(cg['ap']) == len(cg['lf']) == nc) + self.assertTrue(len(cg["ap"]) == len(cg["lf"]) == nc) def 
testGetAnalogSyncIndex(self): for meta_data_file in self.meta_files: md = spikeglx.read_meta_data(meta_data_file) - if spikeglx._get_type_from_meta(md) in ['ap', 'lf']: - self.assertTrue(spikeglx._get_analog_sync_trace_indices_from_meta(md) == []) + if spikeglx._get_type_from_meta(md) in ["ap", "lf"]: + self.assertTrue( + spikeglx._get_analog_sync_trace_indices_from_meta(md) == [] + ) else: - self.assertEqual(spikeglx._get_analog_sync_trace_indices_from_meta(md), [0]) + self.assertEqual( + spikeglx._get_analog_sync_trace_indices_from_meta(md), [0] + ) def testReadChannelGainNIDQ(self): for meta_data_file in self.meta_files: - if meta_data_file.name.split('.')[-2] not in ['nidq']: + if meta_data_file.name.split(".")[-2] not in ["nidq"]: continue md = spikeglx.read_meta_data(meta_data_file) nc = spikeglx._get_nchannels_from_meta(md) cg = spikeglx._conversion_sample2v_from_meta(md) - i2v = md.get('niAiRangeMax') / 32768 - self.assertTrue(np.all(cg['nidq'][slice(0, int(np.sum(md.acqMnMaXaDw[:3])))] == i2v)) - self.assertTrue(np.all(cg['nidq'][slice(int(np.sum(md.acqMnMaXaDw[-1])), None)] == 1.)) - self.assertTrue(len(cg['nidq']) == nc) + i2v = md.get("niAiRangeMax") / 32768 + self.assertTrue( + np.all(cg["nidq"][slice(0, int(np.sum(md.acqMnMaXaDw[:3])))] == i2v) + ) + self.assertTrue( + np.all(cg["nidq"][slice(int(np.sum(md.acqMnMaXaDw[-1])), None)] == 1.0) + ) + self.assertTrue(len(cg["nidq"]) == nc) def testReadChannelMap(self): for meta_data_file in self.meta_files: md = spikeglx.read_meta_data(meta_data_file) cm = spikeglx._map_channels_from_meta(md) - if 'snsShankMap' in md.keys(): - self.assertEqual(set(cm.keys()), set(['shank', 'col', 'row', 'flag'])) + if "snsShankMap" in md.keys(): + self.assertEqual(set(cm.keys()), set(["shank", "col", "row", "flag"])) def testSplitSyncTrace(self): sc = np.uint16(2 ** np.linspace(-1, 15, 17)) @@ -501,14 +580,16 @@ def testWriteMetaData(self): def testSaveSubset(self): chns = np.r_[np.arange(0, 11), np.arange(50, 91), 384] subset = spikeglx._get_savedChans_subset(chns) - self.assertEqual(subset, '0:10,50:90,384') + self.assertEqual(subset, "0:10,50:90,384") chns = np.r_[np.arange(30, 101), np.arange(250, 301), 384] subset = spikeglx._get_savedChans_subset(chns) - self.assertEqual(subset, '30:100,250:300,384') + self.assertEqual(subset, "30:100,250:300,384") def test_write_meta_file(self): - meta = spikeglx.read_meta_data(Path(TEST_PATH).joinpath('sample3A_g0_t0.imec.ap.meta')) + meta = spikeglx.read_meta_data( + Path(TEST_PATH).joinpath("sample3A_g0_t0.imec.ap.meta") + ) with tempfile.TemporaryDirectory() as tmpdir: temp_meta = Path(tmpdir) / "sample.meta" spikeglx.write_meta_data(meta, temp_meta) @@ -520,10 +601,11 @@ class TestsBasicReader(unittest.TestCase): """ Tests the basic usage where there is a flat binary and no metadata associated """ + def test_read_flat_binary_float32(self): # here we expect no scaling to V applied and no sync trace as the format is float32 kwargs = dict(ns=60000, nc=384, fs=30000, dtype=np.float32) - data = np.random.randn(kwargs['ns'], kwargs['nc']).astype(np.float32) + data = np.random.randn(kwargs["ns"], kwargs["nc"]).astype(np.float32) with tempfile.TemporaryDirectory() as tmpdir: temp_bin = Path(tmpdir) / "sample.bin" with open(temp_bin, "w") as fp: @@ -539,7 +621,7 @@ def test_read_flat_binary_int16_with_sync(self): kwargs = dict(ns=60000, nc=385, fs=30000, dtype=np.int16) s2v = np.ones(385) * neuropixel.S2V_AP s2v[-1] = 1 - data = np.random.randn(kwargs['ns'], kwargs['nc']) / s2v + data = 
np.random.randn(kwargs["ns"], kwargs["nc"]) / s2v data[:, -1] = 1 data = data.astype(np.int16) with tempfile.TemporaryDirectory() as tmpdir: @@ -553,7 +635,10 @@ def test_read_flat_binary_int16_with_sync(self): print(sr.shape, kw) assert sr.nsync == 1 np.testing.assert_allclose( - sr[:, :-1], data[:, :-1].astype(np.float32) * neuropixel.S2V_AP, rtol=1e-5) + sr[:, :-1], + data[:, :-1].astype(np.float32) * neuropixel.S2V_AP, + rtol=1e-5, + ) np.testing.assert_array_equal(sr.sample2volts, s2v) def test_read_flat_binary_int16_no_sync(self): @@ -561,11 +646,11 @@ def test_read_flat_binary_int16_no_sync(self): np.random.seed(42) kwargs = dict(ns=60000, nc=384, fs=30000, dtype=np.int16) s2v = np.ones(384) * neuropixel.S2V_AP - data = np.random.randn(kwargs['ns'], kwargs['nc']) / s2v + data = np.random.randn(kwargs["ns"], kwargs["nc"]) / s2v data = data.astype(np.int16) with tempfile.TemporaryDirectory() as tmpdir: temp_bin = Path(tmpdir) / "sample.bin" - with open(temp_bin, mode='w') as fp: + with open(temp_bin, mode="w") as fp: data.tofile(fp) # test for both arguments specifed and auto-detection of filesize / nchannels for neuropixel for kw in (kwargs, {}): @@ -573,12 +658,30 @@ def test_read_flat_binary_int16_no_sync(self): with spikeglx.Reader(temp_bin, **kw) as sr: print(sr.shape, kw) np.testing.assert_allclose( - sr[:, :], data[:, :].astype(np.float32) * neuropixel.S2V_AP, rtol=1e-5) + sr[:, :], + data[:, :].astype(np.float32) * neuropixel.S2V_AP, + rtol=1e-5, + ) assert sr.nsync == 0 np.testing.assert_array_equal(sr.sample2volts, s2v) def test_load_meta_file_only(self): # here we load only a meta-file - meta_file = Path(TEST_PATH).joinpath('sample3B_g0_t0.imec1.ap.meta') + meta_file = Path(TEST_PATH).joinpath("sample3B_g0_t0.imec1.ap.meta") sr = spikeglx.Reader(meta_file) assert sr.shape == (24734244, 385) + + +class TestOnlineSpikeGlxReader(unittest.TestCase): + def test_read_current_acquisition(self): + file_meta_original = Path(TEST_PATH).joinpath( + "sampleNP2.4_4shanks_while_acquiring_incomplete.ap.meta" + ) + with tempfile.TemporaryDirectory() as td: + file_meta = Path(td).joinpath("test.ap.meta") + shutil.copy(file_meta_original, file_meta) + file_ap = file_meta.with_suffix(".bin") + np.memmap(file_ap, mode="w+", shape=(300_000, 385), dtype=np.int16) + sr = spikeglx.OnlineReader(file_ap) + # just try to read the last sample of the bunch + assert np.all(sr[299_999, :] == 0) diff --git a/src/tests/unit/cpu/test_waveforms.py b/src/tests/unit/cpu/test_waveforms.py index 68a7b1b..e9b4c88 100644 --- a/src/tests/unit/cpu/test_waveforms.py +++ b/src/tests/unit/cpu/test_waveforms.py @@ -12,41 +12,55 @@ def make_array_peak_through_tip(): - arr = np. array([[[1, 1, np.nan], - [2, 2, np.nan], - [3, 5, np.nan], - [4, 4, np.nan], - [4, -5, np.nan], - [4, -6, np.nan], - [4, 5, np.nan]], - - [[-8, 7, 7], - [-7, 7, 7], - [7, 7, 7], - [7, 7, 7], - [4, 5, 4], - [4, 5, 4], - [4, 5, 4]]]) + arr = np.array( + [ + [ + [1, 1, np.nan], + [2, 2, np.nan], + [3, 5, np.nan], + [4, 4, np.nan], + [4, -5, np.nan], + [4, -6, np.nan], + [4, 5, np.nan], + ], + [ + [-8, 7, 7], + [-7, 7, 7], + [7, 7, 7], + [7, 7, 7], + [4, 5, 4], + [4, 5, 4], + [4, 5, 4], + ], + ] + ) return arr def make_array_peak_through_tip_v2(): # Duplicating as above array throws error due to Nans when computing arr_pre - arr = np. 
array([[[1, 1, np.nan], - [2, 2, np.nan], - [3, 5, np.nan], - [4, 4, np.nan], - [4, -5, np.nan], - [4, -6, np.nan], - [4, 5, np.nan]], - - [[1, 1, 0], - [2, 2, 0], - [3, 5, 0], - [4, 4, 0], - [4, -5, 0], - [4, -8, 1], - [4, 5, 0]]]) + arr = np.array( + [ + [ + [1, 1, np.nan], + [2, 2, np.nan], + [3, 5, np.nan], + [4, 4, np.nan], + [4, -5, np.nan], + [4, -6, np.nan], + [4, 5, np.nan], + ], + [ + [1, 1, 0], + [2, 2, 0], + [3, 5, 0], + [4, 4, 0], + [4, -5, 0], + [4, -8, 1], + [4, 5, 0], + ], + ] + ) return arr @@ -61,8 +75,9 @@ def test_peak_through_tip_3d(): df = waveforms.compute_spike_features(arr) arr_out = waveforms.get_array_peak(arr, df) - np.testing.assert_equal(arr_out, np.array([[1, 2, 5, 4, -5, -6, 5], - [1, 2, 5, 4, -5, -8, 5]])) + np.testing.assert_equal( + arr_out, np.array([[1, 2, 5, 4, -5, -6, 5], [1, 2, 5, 4, -5, -8, 5]]) + ) np.testing.assert_equal(df.peak_trace_idx, np.array([1, 1])) np.testing.assert_equal(df.peak_time_idx, np.array([5, 5])) @@ -77,12 +92,16 @@ def test_peak_through_tip_3d(): def test_halfpeak_slopes(): # Load fixtures - folder_save = Path(waveforms.__file__).parents[1].joinpath('tests', 'unit', 'cpu', 'fixtures', 'waveform_sample') - arr_in = np.load(folder_save.joinpath('test_arr_in.npy')) - test_arr_peak = np.load(folder_save.joinpath('test_arr_peak.npy')) - test_df = pd.read_csv(folder_save.joinpath('test_df.csv')) + folder_save = ( + Path(waveforms.__file__) + .parents[1] + .joinpath("tests", "unit", "cpu", "fixtures", "waveform_sample") + ) + arr_in = np.load(folder_save.joinpath("test_arr_in.npy")) + test_arr_peak = np.load(folder_save.joinpath("test_arr_peak.npy")) + test_df = pd.read_csv(folder_save.joinpath("test_df.csv")) test_df = test_df.drop("Unnamed: 0", axis=1) # Dropping the "Unnamed: 0" column - df = waveforms.compute_spike_features(arr_in, fs=30000, recovery_duration_ms=.16) + df = waveforms.compute_spike_features(arr_in, fs=30000, recovery_duration_ms=0.16) # Array peak testing arr_peak = waveforms.get_array_peak(arr_in, df) np.testing.assert_equal(arr_peak, test_arr_peak) @@ -92,8 +111,13 @@ def test_halfpeak_slopes(): def test_dist_chanel_from_peak(): # Distance test - xyz_testd = np.stack((np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]), - np.array([[4, 0, 0], [2, 0, 0], [np.NaN, np.NaN, np.NaN], [1, 0, 0]])), axis=2) + xyz_testd = np.stack( + ( + np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]), + np.array([[4, 0, 0], [2, 0, 0], [np.NaN, np.NaN, np.NaN], [1, 0, 0]]), + ), + axis=2, + ) xyz_testd = np.swapaxes(xyz_testd, axis1=0, axis2=2) xyz_testd = np.swapaxes(xyz_testd, axis1=1, axis2=2) @@ -113,56 +137,23 @@ def test_reshape_wav_one_channel(): arr = make_array_peak_through_tip() arr_out = waveforms.reshape_wav_one_channel(arr) # Check against test - arr_tested = np.array([[[1.], - [2.], - [3.], - [4.], - [4.], - [4.], - [4.]], - [[1.], - [2.], - [5.], - [4.], - [-5.], - [-6.], - [5.]], - [[np.nan], - [np.nan], - [np.nan], - [np.nan], - [np.nan], - [np.nan], - [np.nan]], - [[-8.], - [-7.], - [7.], - [7.], - [4.], - [4.], - [4.]], - [[7.], - [7.], - [7.], - [7.], - [5.], - [5.], - [5.]], - [[7.], - [7.], - [7.], - [7.], - [4.], - [4.], - [4.]]]) + arr_tested = np.array( + [ + [[1.0], [2.0], [3.0], [4.0], [4.0], [4.0], [4.0]], + [[1.0], [2.0], [5.0], [4.0], [-5.0], [-6.0], [5.0]], + [[np.nan], [np.nan], [np.nan], [np.nan], [np.nan], [np.nan], [np.nan]], + [[-8.0], [-7.0], [7.0], [7.0], [4.0], [4.0], [4.0]], + [[7.0], [7.0], [7.0], [7.0], [5.0], [5.0], [5.0]], + [[7.0], [7.0], [7.0], [7.0], [4.0], [4.0], [4.0]], + ] + ) 
np.testing.assert_equal(arr_out, arr_tested) def test_weights_all_channels(): arr = make_array_peak_through_tip() weight = waveforms.weights_spk_ch(arr) - weight_tested = np.array([[4., -6., 0.], - [-8., 7., 7.]]) + weight_tested = np.array([[4.0, -6.0, 0.0], [-8.0, 7.0, 7.0]]) np.testing.assert_equal(weight, weight_tested) @@ -191,18 +182,22 @@ class TestWaveformExtractor(unittest.TestCase): # generate channel neighbor matrix for NP1, default radius 200um geom_dict = trace_header(version=1) geom = np.c_[geom_dict["x"], geom_dict["y"]] - channel_neighbors = utils.make_channel_index(geom, radius=200.) + channel_neighbors = utils.make_channel_index(geom, radius=200.0) # radius = 200um, 38 chans num_channels = 38 def test_extract_waveforms(self): - wfs, _, _ = waveforms.extract_wfs_array(self.arr, self.df, self.channel_neighbors) + wfs, _, _ = waveforms.extract_wfs_array( + self.arr, self.df, self.channel_neighbors + ) # first wf is a special case: it's at the top of the probe so the center # index is the actual channel index, and the rest of the wf has been padded # with NaNs - assert wfs[0, self.channels[0], self.trough_offset] == 1. - assert np.all(np.isnan(wfs[0, self.num_channels // 2 + self.channels[0] + 1:, :])) + assert wfs[0, self.channels[0], self.trough_offset] == 1.0 + assert np.all( + np.isnan(wfs[0, self.num_channels // 2 + self.channels[0] + 1 :, :]) + ) for i in range(1, 8): # center channel depends on odd/even of channel @@ -218,7 +213,7 @@ def test_extract_waveforms(self): centered_channel_idx = 18 else: centered_channel_idx = 19 - assert wfs[-1, centered_channel_idx, self.trough_offset] == 9. + assert wfs[-1, centered_channel_idx, self.trough_offset] == 9.0 def test_spike_window(self): # check that we have an error when the last spike window @@ -233,5 +228,7 @@ def test_nan_channel(self): # the user can set the flag and the result will be the same arr = self.arr.copy()[:, :-1] wfs = waveforms.extract_wfs_array(self.arr, self.df, self.channel_neighbors) - wfs_nan = waveforms.extract_wfs_array(arr, self.df, self.channel_neighbors, add_nan_trace=True) + wfs_nan = waveforms.extract_wfs_array( + arr, self.df, self.channel_neighbors, add_nan_trace=True + ) np.testing.assert_equal(wfs, wfs_nan) diff --git a/src/tests/unit/gpu/test_filter_gpu.py b/src/tests/unit/gpu/test_filter_gpu.py index 366097c..f30b464 100644 --- a/src/tests/unit/gpu/test_filter_gpu.py +++ b/src/tests/unit/gpu/test_filter_gpu.py @@ -7,17 +7,15 @@ class TestFilterGpuCpuParity(unittest.TestCase): - def test_parity(self): - GPU_TOL = 1e-3 - BUTTER_KWARGS = {'N': 3, 'Wn': 300 / 30000 * 2, 'btype': 'highpass'} + BUTTER_KWARGS = {"N": 3, "Wn": 300 / 30000 * 2, "btype": "highpass"} N_SIGNALS = 300 N_SAMPLES = 60000 - sos = butter(**BUTTER_KWARGS, output='sos') + sos = butter(**BUTTER_KWARGS, output="sos") array_cpu = np.cumsum(np.random.randn(N_SIGNALS, N_SAMPLES), axis=1) - array_gpu = cp.array(array_cpu, dtype='float32') + array_gpu = cp.array(array_cpu, dtype="float32") output_cpu = sosfiltfilt(sos, array_cpu) output_gpu = sosfiltfilt_gpu(sos, array_gpu) diff --git a/src/tests/unit/gpu/test_fourier.py b/src/tests/unit/gpu/test_fourier.py index 278cbc6..1e4565b 100644 --- a/src/tests/unit/gpu/test_fourier.py +++ b/src/tests/unit/gpu/test_fourier.py @@ -6,7 +6,6 @@ class TestFourierAlignmentGpuCpuParity(unittest.TestCase): - def test_parity(self): N_TIMES = 65600 N_CHANNELS = 384 From 667bb214762e788b5e70a95a04c4f257bfe673c5 Mon Sep 17 00:00:00 2001 From: olivier Date: Tue, 19 Mar 2024 11:47:25 +0000 Subject: [PATCH 
2/3] spikeglx 20230905: new probe version number and explicit site locations --- src/neuropixel.py | 20 ++-- src/spikeglx.py | 24 ++--- ...leNP2.4_4shanks_appVersion20230905.ap.meta | 62 +++++++++++ src/tests/unit/cpu/test_spikeglx.py | 100 ++++++++++-------- 4 files changed, 136 insertions(+), 70 deletions(-) create mode 100644 src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta diff --git a/src/neuropixel.py b/src/neuropixel.py index c6d1f55..18fe10b 100644 --- a/src/neuropixel.py +++ b/src/neuropixel.py @@ -59,6 +59,8 @@ TIP_SIZE_UM = 200 NC = 384 SITES_COORDINATES: np.array +# channel layouts for neuropixel probes as a function of the major version (1 or 2) +CHANNEL_GRID = {1: dict(DX=16, X0=11, DY=20, Y0=20), 2: dict(DX=32, X0=27, DY=15, Y0=20)} def _deprecated_sites_coordinates() -> np.array: @@ -95,12 +97,9 @@ def xy2rc(x, y, version=1): :param version: neuropixel major version 1 or 2 :return: dictionary with keys col and row """ - if version == 1: - col = (x - 11) / 16 - row = (y - 20) / 20 - elif np.floor(version) == 2: - col = x / 32 - row = y / 15 + grid = CHANNEL_GRID[np.floor(version)] + col = (x - grid['X0']) / grid['DX'] + row = (y - grid['Y0']) / grid['DY'] return {"col": col, "row": row} @@ -112,12 +111,9 @@ def rc2xy(row, col, version=1): :param version: neuropixel major version 1 or 2 :return: dictionary with keys x and y """ - if version == 1: - x = col * 16 + 11 - y = (row * 20) + 20 - elif np.floor(version) == 2: - x = col * 32 - y = row * 15 + grid = CHANNEL_GRID[np.floor(version)] + x = col * grid['DX'] + grid['X0'] + y = row * grid['DY'] + grid['Y0'] return {"x": x, "y": y} diff --git a/src/spikeglx.py b/src/spikeglx.py index 4382c1f..c5a0ed6 100644 --- a/src/spikeglx.py +++ b/src/spikeglx.py @@ -516,10 +516,10 @@ def _get_neuropixel_version_from_meta(md): else: return "3B1" # Neuropixel 2.0 single shank - if prb_type == 21: + elif prb_type == 21: return "NP2.1" # Neuropixel 2.0 four shank - if prb_type == 24: + elif prb_type == 24 or prb_type == 2013: return "NP2.4" @@ -600,9 +600,7 @@ def _geometry_from_meta(meta_data): cm = _map_channels_from_meta(meta_data) major_version = _get_neuropixel_major_version_from_meta(meta_data) if cm is None: - _logger.warning( - "Meta data doesn't have geometry (snsShankMap/snsGeomMap field), returning defaults" - ) + _logger.warning("Meta data doesn't have geometry (snsShankMap/snsGeomMap field), returning defaults") th = neuropixel.trace_header(version=major_version) th["flag"] = th["x"] * 0 + 1.0 return th @@ -610,18 +608,16 @@ def _geometry_from_meta(meta_data): # as of 2023-04 spikeglx stores only x, y coordinates of sites in um and no col / row. Here 
Here # we convert to col / row for consistency with previous versions if "x" in cm.keys(): - if ( - major_version == 1 - ): # the spike sorting channel maps have a flipped version of the channel map + # the spike sorting channel maps have a flipped version of the channel map + # there is a 20um offset between the probe tip and the first site in the coordinate conversion + if major_version == 1: th["x"] = 70 - (th["x"]) - th[ - "y" - ] += 20 # there is a 20um offset between the probe tip and the first site in the coordinate conversion + + th["y"] += 20 th.update(neuropixel.xy2rc(th["x"], th["y"], version=major_version)) else: - if ( - major_version == 1 - ): # the spike sorting channel maps have a flipped version of the channel map + # the spike sorting channel maps have a flipped version of the channel map + if major_version == 1: th["col"] = -cm["col"] * 2 + 2 + np.mod(cm["row"], 2) th.update(neuropixel.rc2xy(th["row"], th["col"], version=major_version)) th["sample_shift"], th["adc"] = neuropixel.adc_shifts( diff --git a/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta b/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta new file mode 100644 index 0000000..950c192 --- /dev/null +++ b/src/tests/unit/cpu/fixtures/sampleNP2.4_4shanks_appVersion20230905.ap.meta @@ -0,0 +1,62 @@ +acqApLfSy=384,0,1 +appVersion=20230905 +fileCreateTime=2024-03-13T14:40:27 +fileName=D:/NeuropixelData/459602/20240313/raw_ephys_data/_spikeGLX_ephysData_g1/_spikeGLX_ephysData_g1_imec1/_spikeGLX_ephysData_g1_t0.imec1.ap.bin +fileSHA1=8AD69E689092AF1D92E9CC09BF619BC06EFCFAB2 +fileSizeBytes=109318733370 +fileTimeSecs=4732.4127 +firstSample=2846884 +gateMode=Immediate +imAiRangeMax=0.62 +imAiRangeMin=-0.62 +imAnyChanFullBand=true +imCalibrated=true +imChan0apGain=100 +imDatApi=3.62 +imDatBs_fw=2.0.169 +imDatBsc_fw=3.2.189 +imDatBsc_hw=2.1 +imDatBsc_pn=NP2_QBSC_00 +imDatBsc_sn=21080730 +imDatFx_hw=0.1 +imDatFx_pn=NPM_FLEX_01 +imDatFx_sn=23200107 +imDatHs_hw=3.2 +imDatHs_pn=NPM_HS_31 +imDatHs_sn=23200107 +imDatPrb_dock=1 +imDatPrb_pn=NP2014 +imDatPrb_port=2 +imDatPrb_slot=3 +imDatPrb_sn=22420006883 +imDatPrb_type=2013 +imErrFlags_IS_CT_SR_LK_PP_SY=1 0 11361693 0 0 1 +imIsSvyRun=false +imLEDEnable=false +imLowLatency=false +imMaxInt=2048 +imSampRate=30000 +imStdby= +imSvyMaxBnk=-1 +imSvySecPerBnk=35 +imTipLength=206 +imTrgRising=true +imTrgSource=0 +imroFile=C:/Program Files/SpikeGLX/4shank_bottom_channels.imro +nDataDirs=1 +nSavedChans=385 +snsApLfSy=384,0,1 +snsSaveChanSubset=0:384 +syncImInputSlot=2 +syncSourceIdx=3 +syncSourcePeriod=1 +trigMode=Immediate +typeImEnabled=2 +typeNiEnabled=1 +typeObEnabled=0 +typeThis=imec +userNotes= +~imroTbl=(2013,384)(0 0 0 0 0)(1 0 0 0 1)(2 0 0 0 2)(3 0 0 0 3)(4 0 0 0 4)(5 0 0 0 5)(6 0 0 0 6)(7 0 0 0 7)(8 0 0 0 8)(9 0 0 0 9)(10 0 0 0 10)(11 0 0 0 11)(12 0 0 0 12)(13 0 0 0 13)(14 0 0 0 14)(15 0 0 0 15)(16 0 0 0 16)(17 0 0 0 17)(18 0 0 0 18)(19 0 0 0 19)(20 0 0 0 20)(21 0 0 0 21)(22 0 0 0 22)(23 0 0 0 23)(24 0 0 0 24)(25 0 0 0 25)(26 0 0 0 26)(27 0 0 0 27)(28 0 0 0 28)(29 0 0 0 29)(30 0 0 0 30)(31 0 0 0 31)(32 0 0 0 32)(33 0 0 0 33)(34 0 0 0 34)(35 0 0 0 35)(36 0 0 0 36)(37 0 0 0 37)(38 0 0 0 38)(39 0 0 0 39)(40 0 0 0 40)(41 0 0 0 41)(42 0 0 0 42)(43 0 0 0 43)(44 0 0 0 44)(45 0 0 0 45)(46 0 0 0 46)(47 0 0 0 47)(48 1 0 0 0)(49 1 0 0 1)(50 1 0 0 2)(51 1 0 0 3)(52 1 0 0 4)(53 1 0 0 5)(54 1 0 0 6)(55 1 0 0 7)(56 1 0 0 8)(57 1 0 0 9)(58 1 0 0 10)(59 1 0 0 11)(60 1 0 0 12)(61 1 0 0 13)(62 1 0 0 14)(63 1 0 0 15)(64 1 0 0 16)(65 1 0 0 17)(66 1 0 
0 18)(67 1 0 0 19)(68 1 0 0 20)(69 1 0 0 21)(70 1 0 0 22)(71 1 0 0 23)(72 1 0 0 24)(73 1 0 0 25)(74 1 0 0 26)(75 1 0 0 27)(76 1 0 0 28)(77 1 0 0 29)(78 1 0 0 30)(79 1 0 0 31)(80 1 0 0 32)(81 1 0 0 33)(82 1 0 0 34)(83 1 0 0 35)(84 1 0 0 36)(85 1 0 0 37)(86 1 0 0 38)(87 1 0 0 39)(88 1 0 0 40)(89 1 0 0 41)(90 1 0 0 42)(91 1 0 0 43)(92 1 0 0 44)(93 1 0 0 45)(94 1 0 0 46)(95 1 0 0 47)(96 0 0 0 48)(97 0 0 0 49)(98 0 0 0 50)(99 0 0 0 51)(100 0 0 0 52)(101 0 0 0 53)(102 0 0 0 54)(103 0 0 0 55)(104 0 0 0 56)(105 0 0 0 57)(106 0 0 0 58)(107 0 0 0 59)(108 0 0 0 60)(109 0 0 0 61)(110 0 0 0 62)(111 0 0 0 63)(112 0 0 0 64)(113 0 0 0 65)(114 0 0 0 66)(115 0 0 0 67)(116 0 0 0 68)(117 0 0 0 69)(118 0 0 0 70)(119 0 0 0 71)(120 0 0 0 72)(121 0 0 0 73)(122 0 0 0 74)(123 0 0 0 75)(124 0 0 0 76)(125 0 0 0 77)(126 0 0 0 78)(127 0 0 0 79)(128 0 0 0 80)(129 0 0 0 81)(130 0 0 0 82)(131 0 0 0 83)(132 0 0 0 84)(133 0 0 0 85)(134 0 0 0 86)(135 0 0 0 87)(136 0 0 0 88)(137 0 0 0 89)(138 0 0 0 90)(139 0 0 0 91)(140 0 0 0 92)(141 0 0 0 93)(142 0 0 0 94)(143 0 0 0 95)(144 1 0 0 48)(145 1 0 0 49)(146 1 0 0 50)(147 1 0 0 51)(148 1 0 0 52)(149 1 0 0 53)(150 1 0 0 54)(151 1 0 0 55)(152 1 0 0 56)(153 1 0 0 57)(154 1 0 0 58)(155 1 0 0 59)(156 1 0 0 60)(157 1 0 0 61)(158 1 0 0 62)(159 1 0 0 63)(160 1 0 0 64)(161 1 0 0 65)(162 1 0 0 66)(163 1 0 0 67)(164 1 0 0 68)(165 1 0 0 69)(166 1 0 0 70)(167 1 0 0 71)(168 1 0 0 72)(169 1 0 0 73)(170 1 0 0 74)(171 1 0 0 75)(172 1 0 0 76)(173 1 0 0 77)(174 1 0 0 78)(175 1 0 0 79)(176 1 0 0 80)(177 1 0 0 81)(178 1 0 0 82)(179 1 0 0 83)(180 1 0 0 84)(181 1 0 0 85)(182 1 0 0 86)(183 1 0 0 87)(184 1 0 0 88)(185 1 0 0 89)(186 1 0 0 90)(187 1 0 0 91)(188 1 0 0 92)(189 1 0 0 93)(190 1 0 0 94)(191 1 0 0 95)(192 2 0 0 0)(193 2 0 0 1)(194 2 0 0 2)(195 2 0 0 3)(196 2 0 0 4)(197 2 0 0 5)(198 2 0 0 6)(199 2 0 0 7)(200 2 0 0 8)(201 2 0 0 9)(202 2 0 0 10)(203 2 0 0 11)(204 2 0 0 12)(205 2 0 0 13)(206 2 0 0 14)(207 2 0 0 15)(208 2 0 0 16)(209 2 0 0 17)(210 2 0 0 18)(211 2 0 0 19)(212 2 0 0 20)(213 2 0 0 21)(214 2 0 0 22)(215 2 0 0 23)(216 2 0 0 24)(217 2 0 0 25)(218 2 0 0 26)(219 2 0 0 27)(220 2 0 0 28)(221 2 0 0 29)(222 2 0 0 30)(223 2 0 0 31)(224 2 0 0 32)(225 2 0 0 33)(226 2 0 0 34)(227 2 0 0 35)(228 2 0 0 36)(229 2 0 0 37)(230 2 0 0 38)(231 2 0 0 39)(232 2 0 0 40)(233 2 0 0 41)(234 2 0 0 42)(235 2 0 0 43)(236 2 0 0 44)(237 2 0 0 45)(238 2 0 0 46)(239 2 0 0 47)(240 3 0 0 0)(241 3 0 0 1)(242 3 0 0 2)(243 3 0 0 3)(244 3 0 0 4)(245 3 0 0 5)(246 3 0 0 6)(247 3 0 0 7)(248 3 0 0 8)(249 3 0 0 9)(250 3 0 0 10)(251 3 0 0 11)(252 3 0 0 12)(253 3 0 0 13)(254 3 0 0 14)(255 3 0 0 15)(256 3 0 0 16)(257 3 0 0 17)(258 3 0 0 18)(259 3 0 0 19)(260 3 0 0 20)(261 3 0 0 21)(262 3 0 0 22)(263 3 0 0 23)(264 3 0 0 24)(265 3 0 0 25)(266 3 0 0 26)(267 3 0 0 27)(268 3 0 0 28)(269 3 0 0 29)(270 3 0 0 30)(271 3 0 0 31)(272 3 0 0 32)(273 3 0 0 33)(274 3 0 0 34)(275 3 0 0 35)(276 3 0 0 36)(277 3 0 0 37)(278 3 0 0 38)(279 3 0 0 39)(280 3 0 0 40)(281 3 0 0 41)(282 3 0 0 42)(283 3 0 0 43)(284 3 0 0 44)(285 3 0 0 45)(286 3 0 0 46)(287 3 0 0 47)(288 2 0 0 48)(289 2 0 0 49)(290 2 0 0 50)(291 2 0 0 51)(292 2 0 0 52)(293 2 0 0 53)(294 2 0 0 54)(295 2 0 0 55)(296 2 0 0 56)(297 2 0 0 57)(298 2 0 0 58)(299 2 0 0 59)(300 2 0 0 60)(301 2 0 0 61)(302 2 0 0 62)(303 2 0 0 63)(304 2 0 0 64)(305 2 0 0 65)(306 2 0 0 66)(307 2 0 0 67)(308 2 0 0 68)(309 2 0 0 69)(310 2 0 0 70)(311 2 0 0 71)(312 2 0 0 72)(313 2 0 0 73)(314 2 0 0 74)(315 2 0 0 75)(316 2 0 0 76)(317 2 0 0 77)(318 2 0 0 78)(319 2 0 0 79)(320 2 0 0 80)(321 2 0 0 81)(322 2 0 0 82)(323 2 0 0 83)(324 
2 0 0 84)(325 2 0 0 85)(326 2 0 0 86)(327 2 0 0 87)(328 2 0 0 88)(329 2 0 0 89)(330 2 0 0 90)(331 2 0 0 91)(332 2 0 0 92)(333 2 0 0 93)(334 2 0 0 94)(335 2 0 0 95)(336 3 0 0 48)(337 3 0 0 49)(338 3 0 0 50)(339 3 0 0 51)(340 3 0 0 52)(341 3 0 0 53)(342 3 0 0 54)(343 3 0 0 55)(344 3 0 0 56)(345 3 0 0 57)(346 3 0 0 58)(347 3 0 0 59)(348 3 0 0 60)(349 3 0 0 61)(350 3 0 0 62)(351 3 0 0 63)(352 3 0 0 64)(353 3 0 0 65)(354 3 0 0 66)(355 3 0 0 67)(356 3 0 0 68)(357 3 0 0 69)(358 3 0 0 70)(359 3 0 0 71)(360 3 0 0 72)(361 3 0 0 73)(362 3 0 0 74)(363 3 0 0 75)(364 3 0 0 76)(365 3 0 0 77)(366 3 0 0 78)(367 3 0 0 79)(368 3 0 0 80)(369 3 0 0 81)(370 3 0 0 82)(371 3 0 0 83)(372 3 0 0 84)(373 3 0 0 85)(374 3 0 0 86)(375 3 0 0 87)(376 3 0 0 88)(377 3 0 0 89)(378 3 0 0 90)(379 3 0 0 91)(380 3 0 0 92)(381 3 0 0 93)(382 3 0 0 94)(383 3 0 0 95) +~muxTbl=(24,16)(0 1 32 33 64 65 96 97 128 129 160 161 192 193 224 225 256 257 288 289 320 321 352 353)(2 3 34 35 66 67 98 99 130 131 162 163 194 195 226 227 258 259 290 291 322 323 354 355)(4 5 36 37 68 69 100 101 132 133 164 165 196 197 228 229 260 261 292 293 324 325 356 357)(6 7 38 39 70 71 102 103 134 135 166 167 198 199 230 231 262 263 294 295 326 327 358 359)(8 9 40 41 72 73 104 105 136 137 168 169 200 201 232 233 264 265 296 297 328 329 360 361)(10 11 42 43 74 75 106 107 138 139 170 171 202 203 234 235 266 267 298 299 330 331 362 363)(12 13 44 45 76 77 108 109 140 141 172 173 204 205 236 237 268 269 300 301 332 333 364 365)(14 15 46 47 78 79 110 111 142 143 174 175 206 207 238 239 270 271 302 303 334 335 366 367)(16 17 48 49 80 81 112 113 144 145 176 177 208 209 240 241 272 273 304 305 336 337 368 369)(18 19 50 51 82 83 114 115 146 147 178 179 210 211 242 243 274 275 306 307 338 339 370 371)(20 21 52 53 84 85 116 117 148 149 180 181 212 213 244 245 276 277 308 309 340 341 372 373)(22 23 54 55 86 87 118 119 150 151 182 183 214 215 246 247 278 279 310 311 342 343 374 375)(24 25 56 57 88 89 120 121 152 153 184 185 216 217 248 249 280 281 312 313 344 345 376 377)(26 27 58 59 90 91 122 123 154 155 186 187 218 219 250 251 282 283 314 315 346 347 378 379)(28 29 60 61 92 93 124 125 156 157 188 189 220 221 252 253 284 285 316 317 348 349 380 381)(30 31 62 63 94 95 126 127 158 159 190 191 222 223 254 255 286 287 318 319 350 351 382 383) 
+~snsChanMap=(384,0,1)(AP0;0:0)(AP1;1:1)(AP2;2:2)(AP3;3:3)(AP4;4:4)(AP5;5:5)(AP6;6:6)(AP7;7:7)(AP8;8:8)(AP9;9:9)(AP10;10:10)(AP11;11:11)(AP12;12:12)(AP13;13:13)(AP14;14:14)(AP15;15:15)(AP16;16:16)(AP17;17:17)(AP18;18:18)(AP19;19:19)(AP20;20:20)(AP21;21:21)(AP22;22:22)(AP23;23:23)(AP24;24:24)(AP25;25:25)(AP26;26:26)(AP27;27:27)(AP28;28:28)(AP29;29:29)(AP30;30:30)(AP31;31:31)(AP32;32:32)(AP33;33:33)(AP34;34:34)(AP35;35:35)(AP36;36:36)(AP37;37:37)(AP38;38:38)(AP39;39:39)(AP40;40:40)(AP41;41:41)(AP42;42:42)(AP43;43:43)(AP44;44:44)(AP45;45:45)(AP46;46:46)(AP47;47:47)(AP48;48:96)(AP49;49:97)(AP50;50:98)(AP51;51:99)(AP52;52:100)(AP53;53:101)(AP54;54:102)(AP55;55:103)(AP56;56:104)(AP57;57:105)(AP58;58:106)(AP59;59:107)(AP60;60:108)(AP61;61:109)(AP62;62:110)(AP63;63:111)(AP64;64:112)(AP65;65:113)(AP66;66:114)(AP67;67:115)(AP68;68:116)(AP69;69:117)(AP70;70:118)(AP71;71:119)(AP72;72:120)(AP73;73:121)(AP74;74:122)(AP75;75:123)(AP76;76:124)(AP77;77:125)(AP78;78:126)(AP79;79:127)(AP80;80:128)(AP81;81:129)(AP82;82:130)(AP83;83:131)(AP84;84:132)(AP85;85:133)(AP86;86:134)(AP87;87:135)(AP88;88:136)(AP89;89:137)(AP90;90:138)(AP91;91:139)(AP92;92:140)(AP93;93:141)(AP94;94:142)(AP95;95:143)(AP96;96:48)(AP97;97:49)(AP98;98:50)(AP99;99:51)(AP100;100:52)(AP101;101:53)(AP102;102:54)(AP103;103:55)(AP104;104:56)(AP105;105:57)(AP106;106:58)(AP107;107:59)(AP108;108:60)(AP109;109:61)(AP110;110:62)(AP111;111:63)(AP112;112:64)(AP113;113:65)(AP114;114:66)(AP115;115:67)(AP116;116:68)(AP117;117:69)(AP118;118:70)(AP119;119:71)(AP120;120:72)(AP121;121:73)(AP122;122:74)(AP123;123:75)(AP124;124:76)(AP125;125:77)(AP126;126:78)(AP127;127:79)(AP128;128:80)(AP129;129:81)(AP130;130:82)(AP131;131:83)(AP132;132:84)(AP133;133:85)(AP134;134:86)(AP135;135:87)(AP136;136:88)(AP137;137:89)(AP138;138:90)(AP139;139:91)(AP140;140:92)(AP141;141:93)(AP142;142:94)(AP143;143:95)(AP144;144:144)(AP145;145:145)(AP146;146:146)(AP147;147:147)(AP148;148:148)(AP149;149:149)(AP150;150:150)(AP151;151:151)(AP152;152:152)(AP153;153:153)(AP154;154:154)(AP155;155:155)(AP156;156:156)(AP157;157:157)(AP158;158:158)(AP159;159:159)(AP160;160:160)(AP161;161:161)(AP162;162:162)(AP163;163:163)(AP164;164:164)(AP165;165:165)(AP166;166:166)(AP167;167:167)(AP168;168:168)(AP169;169:169)(AP170;170:170)(AP171;171:171)(AP172;172:172)(AP173;173:173)(AP174;174:174)(AP175;175:175)(AP176;176:176)(AP177;177:177)(AP178;178:178)(AP179;179:179)(AP180;180:180)(AP181;181:181)(AP182;182:182)(AP183;183:183)(AP184;184:184)(AP185;185:185)(AP186;186:186)(AP187;187:187)(AP188;188:188)(AP189;189:189)(AP190;190:190)(AP191;191:191)(AP192;192:192)(AP193;193:193)(AP194;194:194)(AP195;195:195)(AP196;196:196)(AP197;197:197)(AP198;198:198)(AP199;199:199)(AP200;200:200)(AP201;201:201)(AP202;202:202)(AP203;203:203)(AP204;204:204)(AP205;205:205)(AP206;206:206)(AP207;207:207)(AP208;208:208)(AP209;209:209)(AP210;210:210)(AP211;211:211)(AP212;212:212)(AP213;213:213)(AP214;214:214)(AP215;215:215)(AP216;216:216)(AP217;217:217)(AP218;218:218)(AP219;219:219)(AP220;220:220)(AP221;221:221)(AP222;222:222)(AP223;223:223)(AP224;224:224)(AP225;225:225)(AP226;226:226)(AP227;227:227)(AP228;228:228)(AP229;229:229)(AP230;230:230)(AP231;231:231)(AP232;232:232)(AP233;233:233)(AP234;234:234)(AP235;235:235)(AP236;236:236)(AP237;237:237)(AP238;238:238)(AP239;239:239)(AP240;240:288)(AP241;241:289)(AP242;242:290)(AP243;243:291)(AP244;244:292)(AP245;245:293)(AP246;246:294)(AP247;247:295)(AP248;248:296)(AP249;249:297)(AP250;250:298)(AP251;251:299)(AP252;252:300)(AP253;253:301)(AP254;254:302)(AP255;255:303)(AP256;256:304)(AP257;2
57:305)(AP258;258:306)(AP259;259:307)(AP260;260:308)(AP261;261:309)(AP262;262:310)(AP263;263:311)(AP264;264:312)(AP265;265:313)(AP266;266:314)(AP267;267:315)(AP268;268:316)(AP269;269:317)(AP270;270:318)(AP271;271:319)(AP272;272:320)(AP273;273:321)(AP274;274:322)(AP275;275:323)(AP276;276:324)(AP277;277:325)(AP278;278:326)(AP279;279:327)(AP280;280:328)(AP281;281:329)(AP282;282:330)(AP283;283:331)(AP284;284:332)(AP285;285:333)(AP286;286:334)(AP287;287:335)(AP288;288:240)(AP289;289:241)(AP290;290:242)(AP291;291:243)(AP292;292:244)(AP293;293:245)(AP294;294:246)(AP295;295:247)(AP296;296:248)(AP297;297:249)(AP298;298:250)(AP299;299:251)(AP300;300:252)(AP301;301:253)(AP302;302:254)(AP303;303:255)(AP304;304:256)(AP305;305:257)(AP306;306:258)(AP307;307:259)(AP308;308:260)(AP309;309:261)(AP310;310:262)(AP311;311:263)(AP312;312:264)(AP313;313:265)(AP314;314:266)(AP315;315:267)(AP316;316:268)(AP317;317:269)(AP318;318:270)(AP319;319:271)(AP320;320:272)(AP321;321:273)(AP322;322:274)(AP323;323:275)(AP324;324:276)(AP325;325:277)(AP326;326:278)(AP327;327:279)(AP328;328:280)(AP329;329:281)(AP330;330:282)(AP331;331:283)(AP332;332:284)(AP333;333:285)(AP334;334:286)(AP335;335:287)(AP336;336:336)(AP337;337:337)(AP338;338:338)(AP339;339:339)(AP340;340:340)(AP341;341:341)(AP342;342:342)(AP343;343:343)(AP344;344:344)(AP345;345:345)(AP346;346:346)(AP347;347:347)(AP348;348:348)(AP349;349:349)(AP350;350:350)(AP351;351:351)(AP352;352:352)(AP353;353:353)(AP354;354:354)(AP355;355:355)(AP356;356:356)(AP357;357:357)(AP358;358:358)(AP359;359:359)(AP360;360:360)(AP361;361:361)(AP362;362:362)(AP363;363:363)(AP364;364:364)(AP365;365:365)(AP366;366:366)(AP367;367:367)(AP368;368:368)(AP369;369:369)(AP370;370:370)(AP371;371:371)(AP372;372:372)(AP373;373:373)(AP374;374:374)(AP375;375:375)(AP376;376:376)(AP377;377:377)(AP378;378:378)(AP379;379:379)(AP380;380:380)(AP381;381:381)(AP382;382:382)(AP383;383:383)(SY0;384:384) 
+~snsGeomMap=(NP2014,4,250,70)(0:27:0:1)(0:59:0:1)(0:27:15:1)(0:59:15:1)(0:27:30:1)(0:59:30:1)(0:27:45:1)(0:59:45:1)(0:27:60:1)(0:59:60:1)(0:27:75:1)(0:59:75:1)(0:27:90:1)(0:59:90:1)(0:27:105:1)(0:59:105:1)(0:27:120:1)(0:59:120:1)(0:27:135:1)(0:59:135:1)(0:27:150:1)(0:59:150:1)(0:27:165:1)(0:59:165:1)(0:27:180:1)(0:59:180:1)(0:27:195:1)(0:59:195:1)(0:27:210:1)(0:59:210:1)(0:27:225:1)(0:59:225:1)(0:27:240:1)(0:59:240:1)(0:27:255:1)(0:59:255:1)(0:27:270:1)(0:59:270:1)(0:27:285:1)(0:59:285:1)(0:27:300:1)(0:59:300:1)(0:27:315:1)(0:59:315:1)(0:27:330:1)(0:59:330:1)(0:27:345:1)(0:59:345:1)(1:27:0:1)(1:59:0:1)(1:27:15:1)(1:59:15:1)(1:27:30:1)(1:59:30:1)(1:27:45:1)(1:59:45:1)(1:27:60:1)(1:59:60:1)(1:27:75:1)(1:59:75:1)(1:27:90:1)(1:59:90:1)(1:27:105:1)(1:59:105:1)(1:27:120:1)(1:59:120:1)(1:27:135:1)(1:59:135:1)(1:27:150:1)(1:59:150:1)(1:27:165:1)(1:59:165:1)(1:27:180:1)(1:59:180:1)(1:27:195:1)(1:59:195:1)(1:27:210:1)(1:59:210:1)(1:27:225:1)(1:59:225:1)(1:27:240:1)(1:59:240:1)(1:27:255:1)(1:59:255:1)(1:27:270:1)(1:59:270:1)(1:27:285:1)(1:59:285:1)(1:27:300:1)(1:59:300:1)(1:27:315:1)(1:59:315:1)(1:27:330:1)(1:59:330:1)(1:27:345:1)(1:59:345:1)(0:27:360:1)(0:59:360:1)(0:27:375:1)(0:59:375:1)(0:27:390:1)(0:59:390:1)(0:27:405:1)(0:59:405:1)(0:27:420:1)(0:59:420:1)(0:27:435:1)(0:59:435:1)(0:27:450:1)(0:59:450:1)(0:27:465:1)(0:59:465:1)(0:27:480:1)(0:59:480:1)(0:27:495:1)(0:59:495:1)(0:27:510:1)(0:59:510:1)(0:27:525:1)(0:59:525:1)(0:27:540:1)(0:59:540:1)(0:27:555:1)(0:59:555:1)(0:27:570:1)(0:59:570:1)(0:27:585:1)(0:59:585:1)(0:27:600:1)(0:59:600:1)(0:27:615:1)(0:59:615:1)(0:27:630:1)(0:59:630:1)(0:27:645:1)(0:59:645:1)(0:27:660:1)(0:59:660:1)(0:27:675:1)(0:59:675:1)(0:27:690:1)(0:59:690:1)(0:27:705:1)(0:59:705:1)(1:27:360:1)(1:59:360:1)(1:27:375:1)(1:59:375:1)(1:27:390:1)(1:59:390:1)(1:27:405:1)(1:59:405:1)(1:27:420:1)(1:59:420:1)(1:27:435:1)(1:59:435:1)(1:27:450:1)(1:59:450:1)(1:27:465:1)(1:59:465:1)(1:27:480:1)(1:59:480:1)(1:27:495:1)(1:59:495:1)(1:27:510:1)(1:59:510:1)(1:27:525:1)(1:59:525:1)(1:27:540:1)(1:59:540:1)(1:27:555:1)(1:59:555:1)(1:27:570:1)(1:59:570:1)(1:27:585:1)(1:59:585:1)(1:27:600:1)(1:59:600:1)(1:27:615:1)(1:59:615:1)(1:27:630:1)(1:59:630:1)(1:27:645:1)(1:59:645:1)(1:27:660:1)(1:59:660:1)(1:27:675:1)(1:59:675:1)(1:27:690:1)(1:59:690:1)(1:27:705:1)(1:59:705:1)(2:27:0:1)(2:59:0:1)(2:27:15:1)(2:59:15:1)(2:27:30:1)(2:59:30:1)(2:27:45:1)(2:59:45:1)(2:27:60:1)(2:59:60:1)(2:27:75:1)(2:59:75:1)(2:27:90:1)(2:59:90:1)(2:27:105:1)(2:59:105:1)(2:27:120:1)(2:59:120:1)(2:27:135:1)(2:59:135:1)(2:27:150:1)(2:59:150:1)(2:27:165:1)(2:59:165:1)(2:27:180:1)(2:59:180:1)(2:27:195:1)(2:59:195:1)(2:27:210:1)(2:59:210:1)(2:27:225:1)(2:59:225:1)(2:27:240:1)(2:59:240:1)(2:27:255:1)(2:59:255:1)(2:27:270:1)(2:59:270:1)(2:27:285:1)(2:59:285:1)(2:27:300:1)(2:59:300:1)(2:27:315:1)(2:59:315:1)(2:27:330:1)(2:59:330:1)(2:27:345:1)(2:59:345:1)(3:27:0:1)(3:59:0:1)(3:27:15:1)(3:59:15:1)(3:27:30:1)(3:59:30:1)(3:27:45:1)(3:59:45:1)(3:27:60:1)(3:59:60:1)(3:27:75:1)(3:59:75:1)(3:27:90:1)(3:59:90:1)(3:27:105:1)(3:59:105:1)(3:27:120:1)(3:59:120:1)(3:27:135:1)(3:59:135:1)(3:27:150:1)(3:59:150:1)(3:27:165:1)(3:59:165:1)(3:27:180:1)(3:59:180:1)(3:27:195:1)(3:59:195:1)(3:27:210:1)(3:59:210:1)(3:27:225:1)(3:59:225:1)(3:27:240:1)(3:59:240:1)(3:27:255:1)(3:59:255:1)(3:27:270:1)(3:59:270:1)(3:27:285:1)(3:59:285:1)(3:27:300:1)(3:59:300:1)(3:27:315:1)(3:59:315:1)(3:27:330:1)(3:59:330:1)(3:27:345:1)(3:59:345:1)(2:27:360:1)(2:59:360:1)(2:27:375:1)(2:59:375:1)(2:27:390:1)(2:59:390:1)(2:27:405:1)(2:59:405:1)(2:27:420:1)(2:59:420:1)(2:27:435:1)(
2:59:435:1)(2:27:450:1)(2:59:450:1)(2:27:465:1)(2:59:465:1)(2:27:480:1)(2:59:480:1)(2:27:495:1)(2:59:495:1)(2:27:510:1)(2:59:510:1)(2:27:525:1)(2:59:525:1)(2:27:540:1)(2:59:540:1)(2:27:555:1)(2:59:555:1)(2:27:570:1)(2:59:570:1)(2:27:585:1)(2:59:585:1)(2:27:600:1)(2:59:600:1)(2:27:615:1)(2:59:615:1)(2:27:630:1)(2:59:630:1)(2:27:645:1)(2:59:645:1)(2:27:660:1)(2:59:660:1)(2:27:675:1)(2:59:675:1)(2:27:690:1)(2:59:690:1)(2:27:705:1)(2:59:705:1)(3:27:360:1)(3:59:360:1)(3:27:375:1)(3:59:375:1)(3:27:390:1)(3:59:390:1)(3:27:405:1)(3:59:405:1)(3:27:420:1)(3:59:420:1)(3:27:435:1)(3:59:435:1)(3:27:450:1)(3:59:450:1)(3:27:465:1)(3:59:465:1)(3:27:480:1)(3:59:480:1)(3:27:495:1)(3:59:495:1)(3:27:510:1)(3:59:510:1)(3:27:525:1)(3:59:525:1)(3:27:540:1)(3:59:540:1)(3:27:555:1)(3:59:555:1)(3:27:570:1)(3:59:570:1)(3:27:585:1)(3:59:585:1)(3:27:600:1)(3:59:600:1)(3:27:615:1)(3:59:615:1)(3:27:630:1)(3:59:630:1)(3:27:645:1)(3:59:645:1)(3:27:660:1)(3:59:660:1)(3:27:675:1)(3:59:675:1)(3:27:690:1)(3:59:690:1)(3:27:705:1)(3:59:705:1) diff --git a/src/tests/unit/cpu/test_spikeglx.py b/src/tests/unit/cpu/test_spikeglx.py index 85d6212..7ef807c 100644 --- a/src/tests/unit/cpu/test_spikeglx.py +++ b/src/tests/unit/cpu/test_spikeglx.py @@ -314,16 +314,27 @@ def test_read_nidq(self): self.assert_read_glx(nidq) def test_read_geometry_new_version_2023_04(self): - g_new = spikeglx.read_geometry( - Path(TEST_PATH).joinpath("sample3B_version202304.ap.meta") - ) - g_old = spikeglx.read_geometry( - Path(TEST_PATH).joinpath("sample3A_g0_t0.imec.ap.meta") - ) - for k in g_old.keys(): - if k == "flag": - continue - np.testing.assert_array_equal(g_new[k], g_old[k]) + def assert_geom(hnew, href): + for k in href.keys(): + with self.subTest(key=k): + if k == "flag": + continue + np.testing.assert_array_equal(hnew[k], href[k]) + + g_new = spikeglx.read_geometry(Path(TEST_PATH).joinpath("sample3B_version202304.ap.meta")) + g_old = spikeglx.read_geometry(Path(TEST_PATH).joinpath("sample3A_g0_t0.imec.ap.meta")) + assert_geom(g_new, g_old) + g_new = spikeglx.read_geometry(Path(TEST_PATH).joinpath("sampleNP2.4_4shanks_appVersion20230905.ap.meta")) + g_old = spikeglx.read_geometry(Path(TEST_PATH).joinpath("sampleNP2.4_4shanks_g0_t0.imec.ap.meta")) + assert_geom(g_new, g_old) + # import matplotlib.pyplot as plt + # plt.plot(g_old['x'] + g_old['shank'] * 200, g_new['y'], 'o') + # plt.plot(g_new['x'] + g_new['shank'] * 200, g_new['y'], 'o') + # plt.axis('equal') # set axis equal + # plt.figure() + # plt.plot(g_old['col'] + g_old['shank'] * 5, g_new['row'], 'o') + # plt.plot(g_new['col'] + g_new['shank'] * 5, g_new['row'], 'o') + # plt.axis('equal') # set axis equal def test_read_geometry(self): g = spikeglx.read_geometry( @@ -488,43 +499,44 @@ def testGetSerialNumber(self): def testGetRevisionAndType(self): for meta_data_file in self.meta_files: - md = spikeglx.read_meta_data(meta_data_file) - self.assertTrue(len(md.keys()) >= 37) - - if meta_data_file.name.split(".")[-2] in ["lf", "ap"]: - # for ap and lf look for version number - # test getting revision - revision = meta_data_file.name[6:8] - minor = spikeglx._get_neuropixel_version_from_meta(md)[0:2] - major = spikeglx._get_neuropixel_major_version_from_meta(md) - print(revision, minor, major) - self.assertEqual(minor, revision) - # test the major version - if revision.startswith("3"): - assert major == 1 - else: - assert np.floor(major) == 2 - # test getting acquisition type for all ap, lf and nidq - type = meta_data_file.name.split(".")[-2] - self.assertEqual(spikeglx._get_type_from_meta(md), 
type) + with self.subTest(meta_data_file=meta_data_file): + md = spikeglx.read_meta_data(meta_data_file) + self.assertTrue(len(md.keys()) >= 37) + + if meta_data_file.name.split(".")[-2] in ["lf", "ap"]: + # for ap and lf look for version number + # test getting revision + revision = meta_data_file.name[6:8] + minor = spikeglx._get_neuropixel_version_from_meta(md)[0:2] + major = spikeglx._get_neuropixel_major_version_from_meta(md) + self.assertEqual(minor, revision) + # test the major version + if revision.startswith("3"): + self.assertEqual(major, 1) + else: + self.assertGreaterEqual(major, 2) + # test getting acquisition type for all ap, lf and nidq + type = meta_data_file.name.split(".")[-2] + self.assertEqual(spikeglx._get_type_from_meta(md), type) def testReadChannelGainAPLF(self): for meta_data_file in self.meta_files: - if meta_data_file.name.split(".")[-2] not in ["lf", "ap"]: - continue - md = spikeglx.read_meta_data(meta_data_file) - cg = spikeglx._conversion_sample2v_from_meta(md) - if "NP2" in spikeglx._get_neuropixel_version_from_meta(md): - i2v = md.get("imAiRangeMax") / int(md.get("imMaxInt")) - self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 80)) - self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 80)) - else: - i2v = md.get("imAiRangeMax") / 512 - self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 250)) - self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 500)) - # also test consistent dimension with nchannels - nc = spikeglx._get_nchannels_from_meta(md) - self.assertTrue(len(cg["ap"]) == len(cg["lf"]) == nc) + with self.subTest(meta_data_file=meta_data_file): + if meta_data_file.name.split(".")[-2] not in ["lf", "ap"]: + continue + md = spikeglx.read_meta_data(meta_data_file) + cg = spikeglx._conversion_sample2v_from_meta(md) + if "NP2" in spikeglx._get_neuropixel_version_from_meta(md): + i2v = md.get("imAiRangeMax") / int(md.get("imMaxInt")) + self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 80)) + self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 80)) + else: + i2v = md.get("imAiRangeMax") / 512 + self.assertTrue(np.all(cg["lf"][0:-1] == i2v / 250)) + self.assertTrue(np.all(cg["ap"][0:-1] == i2v / 500)) + # also test consistent dimension with nchannels + nc = spikeglx._get_nchannels_from_meta(md) + self.assertTrue(len(cg["ap"]) == len(cg["lf"]) == nc) def testGetAnalogSyncIndex(self): for meta_data_file in self.meta_files: From c40c61387721ae7bc25f0ad7ac25b9736af23ff0 Mon Sep 17 00:00:00 2001 From: olivier Date: Tue, 19 Mar 2024 11:57:58 +0000 Subject: [PATCH 3/3] flake and bump version number --- release_notes.md | 2 ++ setup.py | 2 +- src/ibldsp/cadzow.py | 14 ++++++-------- src/ibldsp/smooth.py | 4 ++-- src/neuropixel.py | 10 +++++----- src/tests/integration/cpu/test_destripe.py | 4 ++-- src/tests/unit/cpu/test_waveforms.py | 2 +- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/release_notes.md b/release_notes.md index 8f0aeb2..0253a63 100644 --- a/release_notes.md +++ b/release_notes.md @@ -1,4 +1,6 @@ # 0.10.0 +## 0.10.1 2024-03-19 +- ensure compatibility with spikeglx 202309 metadata coordinates ## 0.10.0 2024-03-14 - add support for online spikeglx reader diff --git a/setup.py b/setup.py index b42c42e..9f1667a 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setuptools.setup( name="ibl-neuropixel", - version="0.10.0", + version="0.10.1", author="The International Brain Laboratory", description="Collection of tools for Neuropixel 1.0 and 2.0 probes data", long_description=long_description, diff --git a/src/ibldsp/cadzow.py b/src/ibldsp/cadzow.py index 
f2184cd..35e58fa 100644 --- a/src/ibldsp/cadzow.py +++ b/src/ibldsp/cadzow.py @@ -135,17 +135,15 @@ def cadzow_np1( WAV = scipy.fft.rfft(wav[:, :]) padgain = scipy.signal.windows.hann(npad * 2)[:npad] WAV = np.r_[ - np.flipud(WAV[1 : npad + 1, :]) * padgain[:, np.newaxis], + np.flipud(WAV[1: npad + 1, :]) * padgain[:, np.newaxis], WAV, - np.flipud(WAV[-npad - 2 : -1, :]) * np.flipud(np.r_[padgain, 1])[:, np.newaxis], + np.flipud(WAV[-npad - 2: -1, :]) * np.flipud(np.r_[padgain, 1])[:, np.newaxis], ] # apply padding - x = np.r_[ - np.flipud(h["x"][1 : npad + 1]), h["x"], np.flipud(h["x"][-npad - 2 : -1]) - ] + x = np.r_[np.flipud(h["x"][1: npad + 1]), h["x"], np.flipud(h["x"][-npad - 2: -1])] y = np.r_[ - np.flipud(h["y"][1 : npad + 1]) - 120, + np.flipud(h["y"][1: npad + 1]) - 120, h["y"], - np.flipud(h["y"][-npad - 2 : -1]) + 120, + np.flipud(h["y"][-npad - 2: -1]) + 120, ] WAV_ = np.zeros_like(WAV) gain = np.zeros(ntr + npad * 2 + 1) @@ -167,6 +165,6 @@ def cadzow_np1( ) WAV_[firstx:lastx, :] += array * gw[:, np.newaxis] - WAV_ = WAV_[npad : -npad - 1] # remove padding + WAV_ = WAV_[npad: -npad - 1] # remove padding wav_ = scipy.fft.irfft(WAV_) return wav_ diff --git a/src/ibldsp/smooth.py b/src/ibldsp/smooth.py index 8dd698f..6db4e16 100644 --- a/src/ibldsp/smooth.py +++ b/src/ibldsp/smooth.py @@ -68,7 +68,7 @@ def rolling_window(x, window_len=11, window="blackman"): 'bartlett', 'blackman'" ) - s = np.r_[x[window_len - 1 : 0 : -1], x, x[-1:-window_len:-1]] + s = np.r_[x[window_len - 1: 0: -1], x, x[-1:-window_len:-1]] # print(len(s)) if window == "flat": # moving average w = np.ones(window_len, "d") @@ -76,7 +76,7 @@ def rolling_window(x, window_len=11, window="blackman"): w = eval("np." + window + "(window_len)") y = np.convolve(w / w.sum(), s, mode="valid") - return y[round((window_len / 2 - 1)) : round(-(window_len / 2))] + return y[round((window_len / 2 - 1)): round(-(window_len / 2))] def non_uniform_savgol(x, y, window, polynom): diff --git a/src/neuropixel.py b/src/neuropixel.py index 18fe10b..1d8c7a0 100644 --- a/src/neuropixel.py +++ b/src/neuropixel.py @@ -357,10 +357,10 @@ def _process_NP24(self, overwrite=False): for first, last in wg.firstlast: chunk_ap = self.sr[first:last, : self.napch].T - chunk_ap_sync = self.sr[first:last, self.idxsyncch :].T + chunk_ap_sync = self.sr[first:last, self.idxsyncch:].T chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T) chunk_lf_sync = self.extract_lfp_sync( - self.sr[first:last, self.idxsyncch :].T + self.sr[first:last, self.idxsyncch:].T ) chunk_ap2save = self._ind2save( @@ -466,7 +466,7 @@ def _process_NP21(self, overwrite=False, offset=0, **kwargs): chunk_lf = self.extract_lfp(self.sr[first:last, : self.napch].T) chunk_lf_sync = self.extract_lfp_sync( - self.sr[first:last, self.idxsyncch :].T + self.sr[first:last, self.idxsyncch:].T ) chunk_lf2save = self._ind2save( @@ -670,7 +670,7 @@ def _ind2save(self, chunk, chunk_sync, wg, ratio=1, etype="ap"): chunk[:, slice(*ind2save)].T / self.sr.channel_conversion_sample2v[etype][: self.napch], chunk_sync[:, slice(*ind2save)].T - / self.sr.channel_conversion_sample2v[etype][self.idxsyncch :], + / self.sr.channel_conversion_sample2v[etype][self.idxsyncch:], ] ).astype(np.int16) @@ -686,7 +686,7 @@ def extract_lfp(self, chunk): """ chunk[:, : self.samples_taper] *= self.taper[: self.samples_taper] - chunk[:, -self.samples_taper :] *= self.taper[self.samples_taper :] + chunk[:, -self.samples_taper:] *= self.taper[self.samples_taper:] chunk = scipy.signal.sosfiltfilt(self.sos_lp, 
chunk) chunk = chunk[:, :: self.ratio] return chunk diff --git a/src/tests/integration/cpu/test_destripe.py b/src/tests/integration/cpu/test_destripe.py index 5a51f3f..82e7337 100644 --- a/src/tests/integration/cpu/test_destripe.py +++ b/src/tests/integration/cpu/test_destripe.py @@ -126,7 +126,7 @@ def test_parallel_computation(self): self.sglx_instances.append(sr_four_append) assert sr_four_append.ns == 2 * sr_four.ns assert np.array_equal( - sr_four_append[sr_four.ns :, :], sr_four_append[: sr_four.ns, :] + sr_four_append[sr_four.ns:, :], sr_four_append[: sr_four.ns, :] ) assert np.array_equal(sr_four_append[: sr_four.ns, :], sr_four[:, :]) - assert np.array_equal(sr_four_append[sr_four.ns :, :], sr_four[:, :]) + assert np.array_equal(sr_four_append[sr_four.ns:, :], sr_four[:, :]) diff --git a/src/tests/unit/cpu/test_waveforms.py b/src/tests/unit/cpu/test_waveforms.py index e9b4c88..0eb0bec 100644 --- a/src/tests/unit/cpu/test_waveforms.py +++ b/src/tests/unit/cpu/test_waveforms.py @@ -196,7 +196,7 @@ def test_extract_waveforms(self): # with NaNs assert wfs[0, self.channels[0], self.trough_offset] == 1.0 assert np.all( - np.isnan(wfs[0, self.num_channels // 2 + self.channels[0] + 1 :, :]) + np.isnan(wfs[0, self.num_channels // 2 + self.channels[0] + 1:, :]) ) for i in range(1, 8):
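
A few of the patterns these patches touch are worth a short illustration. First, the `subTest` wrapping applied to the meta-file loops above: each file becomes its own reported pass/fail, so one malformed fixture no longer hides failures in the rest of the loop. A minimal sketch of the pattern, with illustrative file names rather than real fixtures from this repository:

    import unittest

    class TestMetaFiles(unittest.TestCase):
        meta_files = ["sample.ap.meta", "sample.lf.meta", "sample.nidq.meta"]

        def test_acquisition_type(self):
            for meta_file in self.meta_files:
                # each iteration reports independently; a failure here does
                # not abort the checks on the remaining files
                with self.subTest(meta_file=meta_file):
                    self.assertIn(meta_file.split(".")[-2], ("ap", "lf", "nidq"))

    if __name__ == "__main__":
        unittest.main()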
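The geometry assertions above, together with the 0.10.1 note on 202309 metadata coordinates, amount to a contract: both metadata flavours must yield the same per-channel layout. A usage sketch, assuming the two meta files exist on disk (the paths are placeholders, and the keys are the ones the test iterates over, with the version-specific "flag" entry skipped):

    from pathlib import Path
    import numpy as np
    import spikeglx

    g_new = spikeglx.read_geometry(Path("sampleNP2.4_4shanks_appVersion20230905.ap.meta"))
    g_old = spikeglx.read_geometry(Path("sampleNP2.4_4shanks_g0_t0.imec.ap.meta"))
    for k in ("x", "y", "shank", "row", "col"):
        # "flag" differs between metadata versions, so it is not compared
        np.testing.assert_array_equal(g_new[k], g_old[k])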
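testReadChannelGainAPLF encodes the sample-to-volts arithmetic: full-scale voltage over the ADC max integer, then divided by the per-band amplifier gain. A sketch with a hypothetical helper (sample2volts is not a library function, and the 0.6 V range with a 512 max int are the NP1 values the test's divisions imply):

    import numpy as np

    def sample2volts(raw, im_ai_range_max, im_max_int, gain):
        # volts per bit, then undo the on-probe amplifier gain
        i2v = im_ai_range_max / im_max_int
        return raw.astype(np.float64) * i2v / gain

    raw = np.array([-512, 0, 511], dtype=np.int16)
    ap_volts = sample2volts(raw, 0.6, 512, 500)  # NP1 AP band, gain 500
    lf_volts = sample2volts(raw, 0.6, 512, 250)  # NP1 LF band, gain 250
    # NP2 probes use a single gain of 80 and read imMaxInt from the metadata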
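The cadzow_np1 hunk reformatted above feathers reflected edge traces with half a Hann window before spatial filtering, so the denoiser sees no hard boundary at the first and last channels. A standalone sketch of that padding and its removal, run on random data for illustration:

    import numpy as np
    import scipy.signal

    npad, ntr, nfreq = 8, 32, 65
    WAV = np.random.randn(ntr, nfreq)  # (traces, frequencies)

    padgain = scipy.signal.windows.hann(npad * 2)[:npad]  # rising half-Hann
    head = np.flipud(WAV[1:npad + 1, :]) * padgain[:, np.newaxis]
    tail = np.flipud(WAV[-npad - 2:-1, :]) * np.flipud(np.r_[padgain, 1])[:, np.newaxis]
    WAV_padded = np.r_[head, WAV, tail]

    # after filtering, the pad is stripped exactly as the patch does it
    WAV_out = WAV_padded[npad:-npad - 1]
    assert WAV_out.shape == WAV.shape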
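The extract_lfp path touched in the neuropixel.py hunk follows a taper / zero-phase low-pass / decimate recipe. A self-contained sketch under assumed parameters (the 1000 Hz corner, the ratio of 12 and the taper length stand in for class attributes this patch does not show):

    import numpy as np
    import scipy.signal

    fs, ratio, ns_taper = 30000, 12, 64  # illustrative values
    sos_lp = scipy.signal.butter(3, 2 * 1000 / fs, "lowpass", output="sos")
    taper = scipy.signal.windows.hann(ns_taper * 2)

    chunk = np.random.randn(4, fs)            # (nchannels, nsamples)
    chunk[:, :ns_taper] *= taper[:ns_taper]   # fade in the left edge
    chunk[:, -ns_taper:] *= taper[ns_taper:]  # fade out the right edge
    chunk = scipy.signal.sosfiltfilt(sos_lp, chunk)  # zero-phase low-pass
    lf = chunk[:, ::ratio]                    # keep every 12th sample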
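Lastly, the waveform test above depends on channels requested past the probe edge coming back NaN-filled rather than zero-filled, which lets downstream code average with np.nanmean without edge bias. A toy illustration of the convention only (the real extract_waveforms signature is not reproduced here):

    import numpy as np

    num_channels, ns = 16, 121
    wf = np.full((num_channels, ns), np.nan)  # start fully NaN-padded
    valid = 10                                # channels that exist near this spike
    wf[:valid, :] = 0.0                       # extracted samples would be copied here
    assert np.all(np.isnan(wf[valid:, :]))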