Skip to content

Commit

Permalink
fixes for latest codespell release (#11047)
Browse files Browse the repository at this point in the history
* fixes for latest codespell release

* use URI ignore words in actions config

* FIX: Work around pandas dep

Co-authored-by: Eric Larson <[email protected]>
  • Loading branch information
drammock and larsoner committed Aug 24, 2022
1 parent 896744e commit 2816d13
Show file tree
Hide file tree
Showing 12 changed files with 30 additions and 16 deletions.
1 change: 1 addition & 0 deletions .github/workflows/codespell_and_flake.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,4 +41,5 @@ jobs:
quiet_level: '3'
builtin: 'clear,rare,informal,names'
ignore_words_file: 'ignore_words.txt'
uri_ignore_words_list: 'bu'
name: 'Run codespell'
6 changes: 3 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
PYTHON ?= python
PYTESTS ?= py.test
CTAGS ?= ctags
CODESPELL_SKIPS ?= "doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,references.bib,*.css"
CODESPELL_SKIPS ?= "doc/_build,doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,references.bib,*.css"
CODESPELL_DIRS ?= mne/ doc/ tutorials/ examples/
all: clean inplace test test-doc

Expand Down Expand Up @@ -103,10 +103,10 @@ flake:
@echo "flake8 passed"

codespell: # running manually
@codespell --builtin clear,rare,informal,names,usage -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS)
@codespell --builtin clear,rare,informal,names,usage -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt --uri-ignore-words-list=bu $(CODESPELL_DIRS)

codespell-error: # running on travis
@codespell --builtin clear,rare,informal,names,usage -i 0 -q 7 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS)
@codespell --builtin clear,rare,informal,names,usage -i 0 -q 7 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt --uri-ignore-words-list=bu $(CODESPELL_DIRS)

pydocstyle:
@echo "Running pydocstyle"
Expand Down
2 changes: 2 additions & 0 deletions ignore_words.txt
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,5 @@ nin
trough
recuse
ro
nam
bu
2 changes: 1 addition & 1 deletion mne/coreg.py
Original file line number Diff line number Diff line change
Expand Up @@ -1764,7 +1764,7 @@ def fit_fiducials(self, lpa_weight=1., nasion_weight=10., rpa_weight=1.,
self._log_dig_mri_distance('Start')
n_scale_params = self._n_scale_params
if n_scale_params == 3:
# enfore 1 even for 3-axis here (3 points is not enough)
# enforce 1 even for 3-axis here (3 points is not enough)
logger.info("Enforcing 1 scaling parameter for fit "
"with fiducials.")
n_scale_params = 1
Expand Down
2 changes: 1 addition & 1 deletion mne/decoding/ssd.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def fit(self, X, y=None):
self.filters_ = eigvects_[:, ix]
self.patterns_ = np.linalg.pinv(self.filters_)
# We assume that ordering by spectral ratio is more important
# than the initial ordering. This ording should be also learned when
# than the initial ordering. This ordering should be also learned when
# fitting.
X_ssd = self.filters_.T @ X[..., self.picks_, :]
sorter_spec = Ellipsis
Expand Down
2 changes: 1 addition & 1 deletion mne/evoked.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,7 +269,7 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None):
sfreq=self.info['sfreq'])
if self.baseline is not None and baseline is None:
raise ValueError('The data has already been baseline-corrected. '
'Cannot remove existing basline correction.')
'Cannot remove existing baseline correction.')
elif baseline is None:
# Do not rescale
logger.info(_log_rescale(None))
Expand Down
4 changes: 2 additions & 2 deletions mne/io/brainvision/brainvision.py
Original file line number Diff line number Diff line change
Expand Up @@ -931,7 +931,7 @@ def _parse_impedance(settings, recording_date=None):
Parameters
----------
settings : list
The header settings lines fom the VHDR/AHDR file.
The header settings lines from the VHDR/AHDR file.
recording_date : datetime.datetime | None
The date of the recording as extracted from the VMRK/AMRK file.
Expand Down Expand Up @@ -993,7 +993,7 @@ def _parse_impedance_ranges(settings):
Parameters
----------
settings : list
The header settings lines fom the VHDR/AHDR file.
The header settings lines from the VHDR/AHDR file.
Returns
-------
Expand Down
2 changes: 1 addition & 1 deletion mne/io/curry/curry.py
Original file line number Diff line number Diff line change
Expand Up @@ -393,7 +393,7 @@ def _first_hpi(fname):
break
else:
raise RuntimeError('Could not find valid HPI in %s' % (fname,))
# t is the first enttry
# t is the first entry
assert hpi.ndim == 1
hpi = hpi[1:]
hpi.shape = (-1, 5)
Expand Down
2 changes: 1 addition & 1 deletion mne/io/nihon/nihon.py
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ def __init__(self, fname, preload=False, verbose=None):
# Get annotations from LOG file
annots = _read_nihon_annotations(fname)

# Annotate acqusition skips
# Annotate acquisition skips
controlblock = self._header['controlblocks'][0]
cur_sample = 0
if controlblock['n_datablocks'] > 1:
Expand Down
2 changes: 1 addition & 1 deletion mne/preprocessing/tests/test_eeglab_infomax.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def test_mne_python_vs_eeglab():
% (method,
dict(eeg='eeg', mag='meg')[ch_type]))

# For comparasion against eeglab, make sure the following
# For comparison against eeglab, make sure the following
# parameters have the same value in mne_python and eeglab:
#
# - starting point
Expand Down
19 changes: 15 additions & 4 deletions mne/utils/dataframe.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

from ._logging import logger, verbose
from ..defaults import _handle_default
from ..fixes import _get_args


@verbose
Expand Down Expand Up @@ -47,6 +48,16 @@ def _convert_times(inst, times, time_format):
return times


def _inplace(df, method, **kwargs):
    """Handle transition: inplace=True (pandas <1.5) → copy=False (>=1.5)."""
    # Look up the bound method by name; callers use 'set_index' and 'rename'.
    bound_method = getattr(df, method)
    # pandas >= 1.5 replaced the deprecated ``inplace`` keyword with ``copy``
    # on these methods; sniff the signature to pick the right spelling.
    supports_copy = 'copy' in _get_args(bound_method)
    if supports_copy:
        # New API: returns a (non-copied) result frame.
        return bound_method(**kwargs, copy=False)
    # Old API: mutates ``df`` in place, then hand the same frame back so the
    # caller can uniformly rebind ``df = _inplace(...)``.
    bound_method(**kwargs, inplace=True)
    return df


@verbose
def _build_data_frame(inst, data, picks, long_format, mindex, index,
default_index, col_names=None, col_kind='channel',
Expand All @@ -63,16 +74,16 @@ def _build_data_frame(inst, data, picks, long_format, mindex, index,
df.insert(i, k, v)
# build Index
if long_format:
df.set_index(default_index, inplace=True)
df = _inplace(df, 'set_index', keys=default_index)
df.columns.name = col_kind
elif index is not None:
df.set_index(index, inplace=True)
df = _inplace(df, 'set_index', keys=index)
if set(index) == set(default_index):
df.columns.name = col_kind
# long format
if long_format:
df = df.stack().reset_index()
df.rename(columns={0: 'value'}, inplace=True)
df = _inplace(df, 'rename', columns={0: 'value'})
# add column for channel types (as appropriate)
ch_map = (None if isinstance(inst, _BaseSourceEstimate) else
dict(zip(np.array(inst.ch_names)[picks],
Expand All @@ -83,7 +94,7 @@ def _build_data_frame(inst, data, picks, long_format, mindex, index,
df.insert(col_index, 'ch_type', ch_type)
# restore index
if index is not None:
df.set_index(index, inplace=True)
df = _inplace(df, 'set_index', keys=index)
# convert channel/vertex/ch_type columns to factors
to_factor = [c for c in df.columns.tolist()
if c not in ('time', 'value')]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@
# an adjacency for time points was automatically taken into
# account. That is, at time point N, the time points N - 1 and
# N + 1 were considered as adjacent (this is also called "lattice
# adjacency"). This is only possbile because we ran the analysis on
# adjacency"). This is only possible because we ran the analysis on
# 2D data (times × channels) per observation ... for 3D data per
# observation (e.g., times × frequencies × channels), we will need
# to use :func:`mne.stats.combine_adjacency`, as shown further
Expand Down

0 comments on commit 2816d13

Please sign in to comment.