From 74e098581f06cbf078cdbb0eb3379e7e02d40bd4 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 9 Dec 2020 11:58:55 -0500 Subject: [PATCH 001/387] BUG: Fix passing of channel type (#8638) --- doc/changes/latest.inc | 2 + examples/preprocessing/plot_run_ica.py | 1 - examples/visualization/plot_evoked_topomap.py | 4 +- mne/evoked.py | 9 +- mne/preprocessing/ica.py | 10 +- mne/viz/ica.py | 7 +- mne/viz/tests/test_ica.py | 18 ++- mne/viz/tests/test_topomap.py | 13 +- mne/viz/topomap.py | 113 ++++++++++-------- .../plot_40_artifact_correction_ica.py | 1 - 10 files changed, 102 insertions(+), 76 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 17c553e6858..76cde15b9e1 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -109,6 +109,8 @@ Bugs - Fix bug with compensated CTF data when picking channels without preload (:gh:`8318` by `Eric Larson`_) +- Fix bug with plotting MEG topographies where the wrong extrapolation mode was used in ICA (:gh:`8637` by `Eric Larson`_) + - Fix bug when merging fNIRS channels in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8306` by `Robert Luke`_) - Fix bug where events could overflow when writing to FIF (:gh:`8448` by `Eric Larson`_) diff --git a/examples/preprocessing/plot_run_ica.py b/examples/preprocessing/plot_run_ica.py index fe5efa07e03..ecd665fa670 100644 --- a/examples/preprocessing/plot_run_ica.py +++ b/examples/preprocessing/plot_run_ica.py @@ -10,7 +10,6 @@ .. note:: This example does quite a bit of processing, so even on a fast machine it can take about a minute to complete. """ - # Authors: Denis Engemann # # License: BSD (3-clause) diff --git a/examples/visualization/plot_evoked_topomap.py b/examples/visualization/plot_evoked_topomap.py index 59185f92a9f..465e4ed282e 100644 --- a/examples/visualization/plot_evoked_topomap.py +++ b/examples/visualization/plot_evoked_topomap.py @@ -122,8 +122,8 @@ # Animating the topomap # --------------------- # -# Instead of using a still image we can plot magnetometer data as an animation -# (animates only in matplotlib interactive mode) +# Instead of using a still image we can plot magnetometer data as an animation, +# which animates properly only in matplotlib interactive mode. # sphinx_gallery_thumbnail_number = 9 times = np.arange(0.05, 0.151, 0.01) diff --git a/mne/evoked.py b/mne/evoked.py index c3527aba76e..f9181476cd6 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -381,7 +381,8 @@ def plot_joint(self, times="peaks", title='', picks=None, @fill_doc def animate_topomap(self, ch_type=None, times=None, frame_rate=None, butterfly=False, blit=True, show=True, time_unit='s', - sphere=None): + sphere=None, *, extrapolate=_EXTRAPOLATE_DEFAULT, + verbose=None): """Make animation of evoked data as topomap timeseries. The animation can be paused/resumed with left mouse button. @@ -418,6 +419,10 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, .. versionadded:: 0.16 %(topomap_sphere_auto)s + %(topomap_extrapolate)s + + .. versionadded:: 0.22 + %(verbose_meth)s Returns ------- @@ -433,7 +438,7 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, return _topomap_animation( self, ch_type=ch_type, times=times, frame_rate=frame_rate, butterfly=butterfly, blit=blit, show=show, time_unit=time_unit, - sphere=sphere) + sphere=sphere, extrapolate=extrapolate, verbose=verbose) def as_type(self, ch_type='grad', mode='fast'): """Compute virtual evoked using interpolated fields.
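A minimal usage sketch of the new keyword-only ``extrapolate`` argument (the file name and condition below are hypothetical; any ``Evoked`` with MEG channels works). With the default ``'auto'``, MEG channel types now resolve to ``'local'`` extrapolation rather than the hard-coded ``'box'`` mode the animation used before this patch::

    import mne

    evoked = mne.read_evokeds('sample_audvis-ave.fif',
                              condition='Left Auditory',
                              baseline=(None, 0))
    # both keyword-only arguments below were introduced by this patch
    fig, anim = evoked.animate_topomap(ch_type='grad', times=[0.09, 0.10],
                                       extrapolate='auto', verbose='debug')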
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index df0ba4e935e..6871dbf08b9 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -1800,7 +1800,7 @@ def plot_components(self, picks=None, ch_type=None, res=64, contours=6, image_interp='bilinear', inst=None, plot_std=True, topomap_args=None, image_args=None, psd_args=None, reject='auto', - sphere=None): + sphere=None, verbose=None): return plot_ica_components(self, picks=picks, ch_type=ch_type, res=res, vmin=vmin, vmax=vmax, cmap=cmap, sensors=sensors, @@ -1810,19 +1810,21 @@ def plot_components(self, picks=None, ch_type=None, res=64, inst=inst, plot_std=plot_std, topomap_args=topomap_args, image_args=image_args, psd_args=psd_args, - reject=reject, sphere=sphere) + reject=reject, sphere=sphere, + verbose=verbose) @copy_function_doc_to_method_doc(plot_ica_properties) def plot_properties(self, inst, picks=None, axes=None, dB=True, plot_std=True, topomap_args=None, image_args=None, psd_args=None, figsize=None, show=True, reject='auto', - reject_by_annotation=True): + reject_by_annotation=True, *, verbose=None): return plot_ica_properties(self, inst, picks=picks, axes=axes, dB=dB, plot_std=plot_std, topomap_args=topomap_args, image_args=image_args, psd_args=psd_args, figsize=figsize, show=show, reject=reject, - reject_by_annotation=reject_by_annotation) + reject_by_annotation=reject_by_annotation, + verbose=verbose) @copy_function_doc_to_method_doc(plot_ica_sources) def plot_sources(self, inst, picks=None, start=None, diff --git a/mne/viz/ica.py b/mne/viz/ica.py index 852190bb745..d18ae389587 100644 --- a/mne/viz/ica.py +++ b/mne/viz/ica.py @@ -22,7 +22,7 @@ from ..io.meas_info import create_info from ..io.pick import pick_types, _picks_to_idx from ..time_frequency.psd import psd_multitaper -from ..utils import _reject_data_segments +from ..utils import _reject_data_segments, verbose @fill_doc @@ -250,11 +250,11 @@ def _get_psd_label_and_std(this_psd, dB, ica, num_std): return psd_ylabel, psds_mean, spectrum_std -@fill_doc +@verbose def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, plot_std=True, topomap_args=None, image_args=None, psd_args=None, figsize=None, show=True, reject='auto', - reject_by_annotation=True): + reject_by_annotation=True, *, verbose=None): """Display component properties. Properties include the topography, epochs image, ERP/ERF, power @@ -308,6 +308,7 @@ def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, %(reject_by_annotation_raw)s .. 
versionadded:: 0.21.0 + %(verbose)s Returns ------- diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py index b6831db68a0..0bc09886a8f 100644 --- a/mne/viz/tests/test_ica.py +++ b/mne/viz/tests/test_ica.py @@ -14,7 +14,7 @@ make_fixed_length_events) from mne.io import read_raw_fif from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs -from mne.utils import (run_tests_if_main, requires_sklearn, _click_ch_name, +from mne.utils import (requires_sklearn, _click_ch_name, catch_logging, _close_event) from mne.viz.ica import _create_properties_layout, plot_ica_properties from mne.viz.utils import _fake_click @@ -70,7 +70,12 @@ def test_plot_ica_components(): plt.close('all') # test interactive mode (passing 'inst' arg) - ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16) + with catch_logging() as log: + ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16, + verbose='debug', ch_type='grad') + log = log.getvalue() + assert 'grad data' in log + assert 'Interpolation mode local to mean' in log fig = plt.gcf() # test title click @@ -133,7 +138,11 @@ def test_plot_ica_properties(): _create_properties_layout(figsize=(2, 2), fig=fig) topoargs = dict(topomap_args={'res': 4, 'contours': 0, "sensors": False}) - ica.plot_properties(raw, picks=0, **topoargs) + with catch_logging() as log: + ica.plot_properties(raw, picks=0, verbose='debug', **topoargs) + log = log.getvalue() + assert raw.ch_names[0] == 'MEG 0113' + assert 'Interpolation mode local to mean' in log, log ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs) ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5}, topomap_args={'res': 4, 'colorbar': True}, @@ -396,6 +405,3 @@ def test_plot_instance_components(): _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data') _fake_click(fig, ax, [-0.1, 0.9]) # click on y-label fig.canvas.key_press_event('escape') - - -run_tests_if_main() diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 5b89ee64b79..6ee9b2af9fa 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -133,22 +133,27 @@ def test_plot_projs_topomap(): plot_projs_topomap([eeg_proj], info_meg) -def test_plot_topomap_animation(): +def test_plot_topomap_animation(capsys): """Test topomap plotting.""" # evoked evoked = read_evokeds(evoked_fname, 'Left Auditory', baseline=(None, 0)) # Test animation _, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1], - butterfly=False, time_unit='s') + butterfly=False, time_unit='s', + verbose='debug') anim._func(1) # _animate has to be tested separately on 'Agg' backend. + out, _ = capsys.readouterr() + assert 'Interpolation mode local to 0' in out plt.close('all') -def test_plot_topomap_animation_nirs(fnirs_evoked): +def test_plot_topomap_animation_nirs(fnirs_evoked, capsys): """Test topomap plotting for nirs data.""" - fig, anim = fnirs_evoked.animate_topomap(ch_type='hbo') + fig, anim = fnirs_evoked.animate_topomap(ch_type='hbo', verbose='debug') anim._func(1) # _animate has to be tested separately on 'Agg' backend. 
+ out, _ = capsys.readouterr() + assert 'Interpolation mode head to 0' in out assert len(fig.axes) == 2 plt.close('all') diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index 9ecbf085608..828e4402ad4 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -767,6 +767,7 @@ def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, def _setup_interp(pos, res, extrapolate, sphere, outlines, border): + logger.debug(f'Interpolation mode {extrapolate} to {border}') xlim = np.inf, -np.inf, ylim = np.inf, -np.inf, mask_ = np.c_[outlines['mask_pos']] @@ -792,6 +793,30 @@ def _setup_interp(pos, res, extrapolate, sphere, outlines, border): return extent, Xi, Yi, interp +def _get_patch(outlines, extrapolate, interp, ax): + from matplotlib import patches + clip_radius = outlines['clip_radius'] + clip_origin = outlines.get('clip_origin', (0., 0.)) + _use_default_outlines = any(k.startswith('head') for k in outlines) + patch_ = None + if 'patch' in outlines: + patch_ = outlines['patch'] + patch_ = patch_() if callable(patch_) else patch_ + patch_.set_clip_on(False) + ax.add_patch(patch_) + ax.set_transform(ax.transAxes) + ax.set_clip_path(patch_) + if _use_default_outlines: + if extrapolate == 'local': + patch_ = patches.Polygon( + interp.mask_pts, clip_on=True, transform=ax.transData) + else: + patch_ = patches.Ellipse( + clip_origin, 2 * clip_radius[0], 2 * clip_radius[1], + clip_on=True, transform=ax.transData) + return patch_ + + def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, res=64, axes=None, names=None, show_names=False, mask=None, mask_params=None, outlines='head', @@ -831,10 +856,7 @@ def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, picks = list(range(data.shape[0])) pos = _find_topomap_coords(pos, picks=picks, sphere=sphere) - _check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto')) - if extrapolate == 'auto': - extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head' - + extrapolate = _check_extrapolate(extrapolate, ch_type) if data.ndim > 1: raise ValueError("Data needs to be array of shape (n_sensors,); got " "shape %s." 
% str(data.shape)) @@ -875,35 +897,16 @@ def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, ax = axes if axes else plt.gca() _prepare_topomap(pos, ax) - _use_default_outlines = any(k.startswith('head') for k in outlines) mask_params = _handle_default('mask_params', mask_params) # find mask limits - clip_radius = outlines['clip_radius'] - clip_origin = outlines.get('clip_origin', (0., 0.)) extent, Xi, Yi, interp = _setup_interp( pos, res, extrapolate, sphere, outlines, border) interp.set_values(data) Zi = interp.set_locations(Xi, Yi)() # plot outline - patch_ = None - if 'patch' in outlines: - patch_ = outlines['patch'] - patch_ = patch_() if callable(patch_) else patch_ - patch_.set_clip_on(False) - ax.add_patch(patch_) - ax.set_transform(ax.transAxes) - ax.set_clip_path(patch_) - if _use_default_outlines: - from matplotlib import patches - if extrapolate == 'local': - patch_ = patches.Polygon( - interp.mask_pts, clip_on=True, transform=ax.transData) - else: - patch_ = patches.Ellipse( - clip_origin, 2 * clip_radius[0], 2 * clip_radius[1], - clip_on=True, transform=ax.transData) + patch_ = _get_patch(outlines, extrapolate, interp, ax) # plot interpolated map im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower', @@ -1010,7 +1013,7 @@ def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64, data.ravel(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=axes, cmap=cmap, outlines=outlines, contours=contours, sensors=sensors, image_interp=image_interp, show=show, extrapolate=extrapolate, - sphere=sphere, border=border)[0] + sphere=sphere, border=border, ch_type=ch_type)[0] if colorbar: cbar, cax = _add_colorbar(axes, im, cmap, pad=.05, title="AU", format='%3.2f') @@ -1019,7 +1022,7 @@ def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64, _hide_frame(axes) -@fill_doc +@verbose def plot_ica_components(ica, picks=None, ch_type=None, res=64, vmin=None, vmax=None, cmap='RdBu_r', sensors=True, colorbar=False, title=None, @@ -1027,7 +1030,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, image_interp='bilinear', inst=None, plot_std=True, topomap_args=None, image_args=None, psd_args=None, reject='auto', - sphere=None): + sphere=None, *, verbose=None): """Project mixing matrix on interpolated sensor topography. Parameters @@ -1109,6 +1112,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, which applies the rejection parameters used when fitting the ICA object. 
%(topomap_sphere_auto)s + %(verbose)s Returns ------- @@ -1142,18 +1146,13 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, figs = [] for k in range(0, n_components, p): picks = range(k, min(k + p, n_components)) - fig = plot_ica_components(ica, picks=picks, ch_type=ch_type, - res=res, vmax=vmax, - cmap=cmap, sensors=sensors, - colorbar=colorbar, title=title, - show=show, outlines=outlines, - contours=contours, - image_interp=image_interp, inst=inst, - plot_std=plot_std, - topomap_args=topomap_args, - image_args=image_args, - psd_args=psd_args, reject=reject, - sphere=sphere) + fig = plot_ica_components( + ica, picks=picks, ch_type=ch_type, res=res, vmax=vmax, + cmap=cmap, sensors=sensors, colorbar=colorbar, title=title, + show=show, outlines=outlines, contours=contours, + image_interp=image_interp, inst=inst, plot_std=plot_std, + topomap_args=topomap_args, image_args=image_args, + psd_args=psd_args, reject=reject, sphere=sphere) figs.append(fig) return figs else: @@ -1164,7 +1163,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, data = np.dot(ica.mixing_matrix_[:, picks].T, ica.pca_components_[:ica.n_components_]) - data_picks, pos, merge_channels, names, _, sphere, clip_origin = \ + data_picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \ _prepare_topomap_plot(ica, ch_type, sphere=sphere) outlines = _make_head_outlines(sphere, pos, outlines, clip_origin) @@ -1187,7 +1186,8 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, im = plot_topomap( data_.flatten(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=ax, cmap=cmap[0], outlines=outlines, contours=contours, - image_interp=image_interp, show=False, sensors=sensors)[0] + image_interp=image_interp, show=False, sensors=sensors, + ch_type=ch_type, **topomap_args)[0] im.axes.set_label(ica._ica_names[ii]) if colorbar: cbar, cax = _add_colorbar(ax, im, cmap, title="AU", @@ -2109,9 +2109,17 @@ def _hide_frame(ax): ax.set_frame_on(False) -def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): +def _check_extrapolate(extrapolate, ch_type): + _check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto')) + if extrapolate == 'auto': + extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head' + return extrapolate + + +@verbose +def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere, ch_type, + extrapolate, verbose): """Initialize animated topomap.""" - from matplotlib import pyplot as plt, patches logger.info('Initializing animation...') data = params['data'] items = list() @@ -2137,7 +2145,10 @@ def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): _hide_frame(ax) extent, Xi, Yi, interp = _setup_interp( - params['pos'], 64, 'box', sphere, outlines, 0) + params['pos'], 64, extrapolate, sphere, outlines, 0) + + patch_ = _get_patch(outlines, extrapolate, interp, ax) + params['Zis'] = list() for frame in params['frames']: params['Zis'].append(interp.set_values(data[:, frame])(Xi, Yi)) @@ -2152,14 +2163,9 @@ def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): aspect='equal', extent=extent, interpolation='bilinear') ax.autoscale(enable=True, tight=True) - plt.colorbar(im, cax=ax_cbar) + ax.figure.colorbar(im, cax=ax_cbar) cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1) - patch_ = patches.Ellipse((0, 0), - 2 * outlines['clip_radius'][0], - 2 * outlines['clip_radius'][1], - clip_on=True, - transform=ax.transData) im.set_clip_path(patch_) text = ax.text(0.55, 0.95, '', 
transform=ax.transAxes, va='center', ha='right') @@ -2249,7 +2255,7 @@ def _key_press(event, params): def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, - show, time_unit, sphere): + show, time_unit, sphere, extrapolate, *, verbose=None): """Make animation of evoked data as topomap timeseries. See mne.evoked.Evoked.animate_topomap. @@ -2273,7 +2279,6 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, raise ValueError('All times must be inside the evoked time series.') frames = [np.abs(evoked.times - time).argmin() for time in times] - blit = False if plt.get_backend() == 'MacOSX' else blit picks, pos, merge_channels, _, ch_type, sphere, clip_origin = \ _prepare_topomap_plot(evoked, ch_type, sphere=sphere) data = evoked.data[picks, :] @@ -2292,6 +2297,7 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int) ax_cbar = plt.subplot2grid(shape, (0, colspan), rowspan=rowspan) ax_cbar.set_title(_handle_default('units')[ch_type], fontsize=10) + extrapolate = _check_extrapolate(extrapolate, ch_type) params = dict(data=data, pos=pos, all_times=evoked.times, frame=0, frames=frames, butterfly=butterfly, blit=blit, @@ -2299,7 +2305,8 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, clip_origin=clip_origin) init_func = partial(_init_anim, ax=ax, ax_cbar=ax_cbar, ax_line=ax_line, params=params, merge_channels=merge_channels, - sphere=sphere) + sphere=sphere, ch_type=ch_type, + extrapolate=extrapolate, verbose=verbose) animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params) pause_func = partial(_pause_anim, params=params) fig.canvas.mpl_connect('button_press_event', pause_func) diff --git a/tutorials/preprocessing/plot_40_artifact_correction_ica.py b/tutorials/preprocessing/plot_40_artifact_correction_ica.py index c5b9f3c92d9..afa9af9f6dc 100644 --- a/tutorials/preprocessing/plot_40_artifact_correction_ica.py +++ b/tutorials/preprocessing/plot_40_artifact_correction_ica.py @@ -20,7 +20,6 @@ and classes from that submodule: """ - import os import mne from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs, From 9139c345ef40a6b5c4a1ee4e6943ae76e25d8276 Mon Sep 17 00:00:00 2001 From: Alexandre Gramfort Date: Wed, 9 Dec 2020 21:06:26 +0100 Subject: [PATCH 002/387] MRG, ENH: make plot alignment use defaults for colors (#8553) * WIP : make plot alignment use defaults for colors * ENH: Use cube * ENH: oct * FIX: Missed * DOC: Loc * FIX: Subclass Co-authored-by: Eric Larson --- doc/changes/latest.inc | 4 ++ mne/defaults.py | 6 +- mne/gui/_coreg_gui.py | 6 +- mne/gui/_viewer.py | 18 ++++-- mne/viz/_3d.py | 56 +++++++++++++------ mne/viz/backends/_pysurfer_mayavi.py | 38 ++++++++++++- mne/viz/backends/_pyvista.py | 19 ++++++- mne/viz/backends/_utils.py | 3 +- .../source-modeling/plot_source_alignment.py | 2 +- 9 files changed, 116 insertions(+), 36 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 76cde15b9e1..c333914bcd1 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -43,6 +43,10 @@ Enhancements - Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) +- Add support for ``mri_fiducials='estimated'`` in :func:`mne.viz.plot_alignment` to allow estimating MRI fiducial locations using :func:`mne.coreg.get_mni_fiducials` (:gh:`8553` by 
`Eric Larson`_) + +- Update default values in :ref:`mne coreg` and :func:`mne.viz.plot_alignment` for clearer representation of MRI and digitized fiducial points (:gh:`8553` by `Alex Gramfort`_ and `Eric Larson`_) + - Add ``n_pca_components`` argument to :func:`mne.viz.plot_ica_overlay` (:gh:`8351` by `Eric Larson`_) - Add :func:`mne.stc_near_sensors` to facilitate plotting ECoG data (:gh:`8190` by `Eric Larson`_) diff --git a/mne/defaults.py b/mne/defaults.py index 5b85be312c2..9e86df26191 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -55,10 +55,10 @@ markersize=4), coreg=dict( mri_fid_opacity=1.0, - dig_fid_opacity=0.3, + dig_fid_opacity=1.0, - mri_fid_scale=1e-2, - dig_fid_scale=3e-2, + mri_fid_scale=5e-3, + dig_fid_scale=8e-3, extra_scale=4e-3, eeg_scale=4e-3, eegp_scale=20e-3, eegp_height=0.1, ecog_scale=5e-3, diff --git a/mne/gui/_coreg_gui.py b/mne/gui/_coreg_gui.py index dd36d3013f9..3f1cdeee8ff 100644 --- a/mne/gui/_coreg_gui.py +++ b/mne/gui/_coreg_gui.py @@ -1900,18 +1900,18 @@ def _init_plot(self): point_scale = defaults['mri_fid_scale'] self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color, has_norm=True, point_scale=point_scale, - name='LPA') + name='LPA', view='oct') self.model.sync_trait('transformed_mri_lpa', self.mri_lpa_obj, 'points', mutual=False) self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color, has_norm=True, point_scale=point_scale, - name='Nasion') + name='Nasion', view='oct') self.model.sync_trait('transformed_mri_nasion', self.mri_nasion_obj, 'points', mutual=False) self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color, has_norm=True, point_scale=point_scale, - name='RPA') + name='RPA', view='oct') self.model.sync_trait('transformed_mri_rpa', self.mri_rpa_obj, 'points', mutual=False) diff --git a/mne/gui/_viewer.py b/mne/gui/_viewer.py index e33f44a0680..4fe6e9b1002 100644 --- a/mne/gui/_viewer.py +++ b/mne/gui/_viewer.py @@ -21,9 +21,9 @@ from ..defaults import DEFAULTS from ..surface import _CheckInside, _DistanceQuery -from ..transforms import apply_trans +from ..transforms import apply_trans, rotation from ..utils import SilenceStdout -from ..viz.backends._pysurfer_mayavi import (_create_mesh_surf, +from ..viz.backends._pysurfer_mayavi import (_create_mesh_surf, _oct_glyph, _toggle_mlab_render) try: @@ -235,14 +235,14 @@ def __init__(self, view='points', has_norm=False, *args, **kwargs): Parameters ---------- - view : 'points' | 'cloud' + view : 'points' | 'cloud' | 'arrow' | 'oct' Whether the view options should be tailored to individual points or a point cloud. has_norm : bool Whether a norm can be defined; adds view options based on point norms (default False). 
""" - assert view in ('points', 'cloud', 'arrow') + assert view in ('points', 'cloud', 'arrow', 'oct') self._view = view self._has_norm = bool(has_norm) super(PointObject, self).__init__(*args, **kwargs) @@ -264,7 +264,7 @@ def default_traits_view(self): # noqa: D102 if self._view == 'arrow': visible = Item('visible', label='Show', show_label=False) return View(HGroup(visible, scale, 'opacity', 'label', Spring())) - elif self._view == 'points': + elif self._view in ('points', 'oct'): visible = Item('visible', label='Show', show_label=True) views = (visible, color, scale, 'label') else: @@ -327,11 +327,15 @@ def _plot_points(self): # this can occur sometimes during testing w/ui.dispose() return # fig.scene.engine.current_object is scatter - mode = 'arrow' if self._view == 'arrow' else 'sphere' + mode = {'cloud': 'sphere', 'points': 'sphere', 'oct': 'sphere'}.get( + self._view, self._view) + assert mode in ('sphere', 'arrow') glyph = pipeline.glyph(scatter, color=self.color, figure=fig, scale_factor=self.point_scale, opacity=1., resolution=self.resolution, mode=mode) + if self._view == 'oct': + _oct_glyph(glyph.glyph.glyph_source, rotation(0, 0, np.pi / 4)) glyph.actor.property.backface_culling = True glyph.glyph.glyph.vector_mode = 'use_normal' glyph.glyph.glyph.clamping = False @@ -430,6 +434,8 @@ def _update_marker_type(self): gs = self.glyph.glyph.glyph_source res = getattr(gs.glyph_source, 'theta_resolution', getattr(gs.glyph_source, 'resolution', None)) + if res is None: + return if self.project_to_surface or self.orient_to_surface: gs.glyph_source = tvtk.CylinderSource() gs.glyph_source.height = defaults['eegp_height'] diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 02c80746267..9eb90e39c91 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -34,7 +34,7 @@ _reorder_ccw, _complete_sphere_surf) from ..transforms import (_find_trans, apply_trans, rot_to_quat, combine_transforms, _get_trans, _ensure_trans, - invert_transform, Transform, + invert_transform, Transform, rotation, read_ras_mni_t, _print_coord_trans) from ..utils import (get_subjects_dir, logger, _check_subject, verbose, warn, has_nibabel, check_version, fill_doc, _pl, get_config, @@ -489,8 +489,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, If not None, also plot the source space points. mri_fiducials : bool | str Plot MRI fiducials (default False). If ``True``, look for a file with - the canonical name (``bem/{subject}-fiducials.fif``). If ``str`` it - should provide the full path to the fiducials file. + the canonical name (``bem/{subject}-fiducials.fif``). If ``str``, + it can be ``'estimated'`` to use :func:`mne.coreg.get_mni_fiducials`, + otherwise it should provide the full path to the fiducials file. + + .. versionadded:: 0.22 + Support for ``'estimated'``. bem : list of dict | instance of ConductorModel | None Can be either the BEM surfaces (list of dict), a BEM solution or a sphere model. If None, we first try loading @@ -550,6 +554,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, .. 
versionadded:: 0.15 """ from ..forward import _create_meg_coils, Forward + from ..coreg import get_mni_fiducials # Update the backend from .backends.renderer import _get_renderer @@ -811,9 +816,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, mri_fiducials = op.join(subjects_dir, subject, 'bem', subject + '-fiducials.fif') if isinstance(mri_fiducials, str): - mri_fiducials, cf = read_fiducials(mri_fiducials) - if cf != FIFF.FIFFV_COORD_MRI: - raise ValueError("Fiducials are not in MRI space") + if mri_fiducials == 'estimated': + mri_fiducials = get_mni_fiducials(subject, subjects_dir) + else: + mri_fiducials, cf = read_fiducials(mri_fiducials) + if cf != FIFF.FIFFV_COORD_MRI: + raise ValueError("Fiducials are not in MRI space") fid_loc = _fiducial_coords(mri_fiducials, FIFF.FIFFV_COORD_MRI) fid_loc = apply_trans(mri_trans, fid_loc) else: @@ -1014,7 +1022,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, for k, v in user_alpha.items(): if v is not None: alphas[k] = v - colors = dict(head=(0.6,) * 3, helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3, + colors = dict(head=DEFAULTS['coreg']['head_color'], + helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3, rh=(0.5,) * 3) colors.update(skull_colors) for key, surf in surfs.items(): @@ -1060,19 +1069,34 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, defaults['extra_scale'] ] + [defaults[key + '_scale'] for key in other_keys] assert len(datas) == len(colors) == len(alphas) == len(scales) + fid_colors = tuple( + defaults[f'{key}_color'] for key in ('lpa', 'nasion', 'rpa')) + glyphs = ['sphere'] * len(datas) for kind, loc in (('dig', car_loc), ('mri', fid_loc)): if len(loc) > 0: datas.extend(loc[:, np.newaxis]) - colors.extend((defaults['lpa_color'], - defaults['nasion_color'], - defaults['rpa_color'])) - alphas.extend(3 * (defaults[kind + '_fid_opacity'],)) - scales.extend(3 * (defaults[kind + '_fid_scale'],)) - - for data, color, alpha, scale in zip(datas, colors, alphas, scales): + colors.extend(fid_colors) + alphas.extend(3 * (defaults[f'{kind}_fid_opacity'],)) + scales.extend(3 * (defaults[f'{kind}_fid_scale'],)) + glyphs.extend(3 * (('oct' if kind == 'mri' else 'sphere'),)) + for data, color, alpha, scale, glyph in zip( + datas, colors, alphas, scales, glyphs): if len(data) > 0: - renderer.sphere(center=data, color=color, scale=scale, - opacity=alpha, backface_culling=True) + if glyph == 'oct': + transform = np.eye(4) + transform[:3, :3] = mri_trans['trans'][:3, :3] * scale + # rotate around Z axis 45 deg first + transform = transform @ rotation(0, 0, np.pi / 4) + renderer.quiver3d( + x=data[:, 0], y=data[:, 1], z=data[:, 2], + u=1., v=0., w=0., color=color, mode='oct', + scale=1., opacity=alpha, backface_culling=True, + solid_transform=transform) + else: + assert glyph == 'sphere' + assert data.ndim == 2 and data.shape[1] == 3, data.shape + renderer.sphere(center=data, color=color, scale=scale, + opacity=alpha, backface_culling=True) if len(eegp_loc) > 0: renderer.quiver3d( x=eegp_loc[:, 0], y=eegp_loc[:, 1], z=eegp_loc[:, 2], diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index 39abd16977b..3db8eba1536 100644 --- a/mne/viz/backends/_pysurfer_mayavi.py +++ b/mne/viz/backends/_pysurfer_mayavi.py @@ -234,7 +234,7 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, backface_culling=False, colormap=None, 
vmin=None, vmax=None, - line_width=2., name=None): + line_width=2., name=None, solid_transform=None): _check_option('mode', mode, ALLOWED_QUIVER_MODES) color = _check_color(color) with warnings.catch_warnings(record=True): # traits @@ -244,12 +244,15 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, scale_mode=scale_mode, resolution=resolution, scalars=scalars, opacity=opacity, figure=self.fig) - elif mode in ('cone', 'sphere'): + elif mode in ('cone', 'sphere', 'oct'): + use_mode = 'sphere' if mode == 'oct' else mode quiv = self.mlab.quiver3d(x, y, z, u, v, w, color=color, - mode=mode, scale_factor=scale, + mode=use_mode, scale_factor=scale, opacity=opacity, figure=self.fig) if mode == 'sphere': quiv.glyph.glyph_source.glyph_source.center = 0., 0., 0. + elif mode == 'oct': + _oct_glyph(quiv.glyph.glyph_source, solid_transform) else: assert mode == 'cylinder', mode # should be guaranteed above quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, @@ -524,3 +527,32 @@ def _testing_context(interactive): yield finally: mlab.options.backend = orig_backend + + +def _oct_glyph(glyph_source, transform): + from tvtk.api import tvtk + from tvtk.common import configure_input + from traits.api import Array + gs = tvtk.PlatonicSolidSource() + + # Workaround for: + # File "mayavi/components/glyph_source.py", line 231, in _glyph_position_changed # noqa: E501 + # g.center = 0.0, 0.0, 0.0 + # traits.trait_errors.TraitError: Cannot set the undefined 'center' attribute of a 'TransformPolyDataFilter' object. # noqa: E501 + class SafeTransformPolyDataFilter(tvtk.TransformPolyDataFilter): + center = Array(shape=(3,), value=np.zeros(3)) + + gs.solid_type = 'octahedron' + if transform is not None: + # glyph: mayavi.modules.vectors.Vectors + # glyph.glyph: vtkGlyph3D + # glyph.glyph.glyph: mayavi.components.glyph.Glyph + assert transform.shape == (4, 4) + tr = tvtk.Transform() + tr.set_matrix(transform.ravel()) + trp = SafeTransformPolyDataFilter() + configure_input(trp, gs) + trp.transform = tr + trp.update() + gs = trp + glyph_source.glyph_source = gs diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index d6b24e0d790..5515e91d9af 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -486,7 +486,9 @@ def tube(self, origin, destination, radius=0.001, color='white', def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, - backface_culling=False, line_width=2., name=None): + backface_culling=False, line_width=2., name=None, + glyph_width=None, glyph_depth=None, + solid_transform=None): _check_option('mode', mode, ALLOWED_QUIVER_MODES) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) @@ -517,6 +519,7 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, ) mesh = pyvista.wrap(alg.GetOutput()) else: + tr = None if mode == 'cone': glyph = vtk.vtkConeSource() glyph.SetCenter(0.5, 0, 0) @@ -524,6 +527,9 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, elif mode == 'cylinder': glyph = vtk.vtkCylinderSource() glyph.SetRadius(0.15) + elif mode == 'oct': + glyph = vtk.vtkPlatonicSolidSource() + glyph.SetSolidTypeToOctahedron() else: assert mode == 'sphere', mode # guaranteed above glyph = vtk.vtkSphereSource() @@ -534,10 +540,17 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph.SetCenter(glyph_center) if 
glyph_resolution is not None: glyph.SetResolution(glyph_resolution) - # fix orientation - glyph.Update() tr = vtk.vtkTransform() tr.RotateWXYZ(90, 0, 0, 1) + elif mode == 'oct': + if solid_transform is not None: + assert solid_transform.shape == (4, 4) + tr = vtk.vtkTransform() + tr.SetMatrix( + solid_transform.astype(np.float64).ravel()) + if tr is not None: + # fix orientation + glyph.Update() trp = vtk.vtkTransformPolyDataFilter() trp.SetInputData(glyph.GetOutput()) trp.SetTransform(tr) diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 4c271ead23f..cc60cc39ac4 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -15,7 +15,8 @@ 'mayavi', 'notebook', ) -ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere') +ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere', + 'oct') def _get_colormap_from_array(colormap=None, normalized_colormap=False, diff --git a/tutorials/source-modeling/plot_source_alignment.py b/tutorials/source-modeling/plot_source_alignment.py index 45c38343fd7..9739eb83d6e 100644 --- a/tutorials/source-modeling/plot_source_alignment.py +++ b/tutorials/source-modeling/plot_source_alignment.py @@ -104,7 +104,7 @@ fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample', subjects_dir=subjects_dir, surfaces='head-dense', show_axes=True, dig=True, eeg=[], meg='sensors', - coord_frame='meg') + coord_frame='meg', mri_fiducials='estimated') mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.)) print('Distance from head origin to MEG origin: %0.1f mm' % (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3]))) From 76e386ca0d588c30a58519b125448162f735a8e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Thu, 10 Dec 2020 00:45:30 +0100 Subject: [PATCH 003/387] MRG: Small fix to tutorial; rename plot_events ordinate label to "Event id"; improve some SSP docstrings (#8612) * plot_events: Events id -> Event id * Events correspond to R wave peaks * SSP is signal-space projection * Better SSP docstrings * More consistent * Phrasing * Fix style * Use docdict & improve docstrings * insert link to Wikipedia * Correct false docstring [skip azp] * docstyle [skip azp] * Cleanup & docdict * Apply review suggestions * Better docstrings for create_eog_epochs & create_ecg_epochs * Fix pydocstyle [skip azp] * Fix docstyle [skip azp] --- mne/epochs.py | 22 +---- mne/preprocessing/ecg.py | 94 ++++++------------- mne/preprocessing/eog.py | 2 + mne/preprocessing/ssp.py | 12 ++- mne/proj.py | 12 ++- mne/utils/docs.py | 86 +++++++++++++++++ mne/viz/misc.py | 2 +- .../plot_50_artifact_correction_ssp.py | 5 +- 8 files changed, 141 insertions(+), 94 deletions(-) diff --git a/mne/epochs.py b/mne/epochs.py index 48f975c6917..d803acf1e3c 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -343,8 +343,7 @@ class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin, %(picks_header)s See `Epochs` docstring. %(reject_epochs)s - flat : dict | None - See `Epochs` docstring. + %(flat)s decim : int See `Epochs` docstring. reject_tmin : scalar | None @@ -1190,12 +1189,7 @@ def drop_bad(self, reject='existing', flat='existing', verbose=None): Parameters ---------- %(reject_drop_bad)s - flat : dict | str | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. 
If 'existing', - then the flat parameters set at instantiation are used. + %(flat_drop_bad)s %(verbose_meth)s Returns @@ -2072,11 +2066,7 @@ class Epochs(BaseEpochs): or wait before accessing each epoch (more memory efficient but can be slower). %(reject_epochs)s - flat : dict | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. + %(flat)s %(proj_epochs)s %(decim)s reject_tmin : scalar | None @@ -2285,11 +2275,7 @@ class EpochsArray(BaseEpochs): and a dict is created with string integer names corresponding to the event id integers. %(reject_epochs)s - flat : dict | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. + %(flat)s reject_tmin : scalar | None Start of the time window used to reject epochs (with the default None, the window will start with tmin). diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py index bd98b12184f..ce6cc143a9b 100644 --- a/mne/preprocessing/ecg.py +++ b/mne/preprocessing/ecg.py @@ -42,10 +42,8 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3, Low pass frequency h_freq : float High pass frequency - tstart : float - Start detection after tstart seconds. - filter_length : str | int | None - Number of taps to use for filtering. + %(ecg_tstart)s + %(ecg_filter_length)s %(verbose)s Returns @@ -142,30 +140,19 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0, ---------- raw : instance of Raw The raw data. - event_id : int - The index to assign to found events. - ch_name : None | str - The name of the channel to use for ECG peak detection. - If ``None`` (default), ECG channel is used if present. If ``None`` and - **no** ECG channel is present, a synthetic ECG channel is created from - cross-channel average. This synthetic channel can only be created from - MEG channels. - tstart : float - Start detection after tstart seconds. Useful when beginning - of run is noisy. - l_freq : float - Low pass frequency to apply to the ECG channel while finding events. - h_freq : float - High pass frequency to apply to the ECG channel while finding events. + %(ecg_event_id)s + %(ecg_ch_name)s + %(ecg_tstart)s + %(ecg_filter_freqs)s qrs_threshold : float | str Between 0 and 1. qrs detection threshold. Can also be "auto" to automatically choose the threshold that generates a reasonable number of heartbeats (40-160 beats / min). - filter_length : str | int | None - Number of taps to use for filtering. + %(ecg_filter_length)s return_ecg : bool - Return ecg channel if synthesized. Defaults to False. If True and - and ecg exists this will yield None. + Return the ECG data. This is especially useful if no ECG channel + is present in the input data, so one will be synthesized. Defaults to + ``False``. %(reject_by_annotation_all)s .. versionadded:: 0.18 @@ -178,7 +165,11 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0, ch_ecg : string Name of channel used. average_pulse : float - Estimated average pulse. + The estimated average pulse. If no ECG events could be found, this will + be zero. + ecg : array | None + The ECG data of the synthesized ECG channel, if any. This will only + be returned if ``return_ecg=True`` was passed. 
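A hedged sketch of unpacking these return values (``raw`` is assumed to be a preloaded ``Raw`` instance; if it has no ECG channel, a synthetic one is derived from the MEG channels)::

    from mne.preprocessing import find_ecg_events

    # four values come back because return_ecg=True was passed
    events, ch_ecg, average_pulse, ecg = find_ecg_events(
        raw, return_ecg=True)
    print('%d heartbeats, ~%0.1f bpm' % (len(events), average_pulse))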
See Also -------- @@ -278,51 +269,28 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None, tmin=-0.5, reject_by_annotation=True, decim=1, verbose=None): """Conveniently generate epochs around ECG artifact events. + %(create_ecg_epochs)s + + .. note:: Filtering is only applied to the ECG channel while finding + events. The resulting ``ecg_epochs`` will have no filtering + applied (i.e., have the same filter properties as the input + ``raw`` instance). + Parameters ---------- raw : instance of Raw The raw data. - ch_name : None | str - The name of the channel to use for ECG R wave peak detection. - If None (default), ECG channel is used if present. If None and no - ECG channel is present, a synthetic ECG channel is created from - cross channel average. Synthetic channel can only be created from - MEG channels. - event_id : int - The index to assign to found events. + %(ecg_ch_name)s + %(ecg_event_id)s %(picks_all)s tmin : float Start time before event. tmax : float End time after event. - l_freq : float - Low pass frequency to apply to the ECG channel while finding events. - h_freq : float - High pass frequency to apply to the ECG channel while finding events. - reject : dict | None - Rejection parameters based on peak-to-peak amplitude. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. - If reject is None then no rejection is done. Example:: - - reject = dict(grad=4000e-13, # T / m (gradiometers) - mag=4e-12, # T (magnetometers) - eeg=40e-6, # V (EEG channels) - eog=250e-6 # V (EOG channels) - ) - - flat : dict | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. - baseline : tuple | list of length 2 | None - The time interval to apply rescaling / baseline correction. - If None do not apply it. If baseline is (a, b) - the interval is between "a (s)" and "b (s)". - If a is None the beginning of the data is used - and if b is None then b is set to the end of the interval. - If baseline is equal to (None, None) all the time - interval is used. If None, no correction is applied. + %(ecg_filter_freqs)s + %(reject_epochs)s + %(flat)s + %(baseline_epochs)s preload : bool Preload epochs or not (default True). Must be True if keep_ecg is True. @@ -347,12 +315,6 @@ def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None, tmin=-0.5, -------- find_ecg_events compute_proj_ecg - - Notes - ----- - Filtering is only applied to the ECG channel while finding events. - The resulting ``ecg_epochs`` will have no filtering applied (i.e., have - the same filter properties as the input ``raw`` instance). """ has_ecg = 'ecg' in raw or ch_name is not None if keep_ecg and (has_ecg or not preload): diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 7cf5a602ba4..c7d93d32683 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -166,6 +166,8 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, thresh=None, decim=1, verbose=None): """Conveniently generate epochs around EOG artifact events. 
+ %(create_eog_epochs)s + Parameters ---------- raw : instance of Raw diff --git a/mne/preprocessing/ssp.py b/mne/preprocessing/ssp.py index 1805e42f89a..373c24488e9 100644 --- a/mne/preprocessing/ssp.py +++ b/mne/preprocessing/ssp.py @@ -150,9 +150,11 @@ def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4, tstart=0., qrs_threshold='auto', filter_method='fir', iir_params=None, copy=True, return_drop_log=False, meg='separate', verbose=None): - """Compute SSP/PCA projections for ECG artifacts. + """Compute SSP (signal-space projection) vectors for ECG artifacts. - .. note:: raw data will be loaded if it is not already. + %(compute_proj_ecg)s + + .. note:: Raw data will be loaded if it hasn't been preloaded already. Parameters ---------- @@ -263,9 +265,11 @@ def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2, eog_h_freq=10, tstart=0., filter_method='fir', iir_params=None, ch_name=None, copy=True, return_drop_log=False, meg='separate', verbose=None): - """Compute SSP/PCA projections for EOG artifacts. + """Compute SSP (signal-space projection) vectors for EOG artifacts. + + %(compute_proj_eog)s - .. note:: raw data must be preloaded. + .. note:: Raw data must be preloaded. Parameters ---------- diff --git a/mne/proj.py b/mne/proj.py index 4b43b6c02d7..fcf73bb6146 100644 --- a/mne/proj.py +++ b/mne/proj.py @@ -140,7 +140,9 @@ def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, @verbose def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1, desc_prefix=None, meg='separate', verbose=None): - """Compute SSP (spatial space projection) vectors on Epochs. + """Compute SSP (signal-space projection) vectors on epoched data. + + %(compute_ssp)s Parameters ---------- @@ -207,7 +209,9 @@ def _compute_cov_epochs(epochs, n_jobs): @verbose def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, desc_prefix=None, meg='separate', verbose=None): - """Compute SSP (spatial space projection) vectors on Evoked. + """Compute SSP (signal-space projection) vectors on evoked data. + + %(compute_ssp)s Parameters ---------- @@ -253,7 +257,9 @@ def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, desc_prefix=None, def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2, n_eeg=0, reject=None, flat=None, n_jobs=1, meg='separate', verbose=None): - """Compute SSP (spatial space projection) vectors on Raw. + """Compute SSP (signal-space projection) vectors on continuous data. + + %(compute_ssp)s Parameters ---------- diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 1e065b844ad..12f9b3a1e82 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1944,6 +1944,92 @@ If ``reject`` is ``None``, no rejection is performed. If ``'existing'`` (default), then the rejection parameters set at instantiation are used. """ +flat_common = """ + Rejection parameters based on flatness of signal. + Valid **keys** are ``'grad'``, ``'mag'``, ``'eeg'``, ``'eog'``, ``'ecg'``. + The **values** are floats that set the minimum acceptable peak-to-peak + amplitude (PTP). If the PTP is smaller than this threshold, the epoch will + be dropped. If ``None`` then no rejection is performed based on flatness + of the signal.""" +docdict['flat'] = f""" +flat : dict | None +{flat_common} +""" +docdict['flat_drop_bad'] = f""" +flat : dict | str | None +{flat_common} + If ``'existing'``, then the flat parameters set during epoch creation are + used. +""" + +# ECG detection +docdict['ecg_event_id'] = """ +event_id : int + The index to assign to found ECG events. 
+""" +docdict['ecg_ch_name'] = """ +ch_name : None | str + The name of the channel to use for ECG peak detection. + If ``None`` (default), ECG channel is used if present. If ``None`` and + **no** ECG channel is present, a synthetic ECG channel is created from + the cross-channel average. This synthetic channel can only be created from + MEG channels. +""" +docdict['ecg_filter_freqs'] = """ +l_freq : float + Low pass frequency to apply to the ECG channel while finding events. +h_freq : float + High pass frequency to apply to the ECG channel while finding events. +""" +docdict['ecg_filter_length'] = """ +filter_length : str | int | None + Number of taps to use for filtering. +""" +docdict['ecg_tstart'] = """ +tstart : float + Start ECG detection after ``tstart`` seconds. Useful when the beginning + of the run is noisy. +""" +docdict['create_ecg_epochs'] = """This function will: + +#. Filter the ECG data channel. + +#. Find ECG R wave peaks using :func:`mne.preprocessing.find_ecg_events`. + +#. Filter the raw data. + +#. Create `~mne.Epochs` around the R wave peaks, capturing the heartbeats. +""" + +# EOG detection +docdict['create_eog_epochs'] = """This function will: + +#. Filter the EOG data channel. + +#. Find the peaks of eyeblinks in the EOG data using + :func:`mne.preprocessing.find_eog_events`. + +#. Filter the raw data. + +#. Create `~mne.Epochs` around the eyeblinks. +""" + +# SSP +docdict['compute_ssp'] = """This function aims to find those SSP vectors that +will project out the ``n`` most prominent signals from the data for each +specified sensor type. Consequently, if the provided input data contains high +levels of noise, the produced SSP vectors can then be used to eliminate that +noise from the data. +""" +compute_proj_common = """ +#. Optionally average the `~mne.Epochs` to produce an `~mne.Evoked` if + ``average=True`` was passed (default). + +#. Calculate SSP projection vectors on that data to capture the artifacts.""" +docdict['compute_proj_ecg'] = f"""%(create_ecg_epochs)s {compute_proj_common} +""" % docdict +docdict['compute_proj_eog'] = f"""%(create_eog_epochs)s {compute_proj_common} +""" % docdict # Other docdict['accept'] = """ diff --git a/mne/viz/misc.py b/mne/viz/misc.py index 7912006c6d3..733e1026c04 100644 --- a/mne/viz/misc.py +++ b/mne/viz/misc.py @@ -693,7 +693,7 @@ def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None, else: ax.set_ylim([min_event - 1, max_event + 1]) - ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x]) + ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x]) ax.grid(True) diff --git a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py b/tutorials/preprocessing/plot_50_artifact_correction_ssp.py index d64f6e65c69..8dc8345acdf 100644 --- a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/plot_50_artifact_correction_ssp.py @@ -228,8 +228,9 @@ # projectors for magnetometers, gradiometers, and EEG channels (default is two # projectors for each channel type). # :func:`~mne.preprocessing.compute_proj_ecg` also returns an :term:`events` -# array containing the sample numbers corresponding to the onset of each -# detected heartbeat. +# array containing the sample numbers corresponding to the peak of the +# `R wave `__ of each detected +# heartbeat. 
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=1, reject=None) From e2357ad7848a3c3968678557ef5ded4602bed0c8 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Thu, 10 Dec 2020 19:47:18 +0100 Subject: [PATCH 004/387] FIX: scalar_bar (#8643) * Fix scalar bar mapped from polydata actor * Touch example * DRY * LUT still needs to be set for glyphs * Touch example * Improve coverage --- examples/inverse/plot_source_space_snr.py | 2 +- examples/inverse/plot_vector_mne_solution.py | 2 +- mne/viz/_brain/_brain.py | 3 ++- mne/viz/backends/_pysurfer_mayavi.py | 7 ++--- mne/viz/backends/_pyvista.py | 26 ++++++++++--------- mne/viz/backends/_utils.py | 7 +++++ mne/viz/backends/tests/test_renderer.py | 3 ++- tutorials/misc/plot_seeg.py | 2 +- .../source-modeling/plot_visualize_stc.py | 2 +- 9 files changed, 31 insertions(+), 23 deletions(-) diff --git a/examples/inverse/plot_source_space_snr.py b/examples/inverse/plot_source_space_snr.py index e7904f12838..ef22092a8be 100644 --- a/examples/inverse/plot_source_space_snr.py +++ b/examples/inverse/plot_source_space_snr.py @@ -53,7 +53,7 @@ ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG') fig.tight_layout() -# Find time point of maximum SNR: +# Find time point of maximum SNR maxidx = np.argmax(ave) # Plot SNR on source space at the time point of maximum SNR: diff --git a/examples/inverse/plot_vector_mne_solution.py b/examples/inverse/plot_vector_mne_solution.py index 0a8983480fa..1ac9f2873b5 100644 --- a/examples/inverse/plot_vector_mne_solution.py +++ b/examples/inverse/plot_vector_mne_solution.py @@ -67,7 +67,7 @@ stc_max, directions = stc.project('pca', src=inv['src']) # These directions must by design be close to the normals because this -# inverse was computed with loose=0.2: +# inverse was computed with loose=0.2 print('Absolute cosine similarity between source normals and directions: ' f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}') brain_max = stc_max.plot( diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 79d61c5ac79..9519d819145 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -2407,7 +2407,8 @@ def update_lut(self, fmin=None, fmid=None, fmax=None): mesh = self._layered_meshes[hemi] mesh.update_overlay(name='data', colormap=self._data['ctable']) - _set_colormap_range(mesh._actor, ctable, scalar_bar, rng) + _set_colormap_range(mesh._actor, ctable, scalar_bar, rng, + self._brain_color) scalar_bar = None grid_volume_pos = hemi_data.get('grid_volume_pos') diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index 3db8eba1536..0239f431edd 100644 --- a/mne/viz/backends/_pysurfer_mayavi.py +++ b/mne/viz/backends/_pysurfer_mayavi.py @@ -24,7 +24,7 @@ from tvtk.pyface.tvtk_scene import TVTKScene from .base_renderer import _BaseRenderer -from ._utils import _check_color, ALLOWED_QUIVER_MODES +from ._utils import _check_color, _alpha_blend_background, ALLOWED_QUIVER_MODES from ...surface import _normalize_vectors from ...utils import (_import_mlab, _validate_type, SilenceStdout, copy_base_doc_to_subclass_doc, _check_option) @@ -301,10 +301,7 @@ def scalarbar(self, source, color="white", title=None, n_labels=4, ctable = lut.table.to_array() cbar_lut = tvtk.LookupTable() cbar_lut.deep_copy(lut) - alphas = ctable[:, -1][:, np.newaxis] / 255. - use_lut = ctable.copy() - use_lut[:, -1] = 255. 
- vals = (use_lut * alphas) + bgcolor * (1 - alphas) + vals = _alpha_blend_background(ctable, bgcolor) cbar_lut.table.from_array(vals) cmap.scalar_bar.lookup_table = cbar_lut diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 5515e91d9af..648cd98602d 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -21,7 +21,8 @@ import vtk from .base_renderer import _BaseRenderer -from ._utils import _get_colormap_from_array, ALLOWED_QUIVER_MODES +from ._utils import (_get_colormap_from_array, _alpha_blend_background, + ALLOWED_QUIVER_MODES) from ...fixes import _get_args from ...utils import copy_base_doc_to_subclass_doc, _check_option from ...externals.decorator import decorator @@ -874,20 +875,21 @@ def _process_events(plotter): plotter.app.processEvents() -def _set_colormap_range(actor, ctable, scalar_bar, rng=None): +def _set_colormap_range(actor, ctable, scalar_bar, rng=None, + background_color=None): from vtk.util.numpy_support import numpy_to_vtk - mapper = actor.GetMapper() - lut = mapper.GetLookupTable() - # Catch: FutureWarning: Conversion of the second argument of - # issubdtype from `complex` to `np.complexfloating` is deprecated. - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=FutureWarning) - lut.SetTable(numpy_to_vtk(ctable)) if rng is not None: - mapper.SetScalarRange(rng[0], rng[1]) - lut.SetRange(rng[0], rng[1]) + mapper = actor.GetMapper() + mapper.SetScalarRange(*rng) + lut = mapper.GetLookupTable() + lut.SetTable(numpy_to_vtk(ctable)) if scalar_bar is not None: - scalar_bar.SetLookupTable(actor.GetMapper().GetLookupTable()) + lut = scalar_bar.GetLookupTable() + if background_color is not None: + background_color = np.array(background_color) * 255 + ctable = _alpha_blend_background(ctable, background_color) + lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR)) + lut.SetRange(*rng) def _set_volume_range(volume, ctable, alpha, scalar_bar, rng): diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index cc60cc39ac4..f9c36a097da 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -56,3 +56,10 @@ def _check_color(color): raise TypeError("Expected type is `str` or iterable but " "{} was given.".format(type(color))) return color + + +def _alpha_blend_background(ctable, background_color): + alphas = ctable[:, -1][:, np.newaxis] / 255. + use_table = ctable.copy() + use_table[:, -1] = 255. + return (use_table * alphas) + background_color * (1 - alphas) diff --git a/mne/viz/backends/tests/test_renderer.py b/mne/viz/backends/tests/test_renderer.py index 8c5299d0ab6..d707694816a 100644 --- a/mne/viz/backends/tests/test_renderer.py +++ b/mne/viz/backends/tests/test_renderer.py @@ -157,7 +157,8 @@ def test_3d_backend(renderer): scalars=np.array([[1.0, 1.0]])) # scalar bar - rend.scalarbar(source=tube, title="Scalar Bar") + rend.scalarbar(source=tube, title="Scalar Bar", + bgcolor=[1, 1, 1]) # use text rend.text2d(x_window=txt_x, y_window=txt_y, text=txt_text, diff --git a/tutorials/misc/plot_seeg.py b/tutorials/misc/plot_seeg.py index 2b479dcad0b..f68a654f320 100644 --- a/tutorials/misc/plot_seeg.py +++ b/tutorials/misc/plot_seeg.py @@ -122,7 +122,7 @@ surfaces=["pial", "head"]) ############################################################################### -# Next, we will get the raw data and plot its amplitude over time. +# Next, we'll get the raw data and plot its amplitude over time. 
raw.plot()
diff --git a/tutorials/source-modeling/plot_visualize_stc.py b/tutorials/source-modeling/plot_visualize_stc.py
index efca1401e3f..d4632a119bb 100644
--- a/tutorials/source-modeling/plot_visualize_stc.py
+++ b/tutorials/source-modeling/plot_visualize_stc.py
@@ -54,7 +54,7 @@
 clim=dict(kind='value', lims=[3, 6, 9]))

 ###############################################################################
-# You can also morph it to fsaverage and visualize it using a flatmap:
+# You can also morph it to fsaverage and visualize it using a flatmap

 # sphinx_gallery_thumbnail_number = 3
 stc_fs = mne.compute_source_morph(stc, 'sample', 'fsaverage', subjects_dir,

From 76b64ff884474c7b1569cf74d6183e2ab13b7e9f Mon Sep 17 00:00:00 2001
From: Richard Höchenberger
Date: Fri, 11 Dec 2020 16:18:21 +0100
Subject: [PATCH 005/387] Add regression test for EEGLAB data with a chanlocs struct (#8647)

* Use testing data release 0.112

* Add regression test for EEGLAB data with chanlocs struct

* Reduce code duplication
---
 mne/datasets/utils.py | 4 +--
 mne/io/eeglab/tests/test_eeglab.py | 42 +++++++++++++++++++++++-------
 2 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index 7b5febcdfa2..ddcf82713e5 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -245,7 +245,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
 path = _get_path(path, key, name)
 # To update the testing or misc dataset, push commits, then make a new
 # release on GitHub. Then update the "releases" variable:
- releases = dict(testing='0.110', misc='0.7')
+ releases = dict(testing='0.112', misc='0.7')
 # And also update the "md5_hashes['testing']" variable below.
 # To update any other dataset, update the data archive itself (upload
 # an updated version) and update the md5 hash.
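When bumping the testing dataset like this, the hash that goes into ``md5_hashes['testing']`` below can be regenerated with the standard library (a sketch; the archive filename is hypothetical):

import hashlib


def md5sum(fname, block_size=1024 * 1024):
    # Stream the archive in chunks so it need not fit in memory.
    md5 = hashlib.md5()
    with open(fname, 'rb') as fid:
        for chunk in iter(lambda: fid.read(block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()


print(md5sum('mne-testing-data-0.112.tar.gz'))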
@@ -331,7 +331,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='32fd2f6c8c7eb0784a1de6435273c48b', spm='9f43f67150e3b694b523a21eb929ea75', - testing='c4cd3385f321cd1151ed9de34fc4ce5a', + testing='8eabd73532dd7df7c155983962c5b1fd', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index d79e9aaa7f3..3ed15af5ec2 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -33,6 +33,7 @@ epochs_fname_onefile_mat = op.join(base_dir, 'test_epochs_onefile.set') raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat] epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat] +raw_fname_chanloc = op.join(base_dir, 'test_raw_chanloc.set') raw_fname_h5 = op.join(base_dir, 'test_raw_h5.set') raw_fname_onefile_h5 = op.join(base_dir, 'test_raw_onefile_h5.set') @@ -58,7 +59,7 @@ def _check_h5(fname): @testing.requires_testing_data @pytest.mark.slowtest @pytest.mark.parametrize( - 'fname', [raw_fname_mat, raw_fname_h5], ids=op.basename + 'fname', [raw_fname_mat, raw_fname_h5, raw_fname_chanloc], ids=op.basename ) def test_io_set_raw(fname): """Test importing EEGLAB .set files.""" @@ -67,17 +68,40 @@ def test_io_set_raw(fname): 'EEG {0:03d}'.format(ii) for ii in range(len(montage.ch_names)) ] - _test_raw_reader(read_raw_eeglab, input_fname=fname) + kws = dict(reader=read_raw_eeglab, input_fname=fname) + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + _test_raw_reader(**kws) + else: + _test_raw_reader(**kws) + # test that preloading works - raw0 = read_raw_eeglab(input_fname=fname, preload=True) - raw0.set_montage(montage) - raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', - phase='zero') + read_raw_kws = dict(input_fname=fname, preload=True) + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage, on_missing='ignore') + # crop to check if the data has been properly preloaded; we cannot + # filter as the snippet of raw data is very short + raw0.crop(0, 1) + else: + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage) + raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', + phase='zero') # test that using uint16_codec does not break stuff - raw0 = read_raw_eeglab(input_fname=fname, - preload=False, uint16_codec='ascii') - raw0.set_montage(montage) + read_raw_kws = dict(input_fname=fname, preload=False, uint16_codec='ascii') + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage, on_missing='ignore') + else: + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage) @testing.requires_testing_data From cfd5659ba5639b1af9b64edd9e9701a58c3262c3 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Sat, 12 Dec 2020 13:41:55 -0600 Subject: [PATCH 006/387] VIZ, ENH: allow show/hide annotations by label (#8624) * implement annotation show/hide checkboxes * tweak instruction wording/spacing * cruft * add test * fix doubled checkboxes & checkbox scaling * auto-make-visible annot type when adding a span * use 
constants * fix checkboxes migrating offscreen * add changelog entry (and move non-new-contrib entry to bottom) [ci skip] --- doc/changes/latest.inc | 6 +- mne/viz/_figure.py | 173 +++++++++++++++++++++++++++----------- mne/viz/raw.py | 1 - mne/viz/tests/test_raw.py | 13 ++- 4 files changed, 139 insertions(+), 54 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index c333914bcd1..5a0f52c1e35 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -31,8 +31,6 @@ Current (0.22.dev0) Enhancements ~~~~~~~~~~~~ -- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) - - Add :func:`mne.read_evokeds_mff` to read averaged MFFs (requires mffpy >= 0.5.7) **by new contributor** |Evan Hathaway|_ (:gh:`8354`) - Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) @@ -91,6 +89,10 @@ Enhancements - `~mne.Epochs` will now retain the information about an applied baseline correction, even if the baseline period is partially or completely removed through cropping later on (:gh:`8442` by `Richard Höchenberger`_) +- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) + +- Annotations can now be shown/hidden interactively in raw plots (:gh:`8624` by `Daniel McCloy`_) + Bugs ~~~~ - Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 1e2c11ae73f..36b9ddde443 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -58,6 +58,12 @@ _DATA_CH_TYPES_SPLIT, _DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES, _FNIRS_CH_TYPES_SPLIT) +# CONSTANTS (inches) +ANNOTATION_FIG_PAD = 0.1 +ANNOTATION_FIG_MIN_H = 2.9 # fixed part, not including radio buttons/labels +ANNOTATION_FIG_W = 5.0 +ANNOTATION_FIG_CHECKBOX_COLUMN_W = 0.5 + class MNEFigParams: """Container object for MNE figure parameters.""" @@ -1044,25 +1050,30 @@ def _create_annotation_fig(self): from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable # make figure labels = np.array(sorted(set(self.mne.inst.annotations.description))) - width, var_height, fixed_height, pad = \ - self._compute_annotation_figsize(len(labels)) - figsize = (width, var_height + fixed_height) + radio_button_h = self._compute_annotation_figsize(len(labels)) + figsize = (ANNOTATION_FIG_W, ANNOTATION_FIG_MIN_H + radio_button_h) fig = self._new_child_figure(figsize=figsize, FigureClass=MNEAnnotationFigure, fig_name='fig_annotation', window_title='Annotations') # make main axes - left = fig._inch_to_rel(pad) - bottom = fig._inch_to_rel(pad, horiz=False) + left = fig._inch_to_rel(ANNOTATION_FIG_PAD) + bottom = fig._inch_to_rel(ANNOTATION_FIG_PAD, horiz=False) width = 1 - 2 * left height = 1 - 2 * bottom fig.mne.radio_ax = fig.add_axes((left, bottom, width, height), frame_on=False, aspect='equal') div = make_axes_locatable(fig.mne.radio_ax) - self._update_annotation_fig() # populate w/ radio buttons & labels + # append show/hide checkboxes at right + fig.mne.show_hide_ax = div.append_axes( + position='right', size=Fixed(ANNOTATION_FIG_CHECKBOX_COLUMN_W), + pad=Fixed(ANNOTATION_FIG_PAD), aspect='equal', + sharey=fig.mne.radio_ax) + # populate w/ radio buttons & labels + self._update_annotation_fig() # append 
instructions at top instructions_ax = div.append_axes(position='top', size=Fixed(1), - pad=Fixed(5 * pad)) + pad=Fixed(5 * ANNOTATION_FIG_PAD)) # XXX when we support a newer matplotlib (something >3.0) the # instructions can have inline bold formatting: # instructions = '\n'.join( @@ -1070,41 +1081,47 @@ def _create_annotation_fig(self): # r'$\mathbf{Right‐click~on~plot~annotation:}$ delete annotation', # r'$\mathbf{Type~in~annotation~window:}$ modify new label name', # r'$\mathbf{Enter~(or~click~button):}$ add new label to list', - # r'$\mathbf{Esc:}$ exit annotation mode & close window']) + # r'$\mathbf{Esc:}$ exit annotation mode & close this window']) instructions = '\n'.join( ['Left click & drag on plot: create/modify annotation', - 'Right click on plot annotation: delete annotation', - 'Type in annotation window: modify new label name', + 'Right click on annotation highlight: delete annotation', + 'Type in this window: modify new label name', 'Enter (or click button): add new label to list', - 'Esc: exit annotation mode & close window']) + 'Esc: exit annotation mode & close this dialog window']) instructions_ax.text(0, 1, instructions, va='top', ha='left', + linespacing=1.7, usetex=False) # force use of MPL mathtext parser instructions_ax.set_axis_off() # append text entry axes at bottom - text_entry_ax = div.append_axes(position='bottom', size=Fixed(3 * pad), - pad=Fixed(pad)) + text_entry_ax = div.append_axes(position='bottom', + size=Fixed(3 * ANNOTATION_FIG_PAD), + pad=Fixed(ANNOTATION_FIG_PAD)) text_entry_ax.text(0.4, 0.5, 'New label:', va='center', ha='right', weight='bold') fig.label = text_entry_ax.text(0.5, 0.5, 'BAD_', va='center', ha='left') text_entry_ax.set_axis_off() # append button at bottom - button_ax = div.append_axes(position='bottom', size=Fixed(3 * pad), - pad=Fixed(pad)) + button_ax = div.append_axes(position='bottom', + size=Fixed(3 * ANNOTATION_FIG_PAD), + pad=Fixed(ANNOTATION_FIG_PAD)) fig.button = Button(button_ax, 'Add new label') fig.button.on_clicked(self._add_annotation_label) plt_show(fig=fig) # add "draggable" checkbox - drag_ax_height = 3 * pad + drag_ax_height = 3 * ANNOTATION_FIG_PAD drag_ax = div.append_axes('bottom', size=Fixed(drag_ax_height), - pad=Fixed(pad), aspect='equal') + pad=Fixed(ANNOTATION_FIG_PAD), + aspect='equal') checkbox = CheckButtons(drag_ax, labels=('Draggable edges?',), actives=(self.mne.draggable_annotations,)) checkbox.on_clicked(self._toggle_draggable_annotations) fig.mne.drag_checkbox = checkbox # reposition & resize axes width_in, height_in = fig.get_size_inches() - width_ax = fig._inch_to_rel(width_in - 2 * pad) + width_ax = fig._inch_to_rel(width_in + - ANNOTATION_FIG_CHECKBOX_COLUMN_W + - 3 * ANNOTATION_FIG_PAD) aspect = width_ax / fig._inch_to_rel(drag_ax_height) drag_ax.set_xlim(0, aspect) drag_ax.set_axis_off() @@ -1123,7 +1140,8 @@ def _create_annotation_fig(self): # setup interactivity in plot window col = ('#ff0000' if len(fig.mne.radio_ax.buttons.circles) < 1 else fig.mne.radio_ax.buttons.circles[0].get_edgecolor()) - # TODO: we would like useblit=True here, but MPL #9660 prevents it + # TODO: we would like useblit=True here, but it behaves oddly when the + # first span is dragged (subsequent spans seem to work OK) selector = SpanSelector(self.mne.ax_main, self._select_annotation_span, 'horizontal', minspan=0.1, useblit=False, rectprops=dict(alpha=0.5, facecolor=col)) @@ -1131,30 +1149,43 @@ def _create_annotation_fig(self): self.mne._callback_ids['motion_notify_event'] = \ 
self.canvas.mpl_connect('motion_notify_event', self._hover) + def _toggle_visible_annotations(self, event): + """Enable/disable display of annotations on a per-label basis.""" + checkboxes = self.mne.show_hide_annotation_checkboxes + labels = [t.get_text() for t in checkboxes.labels] + actives = checkboxes.get_status() + self.mne.visible_annotations = dict(zip(labels, actives)) + self._redraw(update_data=False, annotations=True) + def _toggle_draggable_annotations(self, event): """Enable/disable draggable annotation edges.""" self.mne.draggable_annotations = not self.mne.draggable_annotations + def _get_annotation_labels(self): + """Get the unique labels in the raw object and added in the UI.""" + labels = list(set(self.mne.inst.annotations.description)) + return np.union1d(labels, self.mne.new_annotation_labels) + def _update_annotation_fig(self): """Draw or redraw the radio buttons and annotation labels.""" - from matplotlib.widgets import RadioButtons + from matplotlib.widgets import RadioButtons, CheckButtons # define shorthand variables fig = self.mne.fig_annotation ax = fig.mne.radio_ax - # get all the labels - labels = list(set(self.mne.inst.annotations.description)) - labels = np.union1d(labels, self.mne.new_annotation_labels) + labels = self._get_annotation_labels() # compute new figsize - width, var_height, fixed_height, pad = \ - self._compute_annotation_figsize(len(labels)) - fig.set_size_inches(width, var_height + fixed_height, forward=True) + radio_button_h = self._compute_annotation_figsize(len(labels)) + fig.set_size_inches(ANNOTATION_FIG_W, + ANNOTATION_FIG_MIN_H + radio_button_h, + forward=True) # populate center axes with labels & radio buttons ax.clear() title = 'Existing labels:' if len(labels) else 'No existing labels' ax.set_title(title, size=None, loc='left') ax.buttons = RadioButtons(ax, labels) # adjust xlim to keep equal aspect & full width (keep circles round) - aspect = (width - 2 * pad) / var_height + aspect = (ANNOTATION_FIG_W - ANNOTATION_FIG_CHECKBOX_COLUMN_W + - 3 * ANNOTATION_FIG_PAD) / radio_button_h ax.set_xlim((0, aspect)) # style the buttons & adjust spacing radius = 0.15 @@ -1177,6 +1208,45 @@ def _update_annotation_fig(self): ax.buttons.on_clicked(fig._radiopress) ax.buttons.connect_event('button_press_event', fig._click_override) + # now do the show/hide checkboxes + show_hide_ax = fig.mne.show_hide_ax + show_hide_ax.clear() + show_hide_ax.set_axis_off() + aspect = ANNOTATION_FIG_CHECKBOX_COLUMN_W / radio_button_h + show_hide_ax.set(xlim=(0, aspect), ylim=(0, 1)) + # ensure new labels have checkbox values + check_values = {label: False for label in labels} + check_values.update(self.mne.visible_annotations) # existing checks + actives = [check_values[label] for label in labels] + # regenerate checkboxes + checkboxes = CheckButtons(ax=fig.mne.show_hide_ax, + labels=labels, + actives=actives) + checkboxes.on_clicked(self._toggle_visible_annotations) + # add title, hide labels + show_hide_ax.set_title('show/\nhide ', size=None, loc='right') + for label in checkboxes.labels: + label.set_visible(False) + # fix aspect and right-align + if len(labels) == 1: + bounds = (0.05, 0.375, 0.25, 0.25) # undo MPL special case + checkboxes.rectangles[0].set_bounds(bounds) + for line, step in zip(checkboxes.lines[0], (1, -1)): + line.set_xdata((bounds[0], bounds[0] + bounds[2])) + line.set_ydata((bounds[1], bounds[1] + bounds[3])[::step]) + for rect in checkboxes.rectangles: + rect.set_transform(show_hide_ax.transData) + bbox = rect.get_bbox() + bounds = (aspect, 
bbox.ymin, -bbox.width, bbox.height) + rect.set_bounds(bounds) + rect.set_clip_on(False) + for line in np.array(checkboxes.lines).ravel(): + line.set_transform(show_hide_ax.transData) + line.set_xdata(aspect + 0.05 - np.array(line.get_xdata())) + # store state + self.mne.visible_annotations = check_values + self.mne.show_hide_annotation_checkboxes = checkboxes + def _toggle_annotation_fig(self): """Show/hide the annotation dialog window.""" if self.mne.fig_annotation is None: @@ -1194,7 +1264,7 @@ def _compute_annotation_figsize(self, n_labels): 0.1 top margin 1.0 instructions 0.5 padding below instructions - --- (variable-height axis for label list) + --- (variable-height axis for label list, returned by this method) 0.1 padding above text entry 0.3 text entry 0.1 padding above button @@ -1205,11 +1275,7 @@ def _compute_annotation_figsize(self, n_labels): ------------------------------------------ 2.9 total fixed height """ - pad = 0.1 - width = 4.5 - var_height = max(pad, 0.7 * n_labels) - fixed_height = 2.9 - return (width, var_height, fixed_height, pad) + return max(ANNOTATION_FIG_PAD, 0.7 * n_labels) def _add_annotation_label(self, event): """Add new annotation description.""" @@ -1227,7 +1293,7 @@ def _add_annotation_label(self, event): self.mne.fig_annotation.label.set_text('BAD_') def _setup_annotation_colors(self): - """Set up colors for annotations.""" + """Set up colors for annotations; init some annotation vars.""" raw = self.mne.inst segment_colors = getattr(self.mne, 'annotation_segment_colors', dict()) # sort the segments by start time @@ -1248,6 +1314,10 @@ def _setup_annotation_colors(self): else: segment_colors[key] = next(color_cycle) self.mne.annotation_segment_colors = segment_colors + # init a couple other annotation-related variables + labels = self._get_annotation_labels() + self.mne.visible_annotations = {label: True for label in labels} + self.mne.show_hide_annotation_checkboxes = None def _select_annotation_span(self, vmin, vmax): """Handle annotation span selector.""" @@ -1258,8 +1328,10 @@ def _select_annotation_span(self, vmin, vmax): active_idx = labels.index(buttons.value_selected) _merge_annotations(onset, onset + duration, labels[active_idx], self.mne.inst.annotations) - self._draw_annotations() - self.canvas.draw_idle() + # if adding a span with an annotation label that is hidden, show it + if not self.mne.visible_annotations[buttons.value_selected]: + self.mne.show_hide_annotation_checkboxes.set_active(active_idx) + self._redraw(update_data=False, annotations=True) def _remove_annotation_hover_line(self): """Remove annotation line from the plot and reactivate selector.""" @@ -1321,20 +1393,21 @@ def _draw_annotations(self): segment_color = self.mne.annotation_segment_colors[descr] kwargs = dict(color=segment_color, alpha=0.3, zorder=self.mne.zorder['ann']) - # draw all segments on ax_hscroll - annot = self.mne.ax_hscroll.fill_betweenx((0, 1), start, end, - **kwargs) - self.mne.hscroll_annotations.append(annot) - # draw only visible segments on ax_main - visible_segment = np.clip([start, end], times[0], times[-1]) - if np.diff(visible_segment) > 0: - annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs) - self.mne.annotations.append(annot) - xy = (visible_segment.mean(), ylim[1]) - text = ax.annotate(descr, xy, xytext=(0, 9), - textcoords='offset points', ha='center', - va='baseline', color=segment_color) - self.mne.annotation_texts.append(text) + if self.mne.visible_annotations[descr]: + # draw all segments on ax_hscroll + annot = 
self.mne.ax_hscroll.fill_betweenx((0, 1), start, end, + **kwargs) + self.mne.hscroll_annotations.append(annot) + # draw only visible segments on ax_main + visible_segment = np.clip([start, end], times[0], times[-1]) + if np.diff(visible_segment) > 0: + annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs) + self.mne.annotations.append(annot) + xy = (visible_segment.mean(), ylim[1]) + text = ax.annotate(descr, xy, xytext=(0, 9), + textcoords='offset points', ha='center', + va='baseline', color=segment_color) + self.mne.annotation_texts.append(text) def _update_annotation_segments(self): """Update the array of annotation start/end times.""" diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 36bee9d1920..9ea76e77a29 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -354,7 +354,6 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, # plot annotations (if any) fig._setup_annotation_colors() - fig._update_annotation_segments() fig._draw_annotations() # start with projectors dialog open, if requested diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py index 283b428d171..cc0623a08fe 100644 --- a/mne/viz/tests/test_raw.py +++ b/mne/viz/tests/test_raw.py @@ -526,7 +526,18 @@ def test_plot_annotations(raw): with pytest.warns(RuntimeWarning, match='expanding outside'): raw.set_annotations(annot) _annotation_helper(raw) - plt.close('all') + # test annotation visibility toggle + fig = raw.plot() + assert len(fig.mne.annotations) == 1 + assert len(fig.mne.annotation_texts) == 1 + fig.canvas.key_press_event('a') # start annotation mode + checkboxes = fig.mne.show_hide_annotation_checkboxes + checkboxes.set_active(0) + assert len(fig.mne.annotations) == 0 + assert len(fig.mne.annotation_texts) == 0 + checkboxes.set_active(0) + assert len(fig.mne.annotations) == 1 + assert len(fig.mne.annotation_texts) == 1 @pytest.mark.parametrize('filtorder', (0, 2)) # FIR, IIR From a087895ebd69d98afc3e236677d7bf17a76d5921 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 14 Dec 2020 08:09:27 -0500 Subject: [PATCH 007/387] BUG: Mayavi center (#8644) --- mne/viz/_3d.py | 2 +- mne/viz/tests/test_3d.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 9eb90e39c91..4fd7018c63f 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -1905,8 +1905,8 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, if transparent is None: transparent = True - sd_kwargs = dict(transparent=transparent, verbose=False) center = 0. if diverging else None + sd_kwargs = dict(transparent=transparent, center=center, verbose=False) kwargs = { "array": stc, "colormap": colormap, diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 0cc56f326e2..092131f2b96 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -387,8 +387,12 @@ def test_process_clim_plot(renderer_interactive, brain_gc): stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample') # Test for simple use cases - stc.plot(**kwargs) - stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs) + brain = stc.plot(**kwargs) + assert brain.data['center'] is None + brain.close() + brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs) + assert brain.data['center'] == 0. 
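+ # (center comes from _plot_stc: 0. for a diverging colormap (pos_lims),
+ # None otherwise; the fix forwards it via sd_kwargs so downstream
+ # colormap scaling can respect the midpoint, as the asserts above verify.)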
+ brain.close()
 stc.plot(colormap='hot', clim='auto', **kwargs)
 stc.plot(colormap='mne', clim='auto', **kwargs)
 stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)

From 52c4fffc32fdcce39e3c8b654f77b2ba5f89543f Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 14 Dec 2020 10:47:05 -0500
Subject: [PATCH 008/387] MRG, MAINT: Improve server env (#8656)

* MAINT: Improve server env [skip github] [skip circle]

* FIX: Try again [skip circle] [skip github]

* FIX: Restore [skip github] [skip circle]

* FIX: Yield
---
 mne/conftest.py | 6 +++---
 mne/tests/test_coreg.py | 2 +-
 mne/utils/config.py | 5 +++--
 server_environment.yml | 19 ++++++++++---------
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/mne/conftest.py b/mne/conftest.py
index eb5a54dfe28..ff8cf3df07c 100644
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -272,7 +272,7 @@ def backend_name(request):
 yield request.param


-@pytest.yield_fixture
+@pytest.fixture
 def renderer(backend_name, garbage_collect):
 """Yield the 3D backends."""
 from mne.viz.backends.renderer import _use_test_3d_backend
@@ -283,7 +283,7 @@ def renderer(backend_name, garbage_collect):
 renderer.backend._close_all()


-@pytest.yield_fixture
+@pytest.fixture
 def garbage_collect():
 """Garbage collect on exit."""
 yield
@@ -299,7 +299,7 @@ def backend_name_interactive(request):
 yield request.param


-@pytest.yield_fixture
+@pytest.fixture
 def renderer_interactive(backend_name_interactive):
 """Yield the 3D backends."""
 from mne.viz.backends.renderer import _use_test_3d_backend
diff --git a/mne/tests/test_coreg.py b/mne/tests/test_coreg.py
index 30a8707785c..e62d579cf9d 100644
--- a/mne/tests/test_coreg.py
+++ b/mne/tests/test_coreg.py
@@ -25,7 +25,7 @@
 data_path = testing.data_path(download=False)


-@pytest.yield_fixture
+@pytest.fixture
 def few_surfaces():
 """Set the _MNE_FEW_SURFACES env var."""
 with modified_env(_MNE_FEW_SURFACES='true'):
diff --git a/mne/utils/config.py b/mne/utils/config.py
index 6a537ab668e..c53907cdac6 100644
--- a/mne/utils/config.py
+++ b/mne/utils/config.py
@@ -478,9 +478,10 @@ def sys_info(fid=None, show_paths=False):
 sklearn: 0.23.1
 numba: 0.50.1
 nibabel: 3.1.1
+ nilearn: 0.7.0
+ dipy: 1.1.1
 cupy: Not found
 pandas: 1.0.5
- dipy: 1.1.1
 mayavi: Not found
 pyvista: 0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)}
 vtk: 9.0.1
@@ -521,7 +522,7 @@ def sys_info(fid=None, show_paths=False):
 libs = _get_numpy_libs()
 has_3d = False
 for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
- 'numba', 'nibabel', 'cupy', 'pandas', 'dipy',
+ 'numba', 'nibabel', 'nilearn', 'dipy', 'cupy', 'pandas',
 'mayavi', 'pyvista', 'vtk', 'PyQt5'):
 if mod_name == '':
 out += '\n'
diff --git a/server_environment.yml b/server_environment.yml
index 066e8773c02..8530c5dd767 100644
--- a/server_environment.yml
+++ b/server_environment.yml
@@ -2,7 +2,6 @@ name: base
 channels:
 - conda-forge/label/vtk_dev
 - conda-forge
-- defaults
 dependencies:
 - python>=3.7
 - pip
@@ -10,16 +9,18 @@ dependencies:
 - ffmpeg
 - vtk
 - traits
+- scipy
+- numpy
+- matplotlib-base
+- pyvista
+- nilearn
+- nibabel
+- nbformat
+- nbclient
+- mffpy>=0.5.7
- pip:
 - mne
- - scipy
- - numpy<1.19.0
- - matplotlib
- - ipympl
 - jupyter
- - pyvista
+ - ipympl
 - ipywidgets
- - nbformat
- - nbclient
 - jupyter_client!=6.1.5
- - mffpy>=0.5.7

From e40e592d3d2c6aeaa5c44bab2602d4aa486fc830 Mon Sep 17 00:00:00 2001
From: Daniel McCloy
Date: Mon, 14 Dec 2020 10:04:28 -0600
Subject: [PATCH 009/387] MRG, MAINT: bump sphinxcontrib-bibtex
version (#8653)

* bump sphinxcontrib-bibtex version

* fix conf

* fix footcite missing colon
---
 doc/conf.py | 1 -
 examples/preprocessing/plot_eeg_csd.py | 2 +-
 requirements_doc.txt | 2 +-
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/doc/conf.py b/doc/conf.py
index df315811271..3f09e5c25a1 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -69,7 +69,6 @@
 'sphinx_bootstrap_theme',
 'sphinx_bootstrap_divs',
 'sphinxcontrib.bibtex',
- 'sphinxcontrib.bibtex2',
 ]

 linkcheck_ignore = [
diff --git a/examples/preprocessing/plot_eeg_csd.py b/examples/preprocessing/plot_eeg_csd.py
index 3935d4e58c8..d0fb8dfa33a 100644
--- a/examples/preprocessing/plot_eeg_csd.py
+++ b/examples/preprocessing/plot_eeg_csd.py
@@ -4,7 +4,7 @@
 =====================================================

 This script shows an example of how to use CSD
-:footcite`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`.
+:footcite:`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`.
 CSD takes the spatial Laplacian of the sensor signal (derivative in both
 x and y). It does what a planar gradiometer does in MEG. Computing these
 spatial derivatives reduces point spread. CSD transformed data have a sharper
diff --git a/requirements_doc.txt b/requirements_doc.txt
index ad2c7ec6a04..90fce35486b 100644
--- a/requirements_doc.txt
+++ b/requirements_doc.txt
@@ -3,7 +3,7 @@ https://github.com/numpy/numpydoc/archive/master.zip
 sphinx_fontawesome
 sphinx_bootstrap_theme
 https://github.com/sphinx-gallery/sphinx-gallery/archive/master.zip
-https://github.com/mcmtroffaes/sphinxcontrib-bibtex/archive/29694f215b39d64a31b845aafd9ff2ae9329494f.zip
+sphinxcontrib-bibtex>=2.0.0
 memory_profiler
 neo
 seaborn

From 5d7ddd2bb64f22a150ff49e7f2c61f7dab2a6e53 Mon Sep 17 00:00:00 2001
From: Daniel McCloy
Date: Mon, 14 Dec 2020 10:48:31 -0600
Subject: [PATCH 010/387] VIZ: Fix head size (#8651)

* fix EEG head size

* touch tutorial

* fix: compat w/ old MPL
---
 mne/viz/topomap.py | 5 ++--
 mne/viz/utils.py | 10 +++++--
 .../plot_50_artifact_correction_ssp.py | 26 +++++++++----------
 3 files changed, 23 insertions(+), 18 deletions(-)

diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py
index 828e4402ad4..cbcc3dd3c20 100644
--- a/mne/viz/topomap.py
+++ b/mne/viz/topomap.py
@@ -349,11 +349,10 @@ def plot_projs_topomap(projs, info, cmap=None, sensors=True,
 n_projs = len(projs)
 if axes is None:
 fig, axes, ncols, nrows = _prepare_trellis(
- n_projs, ncols='auto', nrows='auto')
+ n_projs, ncols='auto', nrows='auto', sharex=True, sharey=True)
 elif isinstance(axes, plt.Axes):
 axes = [axes]
- if len(axes) != n_projs:
- raise RuntimeError('There must be an axes for each picked projector.')
+ _validate_if_list_of_axes(axes, n_projs)

 # handle vmin/vmax
 vlims = [None for _ in range(len(datas))]
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index eb7ded5a6e3..27182f4b061 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -362,7 +362,7 @@ def _make_event_color_dict(event_color, events=None, event_id=None):


 def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False,
- size=1.3):
+ size=1.3, sharex=False, sharey=False):
 import matplotlib.pyplot as plt
 from matplotlib.gridspec import GridSpec

@@ -401,7 +401,13 @@ def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False,
 else:
 ax_idxs = range(n_cells)
 for ax_idx in ax_idxs:
- axes.append(plt.subplot(gs[ax_idx]))
+ subplot_kw = dict()
+ if ax_idx > 0:
+ if sharex:
+ subplot_kw.update(sharex=axes[0])
+ if sharey:
+ subplot_kw.update(sharey=axes[0])
axes.append(plt.subplot(gs[ax_idx], **subplot_kw)) fig = axes[0].get_figure() diff --git a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py b/tutorials/preprocessing/plot_50_artifact_correction_ssp.py index 8dc8345acdf..c9c715c5cbc 100644 --- a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/plot_50_artifact_correction_ssp.py @@ -63,7 +63,7 @@ # The :ref:`example data ` was recorded on a Neuromag system, # which stores SSP projectors for environmental noise removal in the system # configuration (so that reasonably clean raw data can be viewed in real-time -# during acquisition). For this reason, all the :class:`~mne.io.Raw` data in +# during acquisition). For this reason, all the `~mne.io.Raw` data in # the example dataset already includes SSP projectors, which are noted in the # output when loading the data: @@ -116,7 +116,7 @@ # ``vlim='joint'`` so that the colormap is computed jointly for all projectors # of a given channel type; this makes it easier to compare their relative # smoothness. Note that for the function to know the types of channels in a -# projector, you must also provide the corresponding :class:`~mne.Info` object: +# projector, you must also provide the corresponding `~mne.Info` object: # sphinx_gallery_thumbnail_number = 3 empty_room_projs = mne.compute_proj_raw(empty_room_raw, n_grad=3, n_mag=3) @@ -143,7 +143,7 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We could visualize the different effects these have on the data by applying -# each set of projectors to different copies of the :class:`~mne.io.Raw` object +# each set of projectors to different copies of the `~mne.io.Raw` object # using :meth:`~mne.io.Raw.apply_proj`. However, the :meth:`~mne.io.Raw.plot` # method has a ``proj`` parameter that allows us to *temporarily* apply # projectors while plotting, so we can use this to visualize the difference @@ -208,7 +208,7 @@ # MNE-Python provides several functions for detecting and removing heartbeats # from EEG and MEG data. As we saw in :ref:`tut-artifact-overview`, # :func:`~mne.preprocessing.create_ecg_epochs` can be used to both detect and -# extract heartbeat artifacts into an :class:`~mne.Epochs` object, which can +# extract heartbeat artifacts into an `~mne.Epochs` object, which can # be used to visualize how the heartbeat artifacts manifest across the sensors: ecg_evoked = create_ecg_epochs(raw).average() @@ -224,7 +224,7 @@ ############################################################################### # To compute SSP projectors for the heartbeat artifact, you can use # :func:`~mne.preprocessing.compute_proj_ecg`, which takes a -# :class:`~mne.io.Raw` object as input and returns the requested number of +# `~mne.io.Raw` object as input and returns the requested number of # projectors for magnetometers, gradiometers, and EEG channels (default is two # projectors for each channel type). # :func:`~mne.preprocessing.compute_proj_ecg` also returns an :term:`events` @@ -237,7 +237,7 @@ ############################################################################### # The first line of output tells us that # :func:`~mne.preprocessing.compute_proj_ecg` found three existing projectors -# already in the :class:`~mne.io.Raw` object, and will include those in the +# already in the `~mne.io.Raw` object, and will include those in the # list of projectors that it returns (appending the new ECG projectors to the # end of the list). 
If you don't want that, you can change that behavior with # the boolean ``no_proj`` parameter. Since we've already run the computation, @@ -255,12 +255,12 @@ ############################################################################### # Since no dedicated ECG sensor channel was detected in the -# :class:`~mne.io.Raw` object, by default +# `~mne.io.Raw` object, by default # :func:`~mne.preprocessing.compute_proj_ecg` used the magnetometers to # estimate the ECG signal (as stated on the third line of output, above). You # can also supply the ``ch_name`` parameter to restrict which channel to use # for ECG artifact detection; this is most useful when you had an ECG sensor -# but it is not labeled as such in the :class:`~mne.io.Raw` file. +# but it is not labeled as such in the `~mne.io.Raw` file. # # The next few lines of the output describe the filter used to isolate ECG # events. The default settings are usually adequate, but the filter can be @@ -328,14 +328,14 @@ # for performing each part of the process. Specifically: # # - :func:`mne.preprocessing.find_ecg_events` for detecting heartbeats in a -# :class:`~mne.io.Raw` object and returning a corresponding :term:`events` +# `~mne.io.Raw` object and returning a corresponding :term:`events` # array # # - :func:`mne.preprocessing.create_ecg_epochs` for detecting heartbeats in a -# :class:`~mne.io.Raw` object and returning an :class:`~mne.Epochs` object +# `~mne.io.Raw` object and returning an `~mne.Epochs` object # # - :func:`mne.compute_proj_epochs` for creating projector(s) from any -# :class:`~mne.Epochs` object +# `~mne.Epochs` object # # See the documentation of each function for further details. # @@ -354,11 +354,11 @@ ############################################################################### # Just like we did with the heartbeat artifact, we can compute SSP projectors # for the ocular artifact using :func:`~mne.preprocessing.compute_proj_eog`, -# which again takes a :class:`~mne.io.Raw` object as input and returns the +# which again takes a `~mne.io.Raw` object as input and returns the # requested number of projectors for magnetometers, gradiometers, and EEG # channels (default is two projectors for each channel type). This time, we'll # pass ``no_proj`` parameter (so we get back only the new EOG projectors, not -# also the existing projectors in the :class:`~mne.io.Raw` object), and we'll +# also the existing projectors in the `~mne.io.Raw` object), and we'll # ignore the events array by assigning it to ``_`` (the conventional way of # handling unwanted return elements in Python). 
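The EOG workflow this tutorial describes mirrors the ECG one above; a minimal sketch under the same assumptions (hypothetical file path, one projector per channel type as in the earlier ECG call):

import mne
from mne.preprocessing import compute_proj_eog

raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
# no_proj=True returns only the new EOG projectors, not the ones
# already stored in the raw file
eog_projs, _ = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1,
                                no_proj=True)
raw.add_proj(eog_projs)
raw.plot(proj=True)  # temporarily apply all projectors while plotting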
From 391f462317e5487b6dceeaa3c5361fe97f9deba2 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Mon, 14 Dec 2020 19:45:40 +0100 Subject: [PATCH 011/387] Render is necessary now (#8657) --- mne/viz/backends/_notebook.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mne/viz/backends/_notebook.py b/mne/viz/backends/_notebook.py index 7a8702e7b47..ec211f229c7 100644 --- a/mne/viz/backends/_notebook.py +++ b/mne/viz/backends/_notebook.py @@ -69,6 +69,7 @@ def screenshot(self): return fig, dh def update(self): + self.plotter.render() self.dh.set_data(self.plotter.screenshot()) self.fig.canvas.draw() From ef2a2e491763db89d7411eee4c0dd49b1c8032ee Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 14 Dec 2020 16:06:25 -0500 Subject: [PATCH 012/387] MRG, ENH: Progressbar for csd_morlet (#8608) * ENH: Progressbar for csd_morlet * FIX: Cleanup --- doc/changes/latest.inc | 2 + mne/time_frequency/csd.py | 26 ++++++------ mne/time_frequency/tests/test_tfr.py | 2 +- mne/time_frequency/tfr.py | 60 +++++++++++++++++----------- mne/utils/tests/test_progressbar.py | 3 +- 5 files changed, 55 insertions(+), 38 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 5a0f52c1e35..88fb89cfee0 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -75,6 +75,8 @@ Enhancements - Add ``picks`` parameter to :func:`mne.preprocessing.fix_stim_artifact` to specify which channel needs to be fixed (:gh:`8482` by `Alex Gramfort`_) +- Add progress bar support to :func:`mne.time_frequency.csd_morlet` (:gh:`8608` by `Eric Larson`_) + - Further improved documentation building instructions and execution on Windows (:gh:`8502` by `kalenkovich`_ and `Eric Larson`_) - Add option to disable TQDM entirely with ``MNE_TQDM='off'`` (:gh:`8515` by `Eric Larson`_) diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py index c0de757ab89..a5a2cd172d4 100644 --- a/mne/time_frequency/csd.py +++ b/mne/time_frequency/csd.py @@ -9,10 +9,11 @@ import numbers import numpy as np -from .tfr import cwt, morlet +from .tfr import _cwt_array, morlet, _get_nfft from ..fixes import rfftfreq from ..io.pick import pick_channels, _picks_to_idx -from ..utils import logger, verbose, warn, copy_function_doc_to_method_doc +from ..utils import (logger, verbose, warn, copy_function_doc_to_method_doc, + ProgressBar) from ..viz.misc import plot_csd from ..time_frequency.multitaper import (_compute_mt_params, _mt_spectra, _csd_from_mt, _psd_from_mt_adaptive) @@ -1021,9 +1022,10 @@ def csd_array_morlet(X, sfreq, frequencies, t0=0, tmin=None, tmax=None, times = times[csd_tslice] # Compute the CSD + nfft = _get_nfft(wavelets, X, use_fft) return _execute_csd_function(X, times, frequencies, _csd_morlet, - params=[sfreq, wavelets, csd_tslice, use_fft, - decim], + params=[sfreq, wavelets, nfft, csd_tslice, + use_fft, decim], n_fft=1, ch_names=ch_names, projs=projs, n_jobs=n_jobs, verbose=verbose) @@ -1140,14 +1142,8 @@ def _execute_csd_function(X, times, frequencies, csd_function, params, n_fft, # Compute CSD for each trial n_blocks = int(np.ceil(n_epochs / float(n_jobs))) - for i in range(n_blocks): + for i in ProgressBar(range(n_blocks), mesg='CSD epoch blocks'): epoch_block = X[i * n_jobs:(i + 1) * n_jobs] - if n_jobs > 1: - logger.info(' Computing CSD matrices for epochs %d..%d' - % (i * n_jobs + 1, (i + 1) * n_jobs)) - else: - logger.info(' Computing CSD matrix for epoch %d' % (i + 1)) - csds = parallel(my_csd(this_epoch, *params) for this_epoch in epoch_block) @@ -1274,7 +1270,8 @@ def _csd_multitaper(X, 
sfreq, n_times, window_fun, eigvals, freq_mask, n_fft, return csds -def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): +def _csd_morlet(data, sfreq, wavelets, nfft, tslice=None, use_fft=True, + decim=1): """Compute cross spectral density (CSD) using the given Morlet wavelets. Computes the CSD for a single epoch of data. @@ -1289,6 +1286,8 @@ def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): wavelets : list of ndarray The Morlet wavelets for which to compute the CSD's. These have been created by the `mne.time_frequency.tfr.morlet` function. + nfft : int + The number of FFT points. tslice : slice | None The desired time samples to compute the CSD over. If None, defaults to including all time samples. @@ -1314,7 +1313,8 @@ def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): _vector_to_sym_mat : For converting the CSD to a full matrix. """ # Compute PSD - psds = cwt(data, wavelets, use_fft=use_fft, decim=decim) + psds = _cwt_array(data, wavelets, nfft, mode='same', use_fft=use_fft, + decim=decim) if tslice is not None: tstart = None if tslice.start is None else tslice.start // decim diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py index 01fd66b386c..701291ffe9e 100644 --- a/mne/time_frequency/tests/test_tfr.py +++ b/mne/time_frequency/tests/test_tfr.py @@ -256,7 +256,7 @@ def test_time_frequency(): # When convolving in time, wavelets must not be longer than the data pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws, use_fft=False) - with pytest.warns(UserWarning, match='one of the wavelets is longer'): + with pytest.warns(UserWarning, match='one of the wavelets.*is longer'): cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True) # Check for off-by-one errors when using wavelets with an even number of diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 74077a48a4b..873ec2ee7bf 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -20,6 +20,7 @@ from ..baseline import rescale from ..fixes import fft, ifft +from ..filter import next_fast_len from ..parallel import parallel_func from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname, sizeof_fmt, GetEpochsMixin, _prepare_read_metadata, @@ -172,7 +173,24 @@ def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False): # Low level convolution -def _cwt(X, Ws, mode="same", decim=1, use_fft=True): +def _get_nfft(wavelets, X, use_fft=True, check=True): + n_times = X.shape[-1] + max_size = max(w.size for w in wavelets) + if max_size > n_times: + msg = (f'At least one of the wavelets ({max_size}) is longer than the ' + f'signal ({n_times}). Consider using a longer signal or ' + 'shorter wavelets.') + if check: + if use_fft: + warn(msg, UserWarning) + else: + raise ValueError(msg) + nfft = n_times + max_size - 1 + nfft = next_fast_len(nfft) # 2 ** int(np.ceil(np.log2(nfft))) + return nfft + + +def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True): """Compute cwt with fft based convolutions or temporal convolutions. Parameters @@ -181,6 +199,8 @@ def _cwt(X, Ws, mode="same", decim=1, use_fft=True): The data. Ws : list of array Wavelets time series. + fsize : int + FFT length. mode : {'full', 'valid', 'same'} See numpy.convolve. 
decim : int | slice, default 1 @@ -204,31 +224,15 @@ def _cwt(X, Ws, mode="same", decim=1, use_fft=True): X = np.asarray(X) # Precompute wavelets for given frequency range to save time - n_signals, n_times = X.shape + _, n_times = X.shape n_times_out = X[:, decim].shape[1] n_freqs = len(Ws) - Ws_max_size = max(W.size for W in Ws) - size = n_times + Ws_max_size - 1 - # Always use 2**n-sized FFT - fsize = 2 ** int(np.ceil(np.log2(size))) - # precompute FFTs of Ws if use_fft: fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128) - - warn_me = True - for i, W in enumerate(Ws): - if use_fft: + for i, W in enumerate(Ws): fft_Ws[i] = fft(W, fsize) - if len(W) > n_times and warn_me: - msg = ('At least one of the wavelets is longer than the signal. ' - 'Consider padding the signal or using shorter wavelets.') - if use_fft: - warn(msg, UserWarning) - warn_me = False # Suppress further warnings - else: - raise ValueError(msg) # Make generator looping across signals tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128) @@ -380,6 +384,8 @@ def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet', out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype) # Parallel computation + all_Ws = sum([list(W) for W in Ws], list()) + _get_nfft(all_Ws, epoch_data, use_fft) parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs) # Parallelization is applied across channels. @@ -510,7 +516,10 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim): # Loops across tapers. for W in Ws: - coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft) + # No need to check here, it's done earlier (outside parallel part) + nfft = _get_nfft(W, X, use_fft, check=False) + coefs = _cwt_gen( + X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft) # Inter-trial phase locking is apparently computed per taper... if 'itc' in output: @@ -586,11 +595,16 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1): mne.time_frequency.tfr_morlet : Compute time-frequency decomposition with Morlet wavelets. 
""" - decim = _check_decim(decim) - n_signals, n_times = X[:, decim].shape + nfft = _get_nfft(Ws, X, use_fft) + return _cwt_array(X, Ws, nfft, mode, decim, use_fft) + - coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft) +def _cwt_array(X, Ws, nfft, mode, decim, use_fft): + decim = _check_decim(decim) + coefs = _cwt_gen( + X, Ws, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft) + n_signals, n_times = X[:, decim].shape tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128) for k, tfr in enumerate(coefs): tfrs[k] = tfr diff --git a/mne/utils/tests/test_progressbar.py b/mne/utils/tests/test_progressbar.py index c7e25282b76..c259a8b140a 100644 --- a/mne/utils/tests/test_progressbar.py +++ b/mne/utils/tests/test_progressbar.py @@ -24,7 +24,8 @@ def test_progressbar(): def iter_func(a): for ii in a: pass - pytest.raises(Exception, iter_func, ProgressBar(20)) + with pytest.raises(TypeError, match='not iterable'): + iter_func(pbar) # Make sure different progress bars can be used with catch_logging() as log, modified_env(MNE_TQDM='tqdm'), \ From d4f99de184f80a069a42302e9427f2e9b0aad8fb Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 15 Dec 2020 12:54:20 -0500 Subject: [PATCH 013/387] MRG, BUG: Fix bugs with envcorr (#8658) * BUG: Fix bugs with envcorr * DOC: Name, faster log * FIX: Invalid * FIX: Missed one --- doc/changes/latest.inc | 4 ++ doc/changes/names.inc | 2 + .../plot_mne_inverse_envelope_correlation.py | 1 + mne/connectivity/envelope.py | 39 +++++++++++--- mne/connectivity/tests/test_envelope.py | 54 ++++++++++++++----- 5 files changed, 80 insertions(+), 20 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 88fb89cfee0..34ed9d9b248 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -28,6 +28,8 @@ Current (0.22.dev0) .. |Hongjiang Ye| replace:: **Hongjiang Ye** +.. |Qianliang Li| replace:: **Qianliang Li** + Enhancements ~~~~~~~~~~~~ @@ -97,6 +99,8 @@ Enhancements Bugs ~~~~ +- Fix orthogonalization of power envelopes in :func:`mne.connectivity.envelope_correlation` (:gh:`8658` **by new contributor** |Qianliang Li|_ and `Eric Larson`_) + - Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) - Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels that are listed in the labels file, but which are absent from the saved data file (e.g. 'Ref' channel). Also now populates info['meas_date'] if possible (:gh:`8400` **by new contributor** |Tod Flak|_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 756bdf9cbff..813b5e1992c 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -341,3 +341,5 @@ .. _Evan Hathaway: https://github.com/ephathaway .. _Hongjiang Ye: https://github.com/rubyyhj + +.. _Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774 diff --git a/examples/connectivity/plot_mne_inverse_envelope_correlation.py b/examples/connectivity/plot_mne_inverse_envelope_correlation.py index b73845ed06f..38f34e447a1 100644 --- a/examples/connectivity/plot_mne_inverse_envelope_correlation.py +++ b/examples/connectivity/plot_mne_inverse_envelope_correlation.py @@ -9,6 +9,7 @@ :footcite:`HippEtAl2012,KhanEtAl2018` in source space using resting state CTF data. 
""" + # Authors: Eric Larson # Sheraz Khan # Denis Engemann diff --git a/mne/connectivity/envelope.py b/mne/connectivity/envelope.py index 4d813ed4984..1587bdcc9ef 100644 --- a/mne/connectivity/envelope.py +++ b/mne/connectivity/envelope.py @@ -13,7 +13,7 @@ @verbose def envelope_correlation(data, combine='mean', orthogonalize="pairwise", - verbose=None): + log=False, absolute=True, verbose=None): """Compute the envelope correlation. Parameters @@ -40,6 +40,18 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", absolute values. .. versionadded:: 0.19 + log : bool + If True (default False), square and take the log before orthonalizing + envelopes or computing correlations. + + .. versionadded:: 0.22 + absolute : bool + If True (default), then take the absolute value of correlation + coefficients before making each epoch's correlation matrix + symmetric (and thus before combining matrices across epochs). + Only used when ``orthogonalize=True``. + + .. versionadded:: 0.22 %(verbose)s Returns @@ -54,6 +66,10 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", This function computes the power envelope correlation between orthogonalized signals [1]_ [2]_. + .. versionchanged:: 0.22 + Computations fixed for ``orthogonalize=True`` and diagonal entries are + set explicitly to zero. + References ---------- .. [1] Hipp JF, Hawellek DJ, Corbetta M, Siegel M, Engel AK (2012) @@ -99,6 +115,9 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", data_mag = np.abs(epoch_data) data_conj_scaled = epoch_data.conj() data_conj_scaled /= data_mag + if log: + data_mag *= data_mag + np.log(data_mag, out=data_mag) # subtract means data_mag_nomean = data_mag - np.mean(data_mag, axis=-1, keepdims=True) # compute variances using linalg.norm (square, sum, sqrt) since mean=0 @@ -107,21 +126,29 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", corr = np.empty((n_nodes, n_nodes)) for li, label_data in enumerate(epoch_data): if orthogonalize is False: # the new code - label_data_orth = data_mag - label_data_orth_std = data_mag_std + label_data_orth = data_mag[li] + label_data_orth_std = data_mag_std[li] else: label_data_orth = (label_data * data_conj_scaled).imag + np.abs(label_data_orth, out=label_data_orth) + # protect against invalid value -- this will be zero + # after (log and) mean subtraction + label_data_orth[li] = 1. + if log: + label_data_orth *= label_data_orth + np.log(label_data_orth, out=label_data_orth) label_data_orth -= np.mean(label_data_orth, axis=-1, keepdims=True) label_data_orth_std = np.linalg.norm(label_data_orth, axis=-1) label_data_orth_std[label_data_orth_std == 0] = 1 # correlation is dot product divided by variances - corr[li] = np.dot(label_data_orth, data_mag_nomean[li]) - corr[li] /= data_mag_std[li] + corr[li] = np.sum(label_data_orth * data_mag_nomean, axis=1) + corr[li] /= data_mag_std corr[li] /= label_data_orth_std if orthogonalize is not False: # Make it symmetric (it isn't at this point) - corr = np.abs(corr) + if absolute: + corr = np.abs(corr) corr = (corr.T + corr) / 2. corrs.append(corr) del corr diff --git a/mne/connectivity/tests/test_envelope.py b/mne/connectivity/tests/test_envelope.py index 5220a3f65f9..41d0930510b 100644 --- a/mne/connectivity/tests/test_envelope.py +++ b/mne/connectivity/tests/test_envelope.py @@ -15,19 +15,23 @@ def _compute_corrs_orig(data): # This is the version of the code by Sheraz and Denis. 
# For this version (epochs, labels, time) must be -> (labels, time, epochs) - data = np.transpose(data, (1, 2, 0)) - corr_mats = np.empty((data.shape[0], data.shape[0], data.shape[2])) - for index, label_data in enumerate(data): - label_data_orth = np.imag(label_data * (data.conj() / np.abs(data))) - label_data_orig = np.abs(label_data) - label_data_cont = np.transpose( - np.dstack((label_data_orig, np.transpose(label_data_orth, - (1, 2, 0)))), (1, 2, 0)) - corr_mats[index] = np.array([np.corrcoef(dat) - for dat in label_data_cont])[:, 0, 1:].T - corr_mats = np.transpose(corr_mats, (2, 0, 1)) - corr = np.mean(np.array([(np.abs(corr_mat) + np.abs(corr_mat).T) / 2. - for corr_mat in corr_mats]), axis=0) + n_epochs, n_labels, _ = data.shape + corr = np.zeros((n_labels, n_labels)) + for epoch_data in data: + for ii in range(n_labels): + for jj in range(n_labels): + # Get timeseries for each pair + x, y = epoch_data[ii], epoch_data[jj] + x_mag = np.abs(x) + x_conj_scaled = x.conj() + x_conj_scaled /= x_mag + # Calculate orthogonalization + y_orth_x = (y * x_conj_scaled).imag + y_orth_x_mag = np.abs(y_orth_x) + # Estimate correlation + corr[ii, jj] += np.abs(np.corrcoef(x_mag, y_orth_x_mag)[0, 1]) + corr = (corr + corr.T) / (2. * n_epochs) + corr.flat[::n_labels + 1] = 0. return corr @@ -37,7 +41,7 @@ def test_envelope_correlation(): data = rng.randn(2, 4, 64) data_hilbert = hilbert(data, axis=-1) corr_orig = _compute_corrs_orig(data_hilbert) - assert (0 < corr_orig).all() + assert (0 <= corr_orig).all() assert (corr_orig < 1).all() # using complex data corr = envelope_correlation(data_hilbert) @@ -72,3 +76,25 @@ def test_envelope_correlation(): assert_allclose(np.diag(corr_plain_mean), 1) np_corr = np.array([np.corrcoef(np.abs(x)) for x in data_hilbert]) assert_allclose(corr_plain, np_corr) + + # check against FieldTrip, which uses the square-log-norm version + # from scipy.io import savemat + # savemat('data.mat', dict(data_hilbert=data_hilbert)) + # matlab + # load data + # ft_connectivity_powcorr_ortho(reshape(data_hilbert(1,:,:), [4, 64])) + # ft_connectivity_powcorr_ortho(reshape(data_hilbert(2,:,:), [4, 64])) + ft_vals = np.array([ + [[np.nan, 0.196734553900236, 0.063173148355451, -0.242638384630448], + [0.196734553900236, np.nan, 0.041799775495150, -0.088205187548542], + [0.063173148355451, 0.041799775495150, np.nan, 0.090331428512317], + [-0.242638384630448, -0.088205187548542, 0.090331428512317, np.nan]], + [[np.nan, -0.013270857462890, 0.185200598081295, 0.140284351572544], + [-0.013270857462890, np.nan, 0.150981508043722, -0.000671809276372], + [0.185200598081295, 0.150981508043722, np.nan, 0.137460244313337], + [0.140284351572544, -0.000671809276372, 0.137460244313337, np.nan]], + ], float) + ft_vals[np.isnan(ft_vals)] = 0 + corr_log = envelope_correlation( + data, combine=None, log=True, absolute=False) + assert_allclose(corr_log, ft_vals) From 88d9be9f8694911c2a923154411f24229b437310 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 15 Dec 2020 13:40:06 -0500 Subject: [PATCH 014/387] BUG: Fix alpha for volumes (#8663) --- doc/changes/latest.inc | 2 ++ mne/viz/_3d.py | 15 ++++++++---- mne/viz/_brain/_brain.py | 11 +++++---- mne/viz/_brain/colormap.py | 4 ++-- mne/viz/_brain/tests/test_brain.py | 24 ++++++++++++------- mne/viz/backends/_pyvista.py | 4 ++-- tutorials/misc/plot_seeg.py | 1 + .../source-modeling/plot_beamformer_lcmv.py | 3 ++- 8 files changed, 42 insertions(+), 22 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 
34ed9d9b248..2c91156fc94 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -141,6 +141,8 @@ Bugs

 - Fix bug with coordinate frames when performing volumetric morphs via :func:`mne.compute_source_morph` and :meth:`mne.SourceMorph.apply` that could lead to ~5 mm bias (:gh:`8642` by `Eric Larson`_)

+- Fix bug with volumetric rendering alpha in :meth:`mne.VolSourceEstimate.plot_3d` and related functions (:gh:`8663` by `Eric Larson`_)
+
 - Fix missing documentation of :func:`mne.io.read_raw_nihon` in :ref:`tut-imorting-eeg-data` (:gh:`8320` by `Adam Li`_)

 - Fix bug with :func:`mne.add_reference_channels` when :func:`mne.io.Raw.reorder_channels` or related methods are used afterward (:gh:`8303`, :gh:`8484` by `Eric Larson`_)
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
index 4fd7018c63f..be5c11ba612 100644
--- a/mne/viz/_3d.py
+++ b/mne/viz/_3d.py
@@ -1242,6 +1242,15 @@ def _sensor_shape(coil):
 return rrs, tris


+def _get_cmap(colormap):
+ import matplotlib.pyplot as plt
+ if isinstance(colormap, str) and colormap in ('mne', 'mne_analyze'):
+ colormap = mne_analyze_colormap([0, 1, 2], format='matplotlib')
+ else:
+ colormap = plt.get_cmap(colormap)
+ return colormap
+
+
 def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True):
 """Convert colormap/clim options to dict.

@@ -1249,7 +1258,6 @@ def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True):
 calling gives the same results.
 """
 # Based on type of limits specified, get cmap control points
- import matplotlib.pyplot as plt
 from matplotlib.colors import Colormap
 _validate_type(colormap, (str, Colormap), 'colormap')
 data = np.asarray(data)
@@ -1265,10 +1273,7 @@ def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True):
 colormap = 'hot'
 else: # 'pos_lims' in clim
 colormap = 'mne'
- if colormap in ('mne', 'mne_analyze'):
- colormap = mne_analyze_colormap([0, 1, 2], format='matplotlib')
- else:
- colormap = plt.get_cmap(colormap)
+ colormap = _get_cmap(colormap)
 assert isinstance(colormap, Colormap)
 diverging_maps = ['PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr',
diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py
index 9519d819145..5afba92007f 100644
--- a/mne/viz/_brain/_brain.py
+++ b/mne/viz/_brain/_brain.py
@@ -56,10 +56,10 @@ def __init__(self, scalars, colormap, rng, opacity):
 self._opacity = opacity

 def to_colors(self):
- from matplotlib.cm import get_cmap
+ from .._3d import _get_cmap
 from matplotlib.colors import ListedColormap
 if isinstance(self._colormap, str):
- cmap = get_cmap(self._colormap)
+ cmap = _get_cmap(self._colormap)
 else:
 cmap = ListedColormap(self._colormap / 255.)
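The new `_get_cmap` gives `_process_clim` and the Brain colormap code a single place that understands the special 'mne'/'mne_analyze' names. A rough user-facing equivalent (the function name here is a stand-in for the private helper):

import matplotlib.pyplot as plt
from mne.viz import mne_analyze_colormap


def resolve_cmap(colormap):
    # 'mne'/'mne_analyze' build the diverging MNE colormap; any other
    # string resolves through matplotlib as before
    if isinstance(colormap, str) and colormap in ('mne', 'mne_analyze'):
        return mne_analyze_colormap([0, 1, 2], format='matplotlib')
    return plt.get_cmap(colormap)


print(resolve_cmap('mne'))  # LinearSegmentedColormap
print(resolve_cmap('hot'))  # standard matplotlib colormap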
@@ -344,8 +344,8 @@ def __init__(self, subject_id, hemi, surf, title=None, offscreen=False, interaction='trackball', units='mm', view_layout='vertical', show=True): from ..backends.renderer import backend, _get_renderer, _get_3d_backend + from .._3d import _get_cmap from matplotlib.colors import colorConverter - from matplotlib.cm import get_cmap if hemi in ('both', 'split'): self._hemis = ('lh', 'rh') @@ -414,7 +414,7 @@ def __init__(self, subject_id, hemi, surf, title=None, geo_kwargs = self._cortex_colormap(cortex) # evaluate at the midpoint of the used colormap val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin']) - self._brain_color = get_cmap(geo_kwargs['colormap'])(val) + self._brain_color = _get_cmap(geo_kwargs['colormap'])(val) # load geometry for one or both hemispheres as necessary offset = None if (not offset or hemi != 'both') else 0.0 @@ -3098,6 +3098,9 @@ def LeaveEvent(self): def SetEventInformation(self, *args, **kwargs): pass + def CharEvent(self): + pass + def KeyPressEvent(self, *args, **kwargs): pass diff --git a/mne/viz/_brain/colormap.py b/mne/viz/_brain/colormap.py index d5de54c7e7c..5c68ca2da34 100644 --- a/mne/viz/_brain/colormap.py +++ b/mne/viz/_brain/colormap.py @@ -10,9 +10,9 @@ def create_lut(cmap, n_colors=256, center=None): """Return a colormap suitable for setting as a LUT.""" - from matplotlib import cm + from .._3d import _get_cmap assert not (isinstance(cmap, str) and cmap == 'auto') - cmap = cm.get_cmap(cmap) + cmap = _get_cmap(cmap) lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64) return lut diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index cc2ccf9be51..93a1cd13684 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -13,7 +13,7 @@ import pytest import numpy as np -from numpy.testing import assert_allclose +from numpy.testing import assert_allclose, assert_array_equal from mne import (read_source_estimate, SourceEstimate, MixedSourceEstimate, VolSourceEstimate) @@ -424,13 +424,18 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, brain = _create_testing_brain( hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0, volume_options=None, # for speed, don't upsample - n_time=1 if src == 'mixed' else 5, + n_time=1 if src == 'mixed' else 5, diverging=True, add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)), ) assert brain.show_traces assert hasattr(brain, "picked_points") assert hasattr(brain, "_spheres") assert brain.plotter.scalar_bar.GetNumberOfLabels() == 3 + # mne_analyze should be chosen + ctab = brain._data['ctable'] + assert_array_equal(ctab[0], [0, 255, 255, 255]) # opaque cyan + assert_array_equal(ctab[-1], [255, 255, 0, 255]) # opaque yellow + assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3) # add foci should work for volumes brain.add_foci([[0, 0, 0]], hemi='lh' if src == 'surface' else 'vol') @@ -685,7 +690,7 @@ def test_calculate_lut(): def _create_testing_brain(hemi, surf='inflated', src='surface', size=300, - n_time=5, **kwargs): + n_time=5, diverging=False, **kwargs): assert src in ('surface', 'mixed', 'volume') meth = 'plot' if src in ('surface', 'mixed'): @@ -715,14 +720,17 @@ def _create_testing_brain(hemi, surf='inflated', src='surface', size=300, stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \ rng.rand(stc_data.size // 20) stc_data.shape = (n_verts, n_time) + if diverging: + stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) - fmin = stc.data.min() - 
fmax = stc.data.max() - fmid = (fmin + fmax) / 2. + clim = dict(kind='value', lims=[0.1, 0.2, 0.3]) + if diverging: + clim['pos_lims'] = clim.pop('lims') + brain_data = getattr(stc, meth)( subject=subject_id, hemi=hemi, surface=surf, size=size, - subjects_dir=subjects_dir, colormap='hot', - clim=dict(kind='value', lims=(fmin, fmid, fmax)), src=sample_src, + subjects_dir=subjects_dir, colormap='auto', + clim=clim, src=sample_src, **kwargs) return brain_data diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 648cd98602d..1b340d2a974 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -898,8 +898,8 @@ def _set_volume_range(volume, ctable, alpha, scalar_bar, rng): color_tf = vtk.vtkColorTransferFunction() opacity_tf = vtk.vtkPiecewiseFunction() for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable): - color_tf.AddRGBPoint(loc, *color[:-1]) - opacity_tf.AddPoint(loc, color[-1] * alpha / 255. / (len(ctable) - 1)) + color_tf.AddRGBPoint(loc, *(color[:-1] / 255.)) + opacity_tf.AddPoint(loc, color[-1] * alpha / 255.) color_tf.ClampingOn() opacity_tf.ClampingOn() volume.GetProperty().SetColor(color_tf) diff --git a/tutorials/misc/plot_seeg.py b/tutorials/misc/plot_seeg.py index f68a654f320..b7370f80693 100644 --- a/tutorials/misc/plot_seeg.py +++ b/tutorials/misc/plot_seeg.py @@ -26,6 +26,7 @@ :ref:`tut_working_with_ecog`. In the ECoG example, we show how to visualize surface grid channels on the brain. """ + # Authors: Eric Larson # Adam Li # diff --git a/tutorials/source-modeling/plot_beamformer_lcmv.py b/tutorials/source-modeling/plot_beamformer_lcmv.py index 9d5fa7dfa34..81448ad9073 100644 --- a/tutorials/source-modeling/plot_beamformer_lcmv.py +++ b/tutorials/source-modeling/plot_beamformer_lcmv.py @@ -10,7 +10,8 @@ :depth: 2 """ -# Author: Britta Westner +# Authors: Britta Westner +# Eric Larson # # License: BSD (3-clause) From ae750c60b3a6f97b8ba610fecf272c10c5a4a227 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 15 Dec 2020 13:50:35 -0500 Subject: [PATCH 015/387] MRG, BUG, ENH: Add window option (#8662) * BUG, ENH: Add window option * FIX: Test --- doc/changes/latest.inc | 4 ++++ mne/io/base.py | 5 +++-- mne/time_frequency/psd.py | 32 ++++++++++++++++------------ mne/time_frequency/tests/test_psd.py | 21 +++++++++++++----- mne/utils/docs.py | 11 ++++++++++ mne/viz/_figure.py | 4 ++-- mne/viz/epochs.py | 5 ++++- mne/viz/raw.py | 9 ++++++-- mne/viz/tests/test_figure.py | 2 +- 9 files changed, 66 insertions(+), 27 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 2c91156fc94..97fbb53d190 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -85,6 +85,8 @@ Enhancements - Add option ``on_header_missing`` to :func:`mne.channels.read_polhemus_fastscan` (:gh:`8622` by `Eric Larson`_) +- Add option ``window`` to :func:`mne.time_frequency.psd_welch` and related functions (:gh:`8662` by `Eric Larson`_) + - `mne.preprocessing.ICA.plot_sources` now displays an `mne.preprocessing.ICA.plot_properties` window when right-clicking on component names on the y-axis (:gh:`8381` by `Daniel McCloy`_) - :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_bdf`, and :func:`mne.io.read_raw_gdf` now detect and handle invalid highpass/lowpass filter settings (:gh:`8584` by `Clemens Brunner`_) @@ -179,6 +181,8 @@ Bugs - Fix bug in `mne.viz.plot_compare_evokeds` where evokeds with identical ``comment`` attributes would not plot properly if passed as a list (:gh:`8590` by `Daniel McCloy`_) +- Fix bug in 
:func:`mne.time_frequency.psd_welch` and related functions where the window default errantly changed from ``'hamming'`` to ``('tukey', 0.25)`` (:gh:`8662` by `Eric Larson`_) + - Fix bug in :func:`mne.io.read_raw_kit` where scale factors for EEG channels could be set to zero (:gh:`8542` by `Eric Larson`_) - Fix reading GDF files with excluded channels in :func:`mne.io.read_raw_gdf` (:gh:`8520` by `Clemens Brunner`_) diff --git a/mne/io/base.py b/mne/io/base.py index 448ee910988..5084398b507 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1443,7 +1443,8 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, picks=None, ax=None, color='black', xscale='linear', area_mode='std', area_alpha=0.33, dB=True, estimate='auto', show=True, n_jobs=1, average=False, line_alpha=None, - spatial_colors=True, sphere=None, verbose=None): + spatial_colors=True, sphere=None, window='hamming', + verbose=None): return plot_raw_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, proj=proj, n_fft=n_fft, n_overlap=n_overlap, reject_by_annotation=reject_by_annotation, @@ -1452,7 +1453,7 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, dB=dB, estimate=estimate, show=show, n_jobs=n_jobs, average=average, line_alpha=line_alpha, spatial_colors=spatial_colors, sphere=sphere, - verbose=verbose) + window=window, verbose=verbose) @copy_function_doc_to_method_doc(plot_raw_psd_topo) def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False, diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index 6adaa310876..7e1dac881d0 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -84,7 +84,8 @@ def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False): @verbose def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, - n_per_seg=None, n_jobs=1, average='mean', verbose=None): + n_per_seg=None, n_jobs=1, average='mean', window='hamming', + verbose=None): """Compute power spectral density (PSD) using Welch's method. Parameters @@ -107,13 +108,12 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, Length of each Welch segment (windowed with a Hamming window). Defaults to None, which sets n_per_seg equal to n_fft. %(n_jobs)s - average : str | None - How to average the segments. If ``mean`` (default), calculate the - arithmetic mean. If ``median``, calculate the median, corrected for - its bias relative to the mean. If ``None``, returns the unaggregated - segments. + %(average-psd)s .. versionadded:: 0.19.0 + %(window-psd)s + + .. 
versionadded:: 0.22.0 %(verbose)s Returns ------- @@ -154,11 +154,14 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, # Parallelize across first N-1 dimensions x_splits = np.array_split(x, n_jobs) + logger.debug( + f'Spectrogram using {n_fft}-point FFT on {n_per_seg} samples with ' + f'{n_overlap} overlap and {window} window') from scipy.signal import spectrogram parallel, my_spect_func, n_jobs = parallel_func(_spect_func, n_jobs=n_jobs) func = partial(spectrogram, noverlap=n_overlap, nperseg=n_per_seg, - nfft=n_fft, fs=sfreq) + nfft=n_fft, fs=sfreq, window=window) f_spect = parallel(my_spect_func(d, func=func, freq_sl=freq_sl, average=average) for d in x_splits) @@ -173,7 +176,8 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, @verbose def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1, - reject_by_annotation=True, average='mean', verbose=None): + reject_by_annotation=True, average='mean', window='hamming', + verbose=None): """Compute the power spectral density (PSD) using Welch's method. Calculates periodograms for a sliding window over the time dimension, then @@ -209,13 +213,12 @@ def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, %(reject_by_annotation_raw)s .. versionadded:: 0.15.0 - average : str | None - How to average the segments. If ``mean`` (default), calculate the - arithmetic mean. If ``median``, calculate the median, corrected for - its bias relative to the mean. If ``None``, returns the unaggregated - segments. + %(average-psd)s .. versionadded:: 0.19.0 + %(window-psd)s + + .. versionadded:: 0.22.0 %(verbose)s Returns @@ -246,7 +249,8 @@ def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, reject_by_annotation=reject_by_annotation) return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft, n_overlap=n_overlap, n_per_seg=n_per_seg, - average=average, n_jobs=n_jobs, verbose=verbose) + average=average, n_jobs=n_jobs, window=window, + verbose=verbose) @verbose diff --git a/mne/time_frequency/tests/test_psd.py b/mne/time_frequency/tests/test_psd.py index ce4618ac0a7..61e9145206e 100644 --- a/mne/time_frequency/tests/test_psd.py +++ b/mne/time_frequency/tests/test_psd.py @@ -6,7 +6,7 @@ from mne import pick_types, Epochs, read_events from mne.io import RawArray, read_raw_fif -from mne.utils import run_tests_if_main +from mne.utils import catch_logging from mne.time_frequency import psd_welch, psd_multitaper, psd_array_welch base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -30,6 +30,12 @@ def test_psd_nan(): x[0], float(n_fft), n_fft=n_fft, n_overlap=n_overlap) assert_allclose(freqs, freqs_2) assert_allclose(psds[0], psds_2) + # defaults + with catch_logging() as log: + psd_array_welch(x, float(n_fft), verbose='debug') + log = log.getvalue() + assert 'using 256-point FFT on 256 samples with 0 overlap' in log + assert 'hamming window' in log def test_psd(): @@ -61,7 +67,15 @@ def test_psd(): for func, kws in funcs: kws = kws.copy() kws.update(kws_psd) - psds, freqs = func(raw, proj=False, **kws) + kws.update(verbose='debug') + if func is psd_welch: + kws.update(window='hann') + with catch_logging() as log: + psds, freqs = func(raw, proj=False, **kws) + log = log.getvalue() + if func is psd_welch: + assert f'{n_fft}-point FFT on {n_fft} samples with 0 overl' in log + assert 'hann window' in log psds_proj, freqs_proj = func(raw, proj=True, **kws) assert psds.shape == 
(len(kws['picks']), len(freqs)) @@ -264,6 +278,3 @@ def test_compares_psd(): assert (np.sum(psds_welch < 0) == 0) assert (np.sum(psds_mpl < 0) == 0) - - -run_tests_if_main() diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 12f9b3a1e82..d9b4aacc006 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -578,6 +578,17 @@ Frequency-domain window to use in resampling. See :func:`scipy.signal.resample`. """ +docdict['average-psd'] = """ +average : str | None + How to average the segments. If ``mean`` (default), calculate the + arithmetic mean. If ``median``, calculate the median, corrected for + its bias relative to the mean. If ``None``, returns the unaggregated + segments. +""" +docdict['window-psd'] = """ +window : str | float | tuple + Windowing function to use. See :func:`scipy.signal.get_window`. +""" docdict['decim'] = """ decim : int Factor by which to subsample the data. diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 36b9ddde443..4274455fa1a 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -2297,7 +2297,7 @@ def _line_figure(inst, axes=None, picks=None, **kwargs): def _psd_figure(inst, proj, picks, axes, area_mode, tmin, tmax, fmin, fmax, n_jobs, color, area_alpha, dB, estimate, average, - spatial_colors, xscale, line_alpha, sphere, **kwargs): + spatial_colors, xscale, line_alpha, sphere, window, **kwargs): """Instantiate a new power spectral density figure.""" from .. import BaseEpochs from ..io import BaseRaw @@ -2309,7 +2309,7 @@ def _psd_figure(inst, proj, picks, axes, area_mode, tmin, tmax, fmin, fmax, if kw in kwargs: psd_kwargs[kw] = kwargs.pop(kw) if isinstance(inst, BaseRaw): - psd_func = psd_welch + psd_func = partial(psd_welch, window=window) elif isinstance(inst, BaseEpochs): psd_func = psd_multitaper else: diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index 7373405e618..55d00e14be4 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -965,12 +965,15 @@ def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None, from ._figure import _psd_figure # generate figure + # epochs always use multitaper, not Welch, so no need to allow "window" + # param above fig = _psd_figure( inst=epochs, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB, average=average, estimate=estimate, area_mode=area_mode, line_alpha=line_alpha, area_alpha=area_alpha, color=color, spatial_colors=spatial_colors, n_jobs=n_jobs, bandwidth=bandwidth, - adaptive=adaptive, low_bias=low_bias, normalization=normalization) + adaptive=adaptive, low_bias=low_bias, normalization=normalization, + window='hamming') plt_show(show) return fig diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 9ea76e77a29..22247c9a6df 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -374,7 +374,8 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, picks=None, ax=None, color='black', xscale='linear', area_mode='std', area_alpha=0.33, dB=True, estimate='auto', show=True, n_jobs=1, average=False, line_alpha=None, - spatial_colors=True, sphere=None, verbose=None): + spatial_colors=True, sphere=None, window='hamming', + verbose=None): """%(plot_psd_doc)s. Parameters @@ -414,6 +415,9 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, %(plot_psd_line_alpha)s %(plot_psd_spatial_colors)s %(topomap_sphere_auto)s + %(window-psd)s + + .. 
versionadded:: 0.22.0 %(verbose)s Returns @@ -435,7 +439,8 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, average=average, estimate=estimate, area_mode=area_mode, line_alpha=line_alpha, area_alpha=area_alpha, color=color, spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft, - n_overlap=n_overlap, reject_by_annotation=reject_by_annotation) + n_overlap=n_overlap, reject_by_annotation=reject_by_annotation, + window=window) plt_show(show) return fig diff --git a/mne/viz/tests/test_figure.py b/mne/viz/tests/test_figure.py index 23643750a6c..9f28d1b87b8 100644 --- a/mne/viz/tests/test_figure.py +++ b/mne/viz/tests/test_figure.py @@ -15,4 +15,4 @@ def test_browse_figure_constructor(): def test_psd_figure_constructor(): """Test error handling in MNELineFigure constructor.""" with pytest.raises(TypeError, match='an instance of Raw or Epochs, got'): - _psd_figure('foo', *((None,) * 18)) + _psd_figure('foo', *((None,) * 19)) From 5ba77c8b947001da1598671503b165f232687dca Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 15 Dec 2020 15:52:35 -0500 Subject: [PATCH 016/387] MRG, ENH: Add DICS bias tests (#8610) * WIP: Add DICS bias tests * FIX: Fix whitening * FIX: Latest * FIX: Flake * FIX: Which func * ENH: Add orientation tests * FIX: Bounds * FIX: Tol * FIX: Less magic * FIX: Tol * ENH: max power ori test * FIX: Tol * STY: Sp --- doc/changes/latest.inc | 2 + mne/beamformer/_compute_beamformer.py | 26 ++++- mne/beamformer/_dics.py | 23 +++-- mne/beamformer/_lcmv.py | 26 +---- mne/beamformer/tests/test_dics.py | 74 ++++++++++++-- mne/beamformer/tests/test_lcmv.py | 133 +++++++++++++++++++------ mne/conftest.py | 19 +++- mne/minimum_norm/tests/test_inverse.py | 61 ++++++++---- 8 files changed, 267 insertions(+), 97 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 97fbb53d190..05b42686b11 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -115,6 +115,8 @@ Bugs - Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) +- Fix bug with :func:`mne.beamformer.apply_dics` where the whitener was not properly applied (:gh:`8610` by `Eric Larson`_) + - Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_ ) - Fix bug with :func:`mne.viz.plot_source_estimates` when using the PyVista backend where singleton time points were not handled properly (:gh:`8285` by `Eric Larson`_) diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py index c0ccc435ba0..387f631cf80 100644 --- a/mne/beamformer/_compute_beamformer.py +++ b/mne/beamformer/_compute_beamformer.py @@ -94,7 +94,7 @@ def _prepare_beamformer_input(info, forward, label=None, pick_ori=None, orient_std = np.ones(gain.shape[1]) # Get the projector - proj, ncomp, _ = make_projector( + proj, _, _ = make_projector( info_picked['projs'], info_picked['ch_names']) return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, orient_std) @@ -142,7 +142,8 @@ def _sym_inv_sm(x, reduce_rank, inversion, sk): def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, - reduce_rank, rank, inversion, nn, orient_std): + reduce_rank, rank, inversion, nn, orient_std, + whitener): """Compute a spatial beamformer filter (LCMV or DICS). 
For more detailed information on the parameters, see the docstrings of @@ -172,6 +173,8 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, The source normals. orient_std : ndarray, shape (n_dipoles,) The std of the orientation prior used in weighting the lead fields. + whitener : ndarray, shape (n_channels, n_channels) + The whitener. Returns ------- @@ -181,6 +184,13 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, _check_option('weight_norm', weight_norm, ['unit-noise-gain-invariant', 'unit-noise-gain', 'nai', None]) + + # Whiten the data covariance + Cm = whitener @ Cm @ whitener.T.conj() + # Restore to properly Hermitian as large whitening coefs can have bad + # rounding error + Cm[:] = (Cm + Cm.T.conj()) / 2. + assert Cm.shape == (G.shape[0],) * 2 s, _ = np.linalg.eigh(Cm) if not (s >= -s.max() * 1e-7).all(): @@ -499,3 +509,15 @@ def read_beamformer(fname): for arg in ('data', 'names', 'bads', 'projs', 'nfree', 'eig', 'eigvec', 'method', 'loglik')]) return Beamformer(beamformer) + + +def _proj_whiten_data(M, proj, filters): + if filters.get('is_ssp', True): + # check whether data and filter projs match + _check_proj_match(proj, filters) + if filters['whitener'] is None: + M = np.dot(filters['proj'], M) + + if filters['whitener'] is not None: + M = np.dot(filters['whitener'], M) + return M diff --git a/mne/beamformer/_dics.py b/mne/beamformer/_dics.py index 6745dc1ee9f..52c5b512dc9 100644 --- a/mne/beamformer/_dics.py +++ b/mne/beamformer/_dics.py @@ -9,6 +9,7 @@ import numpy as np from ..channels import equalize_channels +from ..io.pick import pick_info, pick_channels from ..utils import (logger, verbose, warn, _check_one_ch_type, _check_channels_spatial_filter, _check_rank, _check_option, _validate_type) @@ -16,9 +17,10 @@ from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth from ..source_estimate import _make_stc, _get_src_type from ..time_frequency import csd_fourier, csd_multitaper, csd_morlet -from ._compute_beamformer import (_check_proj_match, _prepare_beamformer_input, +from ._compute_beamformer import (_prepare_beamformer_input, _compute_beamformer, _check_src_type, - Beamformer, _compute_power) + Beamformer, _compute_power, + _proj_whiten_data) @verbose @@ -165,7 +167,9 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, n_freqs = len(frequencies) _check_one_ch_type('dics', info, forward, csd, noise_csd) - info, fwd, csd = equalize_channels([info, forward, csd]) + # remove bads so that equalize_channels only keeps all good + info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) + info, forward, csd = equalize_channels([info, forward, csd]) csd, noise_csd = _prepare_noise_csd(csd, noise_csd, real_filter) @@ -189,17 +193,18 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, (freq, i + 1, n_freqs)) Cm = csd.get_data(index=i) + + # XXX: Weird that real_filter happens *before* whitening, which could + # make things complex again...? 
if real_filter: Cm = Cm.real - # Whiten the CSD - Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) - # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, - rank=rank, inversion=inversion, nn=nn, orient_std=orient_std) + rank=rank, inversion=inversion, nn=nn, orient_std=orient_std, + whitener=whitener) Ws.append(W) max_oris.append(max_power_ori) @@ -256,9 +261,7 @@ def _apply_dics(data, filters, info, tmin): logger.info("Processing epoch : %d" % (i + 1)) # Apply SSPs - if info['projs']: - _check_proj_match(info['projs'], filters) - M = np.dot(filters['proj'], M) + M = _proj_whiten_data(M, info['projs'], filters) stcs = [] for W in Ws: diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py index c58bace14e9..f58a6305dd7 100644 --- a/mne/beamformer/_lcmv.py +++ b/mne/beamformer/_lcmv.py @@ -16,8 +16,8 @@ from ..utils import logger, verbose, _check_channels_spatial_filter from ..utils import _check_one_ch_type, _check_info_inv from ._compute_beamformer import ( - _check_proj_match, _prepare_beamformer_input, _compute_power, - _compute_beamformer, _check_src_type, Beamformer) + _prepare_beamformer_input, _compute_power, + _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) @verbose @@ -168,13 +168,6 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, Cm = data_cov._get_square() if 'estimator' in data_cov: del data_cov['estimator'] - - # Whiten the data covariance - Cm = np.dot(whitener, np.dot(Cm, whitener.T)) - # Restore to positive semi-definite, as - # (negative eigenvalues are errant / due to massive scaling differences) - s, u = np.linalg.eigh(Cm) - Cm = np.dot(u * np.abs(s), u.T.conj()) rank_int = sum(rank.values()) del rank @@ -182,7 +175,8 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, - inversion=inversion, nn=nn, orient_std=orient_std) + inversion=inversion, nn=nn, orient_std=orient_std, + whitener=whitener) # get src type to store with filters for _make_stc src_type = _get_src_type(forward['src'], vertno) @@ -206,18 +200,6 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, return filters -def _proj_whiten_data(M, proj, filters): - if filters['is_ssp']: - # check whether data and filter projs match - _check_proj_match(proj, filters) - if filters['whitener'] is None: - M = np.dot(filters['proj'], M) - - if filters['whitener'] is not None: - M = np.dot(filters['whitener'], M) - return M - - def _apply_lcmv(data, filters, info, tmin, max_ori_out): """Apply LCMV spatial filter to data for source reconstruction.""" if max_ori_out != 'signed': diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 9c70742475f..38127a8be65 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -18,7 +18,7 @@ from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.beamformer._dics import _prepare_noise_csd from mne.time_frequency import csd_morlet -from mne.utils import run_tests_if_main, object_diff, requires_h5py +from mne.utils import object_diff, requires_h5py from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest from mne.beamformer.tests.test_lcmv import _assert_weight_norm @@ -110,6 +110,18 @@ def 
_simulate_data(fwd, idx): # Somewhere on the frontal lobe by default idx_param = pytest.mark.parametrize('idx', [0, 100, 200, 233]) +def _rand_csd(rng, info): + scales = mne.make_ad_hoc_cov(info).data + n = scales.size + # Some random complex correlation structure (with channel scalings) + data = rng.randn(n, n) + 1j * rng.randn(n, n) + data = data @ data.conj().T + data *= scales + data *= scales[:, np.newaxis] + data.flat[::n + 1] = scales + return data + + @pytest.mark.slowtest @testing.requires_testing_data @requires_h5py @@ -127,14 +139,7 @@ def test_make_dics(tmpdir, _load_forward, idx, whiten): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: rng = np.random.RandomState(0) - scales = mne.make_ad_hoc_cov(epochs.info).data - n = scales.size - # Some random complex correlation structure (with channel scalings) - data = rng.randn(n, n) + 1j * rng.randn(n, n) - data = data @ data.conj().T - data *= scales - data *= scales[:, np.newaxis] - data.flat[::n + 1] = scales + data = _rand_csd(rng, epochs.info) noise_csd = CrossSpectralDensity( _sym_mat_to_vector(data), epochs.ch_names, 0., csd.n_fft) else: @@ -669,4 +674,53 @@ def test_tf_dics(_load_forward, idx): assert np.all(np.isnan(stcs[0].data)) -run_tests_if_main() +def _cov_as_csd(cov, info): + rng = np.random.RandomState(0) + assert cov['data'].ndim == 2 + assert len(cov['data']) == len(cov['names']) + # we need to make this have at least some complex structure + data = cov['data'] + 1e-1 * _rand_csd(rng, info) + assert data.dtype == np.complex128 + return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16) + + +# Just test free ori here (assume fixed is same as LCMV if these are) +# Changes here should be synced with test_lcmv.py +@pytest.mark.parametrize( + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [ + (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True), + (0.05, None, 'unit-noise-gain', False, None, 13, 14, False), + (0.05, None, 'unit-noise-gain', True, None, 35, 37, False), + (0.05, None, 'nai', True, None, 35, 37, False), + (0.05, None, None, True, None, 12, 14, False), + (0.05, None, None, True, 0.8, 39, 43, False), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, + False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True), + (0.05, 'max-power', 'nai', True, None, 21, 24, False), + (0.05, 'max-power', None, True, None, 7, 10, False), + (0.05, 'max-power', None, True, 0.8, 15, 18, False), + # skip most no-reg tests, assume others are equal to LCMV if these are + (0.00, None, None, True, None, 21, 32, False), + (0.00, 'max-power', None, True, None, 13, 19, False), + ]) +def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, + use_cov, depth, lower, upper, real_filter): + """Test localization bias for free-orientation DICS.""" + evoked, fwd, noise_cov, data_cov, want = bias_params_free + noise_csd = _cov_as_csd(noise_cov, evoked.info) + data_csd = _cov_as_csd(data_cov, evoked.info) + del noise_cov, data_cov + if not use_cov: + evoked.pick_types(meg='grad') + noise_csd = None + loc = apply_dics(evoked, make_dics( + evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori, + weight_norm=weight_norm, depth=depth, real_filter=real_filter)).data + loc = 
np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + perc = (want == np.argmax(loc, axis=0)).mean() * 100 + assert lower <= perc <= upper diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 2beb8ad15a6..9799b52ea87 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -10,6 +10,7 @@ assert_array_less) import mne +from mne.transforms import apply_trans, invert_transform from mne import (convert_forward_solution, read_forward_solution, compute_rank, VolVectorSourceEstimate, VolSourceEstimate, EvokedArray, pick_channels_cov) @@ -20,7 +21,9 @@ from mne.datasets import testing from mne.fixes import _get_args from mne.io.compensator import set_current_comp +from mne.io.constants import FIFF from mne.minimum_norm import make_inverse_operator, apply_inverse +from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match from mne.simulation import simulate_evoked from mne.utils import object_diff, requires_h5py, catch_logging @@ -446,10 +449,11 @@ def test_make_lcmv_sphere(pick_ori, weight_norm): # Test that we get an error if not reducing rank with pytest.raises(ValueError, match='Singular matrix detected'): - make_lcmv( - evoked.info, fwd_sphere, data_cov, reg=0.1, - noise_cov=noise_cov, weight_norm=weight_norm, - pick_ori=pick_ori, reduce_rank=False, rank='full') + with pytest.warns(RuntimeWarning, match='positive semidefinite'): + make_lcmv( + evoked.info, fwd_sphere, data_cov, reg=0.1, + noise_cov=noise_cov, weight_norm=weight_norm, + pick_ori=pick_ori, reduce_rank=False, rank='full') # Now let's reduce it filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1, @@ -666,46 +670,111 @@ def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, assert lower <= perc <= upper +# Changes here should be synced with test_dics.py @pytest.mark.parametrize( - 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper', [ - (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28), - (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42), - (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14), - (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37), - (0.05, 'vector', 'nai', True, None, 35, 37), - (0.05, 'vector', None, True, None, 12, 14), - (0.05, 'vector', None, True, 0.8, 39, 43), - (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20), - (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20), - (0.05, 'max-power', 'nai', True, None, 21, 24), - (0.05, 'max-power', None, True, None, 7, 10), - (0.05, 'max-power', None, True, 0.8, 15, 18), - (0.05, None, None, True, 0.8, 40, 42), + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, ' + 'lower_ori, upper_ori', [ + (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28, 0.82, 0.84), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42, 0.96, 0.98), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14, 0.79, 0.81), + (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', 'nai', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', None, True, None, 12, 14, 0.97, 0.98), + (0.05, 'vector', None, True, 0.8, 39, 43, 0.97, 0.98), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, 0, 0), # noqa: E501 + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, 0, 0), + (0.05, 'max-power', 'nai', True, None, 
21, 24, 0, 0), + (0.05, 'max-power', None, True, None, 7, 10, 0, 0), + (0.05, 'max-power', None, True, 0.8, 15, 18, 0, 0), + (0.05, None, None, True, 0.8, 40, 42, 0, 0), # no reg - (0.00, 'vector', None, True, None, 21, 32), - (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 50, 65), - (0.00, 'vector', 'unit-noise-gain', True, None, 42, 65), - (0.00, 'vector', 'nai', True, None, 42, 65), - (0.00, 'max-power', None, True, None, 13, 19), - (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 43, 50), - (0.00, 'max-power', 'unit-noise-gain', True, None, 43, 50), - (0.00, 'max-power', 'nai', True, None, 43, 50), + (0.00, 'vector', None, True, None, 23, 24, 0.96, 0.97), + (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 52, 54, 0.95, 0.96), # noqa: E501 + (0.00, 'vector', 'unit-noise-gain', True, None, 44, 46, 0.97, 0.98), + (0.00, 'vector', 'nai', True, None, 44, 46, 0.97, 0.98), + (0.00, 'max-power', None, True, None, 14, 15, 0, 0), + (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 35, 37, 0, 0), # noqa: E501 + (0.00, 'max-power', 'unit-noise-gain', True, None, 35, 37, 0, 0), + (0.00, 'max-power', 'nai', True, None, 35, 37, 0, 0), ]) def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, - use_cov, depth, lower, upper): + use_cov, depth, lower, upper, + lower_ori, upper_ori): """Test localization bias for free-orientation LCMV.""" evoked, fwd, noise_cov, data_cov, want = bias_params_free if not use_cov: evoked.pick_types(meg='grad') noise_cov = None - loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg, - noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, - depth=depth)).data + with pytest.warns(None): # rank deficiency of data_cov + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, filters).data + if pick_ori == 'vector': + ori = loc.copy() / np.linalg.norm(loc, axis=1, keepdims=True) + else: + # doesn't make sense for pooled (None) or max-power (can't be all 3) + ori = None loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) # Compute the percentage of sources for which there is no loc bias: - perc = (want == np.argmax(loc, axis=0)).mean() * 100 + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 + assert lower <= perc <= upper + _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori) + + +# Changes here should be synced with the ones above, but these have meaningful +# orientation values +@pytest.mark.parametrize( + 'reg, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori', [ + (0.05, 'unit-noise-gain-invariant', False, None, 38, 40, 0.52, 0.54), + (0.05, 'unit-noise-gain', False, None, 38, 40, 0.52, 0.54), + (0.05, 'nai', True, None, 56, 57, 0.56, 0.58), + (0.05, None, True, None, 27, 28, 0.54, 0.56), + (0.05, None, True, 0.8, 42, 43, 0.54, 0.56), + # no reg + (0.00, None, True, None, 50, 51, 0.57, 0.58), + (0.00, 'unit-noise-gain-invariant', True, None, 73, 75, 0.57, 0.58), + (0.00, 'unit-noise-gain', True, None, 73, 75, 0.57, 0.58), + (0.00, 'nai', True, None, 73, 75, 0.57, 0.58), + ]) +def test_orientation_max_power(bias_params_fixed, bias_params_free, + reg, weight_norm, use_cov, depth, lower, upper, + lower_ori, upper_ori): + """Test orientation selection for bias for max-power LCMV.""" + # we simulate data for the fixed orientation forward and beamform using + # the free orientation forward, and check the orientation match at the end + 
evoked, _, noise_cov, data_cov, want = bias_params_fixed + fwd = bias_params_free[1] + if not use_cov: + evoked.pick_types(meg='grad') + noise_cov = None + with pytest.warns(None): # rank deficiency of data_cov + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori='max-power', + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, filters).data + ori = filters['max_power_ori'] + loc = np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 assert lower <= perc <= upper + # Compute the dot products of our forward normals and the estimated + # max-power orientations + assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate( + [s['nn'][v] for s, v in zip(fwd['src'], filters['vertices'])]) + nn = nn[want] + nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) + assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) + dots = np.abs((nn * ori).sum(-1)) + assert_array_less(dots, 1) + assert_array_less(0, dots) + got = np.mean(dots) + assert lower_ori < got < upper_ori @pytest.mark.parametrize('weight_norm', ('nai', 'unit-noise-gain')) diff --git a/mne/conftest.py b/mne/conftest.py index ff8cf3df07c..edf67efee55 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -35,6 +35,7 @@ fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif') fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif') fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') bem_path = op.join(test_path, 'subjects', 'sample', 'bem') fname_bem = op.join(bem_path, 'sample-1280-bem.fif') fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz') @@ -240,7 +241,8 @@ def bias_params_free(evoked, noise_cov): def bias_params_fixed(evoked, noise_cov): """Provide inputs for fixed bias functions.""" fwd = mne.read_forward_solution(fname_fwd) - fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True) + mne.convert_forward_solution( + fwd, force_fixed=True, surf_ori=True, copy=False) return _bias_params(evoked, noise_cov, fwd) @@ -248,14 +250,23 @@ def _bias_params(evoked, noise_cov, fwd): evoked.pick_types(meg=True, eeg=True, exclude=()) # restrict to limited set of verts (small src here) and one hemi for speed vertices = [fwd['src'][0]['vertno'].copy(), []] - stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)), - vertices, 0., 1.)
+ stc = mne.SourceEstimate( + np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1) fwd = mne.forward.restrict_forward_to_stc(fwd, stc) assert fwd['sol']['row_names'] == noise_cov['names'] assert noise_cov['names'] == evoked.ch_names evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info) data_cov = noise_cov.copy() - data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T) + data = fwd['sol']['data'] @ fwd['sol']['data'].T + data *= 1e-14 # 100 nAm at each source, effectively (1e-18 would be 1 nAm) + # This is rank-deficient, so let's make it actually positive semidefinite + # by regularizing a tiny bit + data.flat[::data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)['data'] + # Do our projection + proj, _, _ = mne.io.proj.make_projector( + data_cov['projs'], data_cov['names']) + data = proj @ data @ proj.T + data_cov['data'][:] = data assert data_cov['data'].shape[0] == len(noise_cov['names']) want = np.arange(fwd['sol']['data'].shape[1]) if not mne.forward.is_fixed_orient(fwd): diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 1c30d3217d6..e00ea0b958c 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -31,7 +31,7 @@ make_inverse_operator, apply_inverse_cov, write_inverse_operator, prepare_inverse_operator, compute_rank_inverse, INVERSE_METHODS) -from mne.utils import _TempDir, run_tests_if_main, catch_logging +from mne.utils import _TempDir, catch_logging test_path = testing.data_path(download=False) s_path = op.join(test_path, 'MEG', 'sample') @@ -364,30 +364,39 @@ def test_localization_bias_loose(bias_params_fixed, method, lower, upper, assert lower <= perc <= upper, method -@pytest.mark.parametrize('method, lower, upper, kwargs, depth, loose', [ - ('MNE', 21, 24, {}, dict(limit=None, combine_xyz=False, exp=1.), 1), - ('MNE', 35, 40, {}, dict(limit_depth_chs=False), 1), # ancient default - ('MNE', 45, 55, {}, 0.8, 1), # MNE default - ('MNE', 65, 70, {}, dict(limit_depth_chs='whiten'), 1), # sparse default - ('dSPM', 40, 45, {}, 0.8, 1), - ('sLORETA', 90, 95, {}, 0.8, 1), - ('eLORETA', 93, 100, dict(method_params=dict(force_equal=True)), None, 1), - ('eLORETA', 100, 100, {}, None, 1.0), - ('eLORETA', 100, 100, {}, 0.8, 1.0), - ('eLORETA', 100, 100, {}, 0.8, 0.999), -]) +@pytest.mark.parametrize( + 'method, lower, upper, lower_ori, upper_ori, kwargs, depth, loose', [ + ('MNE', 21, 24, 0.73, 0.75, {}, + dict(limit=None, combine_xyz=False, exp=1.), 1), + ('MNE', 35, 40, 0.93, 0.94, {}, + dict(limit_depth_chs=False), 1), # ancient default + ('MNE', 45, 55, 0.94, 0.95, {}, 0.8, 1), # MNE default + ('MNE', 65, 70, 0.945, 0.955, {}, + dict(limit_depth_chs='whiten'), 1), # sparse default + ('dSPM', 40, 45, 0.96, 0.97, {}, 0.8, 1), + ('sLORETA', 93, 95, 0.95, 0.96, {}, 0.8, 1), + ('eLORETA', 93, 100, 0.95, 0.96, + dict(method_params=dict(force_equal=True)), None, 1), + ('eLORETA', 100, 100, 0.98, 0.99, {}, None, 1.0), + ('eLORETA', 100, 100, 0.98, 0.99, {}, 0.8, 1.0), + ('eLORETA', 100, 100, 0.98, 0.99, {}, 0.8, 0.999), + ] +) def test_localization_bias_free(bias_params_free, method, lower, upper, - kwargs, depth, loose): + lower_ori, upper_ori, kwargs, depth, loose): """Test inverse localization bias for free minimum-norm solvers.""" evoked, fwd, noise_cov, _, want = bias_params_free - inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=1., + inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=loose, depth=depth) loc = 
apply_inverse(evoked, inv_free, lambda2, method, pick_ori='vector', verbose='debug', **kwargs).data + ori = loc / np.linalg.norm(loc, axis=1, keepdims=True) loc = np.linalg.norm(loc, axis=1) # Compute the percentage of sources for which there is no loc bias: - perc = (want == np.argmax(loc, axis=0)).mean() * 100 + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 assert lower <= perc <= upper, method + _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori) def test_apply_inverse_sphere(evoked): @@ -1244,4 +1253,22 @@ def test_sss_rank(): assert rank == 67 -run_tests_if_main() +def _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori): + __tracebackhide__ = True + # Because of how we construct our free ori tests, the correct orientations + # are just np.eye(3) repeated, so our dot products are just np.diag() + # of all of the orientations + if ori is None: + return + if ori.ndim == 3: # time-varying + assert ori.shape == (ori.shape[0], 3, max_idx.size) + ori = ori[max_idx, :, np.arange(max_idx.size)] + else: + assert ori.ndim == 2 + assert ori.shape == (ori.shape[0], 3) + ori = ori[max_idx] + assert ori.shape == (max_idx.size, 3) + ori.shape = (max_idx.size // 3, 3, 3) + dots = np.abs(np.diagonal(ori, axis1=1, axis2=2)) + mu = np.mean(dots) + assert lower_ori <= mu <= upper_ori, mu From 679f954b51ae6d51be29423eab53e500be9d8755 Mon Sep 17 00:00:00 2001 From: Alexandre Gramfort Date: Wed, 16 Dec 2020 14:29:34 +0100 Subject: [PATCH 017/387] MRG, BUG: fix rstrip in EDF (#8577) * fix rstrip * ENH: Cleaner _edf_str * FIX: Missed one Co-authored-by: Eric Larson --- mne/io/edf/edf.py | 53 ++++++++++++++++-------------------- mne/io/edf/tests/test_edf.py | 4 +-- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index f74b6d0a6e4..ad93262f8da 100644 --- a/mne/io/edf/edf.py +++ b/mne/io/edf/edf.py @@ -516,8 +516,8 @@ def _parse_prefilter_string(prefiltering): return highpass, lowpass -def _edf_str_int(x, fid=None): - return int(x.decode().split('\x00')[0]) +def _edf_str(x): + return x.decode('latin-1').split('\x00')[0] def _read_edf_header(fname, exclude): @@ -581,7 +581,7 @@ def _read_edf_header(fname, exclude): f'{day:02d} {hour:02d}:{minute:02d}:{sec:02d}).') meas_date = None - header_nbytes = _edf_str_int(fid.read(8)) + header_nbytes = int(_edf_str(fid.read(8))) # The following 44 bytes sometimes identify the file type, but this is # not guaranteed. Therefore, we skip this field and use the file @@ -591,15 +591,15 @@ def _read_edf_header(fname, exclude): fid.read(44) subtype = os.path.splitext(fname)[1][1:].lower() - n_records = _edf_str_int(fid.read(8)) - record_length = fid.read(8).decode().strip('\x00').strip() - record_length = np.array([float(record_length), 1.]) # in seconds + n_records = int(_edf_str(fid.read(8))) + record_length = float(_edf_str(fid.read(8))) + record_length = np.array([record_length, 1.]) # in seconds if record_length[0] == 0: record_length[0] = 1. warn('Header information is incorrect for record length. 
Default ' 'record length set to 1.') - nchan = _edf_str_int(fid.read(4)) + nchan = int(_edf_str(fid.read(4))) channels = list(range(nchan)) ch_names = [fid.read(16).strip().decode('latin-1') for ch in channels] exclude = _find_exclude_idx(ch_names, exclude) @@ -628,20 +628,19 @@ def _read_edf_header(fname, exclude): ch_names = _unique_channel_names(ch_names) orig_units = dict(zip(ch_names, units)) - physical_min = np.array([float(fid.read(8).decode()) - for ch in channels])[sel] - physical_max = np.array([float(fid.read(8).decode()) - for ch in channels])[sel] - digital_min = np.array([float(fid.read(8).decode()) - for ch in channels])[sel] - digital_max = np.array([float(fid.read(8).decode()) - for ch in channels])[sel] - prefiltering = [fid.read(80).decode().strip(' \x00') - for ch in channels][:-1] + physical_min = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + physical_max = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + digital_min = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + digital_max = np.array( + [float(_edf_str(fid.read(8))) for ch in channels])[sel] + prefiltering = [_edf_str(fid.read(80)).strip() for ch in channels][:-1] highpass, lowpass = _parse_prefilter_string(prefiltering) # number of samples per record - n_samps = np.array([_edf_str_int(fid.read(8)) for ch in channels]) + n_samps = np.array([int(_edf_str(fid.read(8))) for ch in channels]) # Populate edf_info edf_info.update( @@ -731,10 +730,10 @@ def _read_gdf_header(fname, exclude): # Recording ID meas_id = {} - meas_id['recording_id'] = fid.read(80).decode().strip(' \x00') + meas_id['recording_id'] = _edf_str(fid.read(80)).strip() # date - tm = fid.read(16).decode().strip(' \x00') + tm = _edf_str(fid.read(16)).strip() try: if tm[14:16] == ' ': tm = tm[:14] + '00' + tm[16:] @@ -762,13 +761,11 @@ def _read_gdf_header(fname, exclude): 'Default record length set to 1.') nchan = np.fromfile(fid, UINT32, 1)[0] channels = list(range(nchan)) - ch_names = [fid.read(16).decode('latin-1').strip(' \x00') - for ch in channels] + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] exclude = _find_exclude_idx(ch_names, exclude) sel = np.setdiff1d(np.arange(len(ch_names)), exclude) fid.seek(80 * len(channels), 1) # transducer - units = [fid.read(8).decode('latin-1').strip(' \x00') - for ch in channels] + units = [_edf_str(fid.read(8)).strip() for ch in channels] edf_info['units'] = list() for i, unit in enumerate(units): if i in exclude: @@ -784,8 +781,7 @@ def _read_gdf_header(fname, exclude): physical_max = np.fromfile(fid, FLOAT64, len(channels)) digital_min = np.fromfile(fid, INT64, len(channels)) digital_max = np.fromfile(fid, INT64, len(channels)) - prefiltering = [fid.read(80).decode().strip(' \x00') - for ch in channels][:-1] + prefiltering = [_edf_str(fid.read(80)) for ch in channels][:-1] highpass, lowpass = _parse_prefilter_string(prefiltering) # n samples per record @@ -876,7 +872,7 @@ def _read_gdf_header(fname, exclude): # Recording identification meas_id = {} - meas_id['recording_id'] = fid.read(64).decode().strip(' \x00') + meas_id['recording_id'] = _edf_str(fid.read(64)).strip() vhsv = np.fromfile(fid, UINT8, 4) loc = {} if vhsv[3] == 0: @@ -945,8 +941,7 @@ def _read_gdf_header(fname, exclude): # Channels (variable header) channels = list(range(nchan)) - ch_names = [fid.read(16).decode().strip(' \x00') - for ch in channels] + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] exclude = _find_exclude_idx(ch_names, exclude) 
sel = np.setdiff1d(np.arange(len(ch_names)), exclude) diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 80019e08989..22f550d020d 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -26,7 +26,7 @@ from mne.io import read_raw_edf, read_raw_bdf, read_raw_fif, edf, read_raw_gdf from mne.io.tests.test_raw import _test_raw_reader from mne.io.edf.edf import (_get_edf_default_event_id, _read_annotations_edf, - _read_ch, _parse_prefilter_string, _edf_str_int, + _read_ch, _parse_prefilter_string, _edf_str, _read_edf_header, _read_header) from mne.io.pick import channel_indices_by_type, get_channel_type_constants from mne.annotations import events_from_annotations, read_annotations @@ -460,7 +460,7 @@ def test_invalid_date(tmpdir): def test_empty_chars(): """Test blank char support.""" - assert _edf_str_int(b'1819\x00 ') == 1819 + assert int(_edf_str(b'1819\x00 ')) == 1819 def _hp_lp_rev(*args, **kwargs): From 5eeca84cef086a3ee6edab577b138ffa43ff965c Mon Sep 17 00:00:00 2001 From: jstout211 Date: Wed, 16 Dec 2020 09:29:13 -0500 Subject: [PATCH 018/387] FIX: prevent raw._data overwrite from _add_exg and _add_chpi (#8633) * FIX: prevent raw._data overwrite from _add_exg and _add_chpi * Test function for simulation bug fix * Fixed pep8 issues * Import ordering changed according to coding guidelines * Cascading simulation test moved to simulation dir * Cleanup * Pull update for latest.inc * Update - contrib doc * WIP - failing test for chpi simulation * FIX: Faster test * FIX: latest * FIX: Missed one * FIX: Missed Co-authored-by: Jeff Stout Co-authored-by: Eric Larson --- doc/changes/latest.inc | 4 ++++ doc/changes/names.inc | 2 ++ mne/simulation/raw.py | 4 ++-- mne/simulation/tests/test_raw.py | 41 +++++++++++++++++++++++++++++--- 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 05b42686b11..5776c8e3033 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -30,6 +30,8 @@ Current (0.22.dev0) .. |Qianliang Li| replace:: **Qianliang Li** +.. |Jeff Stout| replace:: **Jeff Stout** + Enhancements ~~~~~~~~~~~~ @@ -103,6 +105,8 @@ Bugs ~~~~ - Fix orthogonalization of power envelopes in :func:`mne.connectivity.envelope_correlation` (:gh:`8658` **by new contributor** |Qianliang Li|_ and `Eric Larson`_) +- Fix data overwrite of cascading simulation operations in :func:`mne.simulation.simulate_raw` (:gh:`8633` **by new contributor** |Jeff Stout|_) + - Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) - Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels that are listed in the labels file, but which are absent from the saved data file (e.g. 'Ref' channel). Also now populates info['meas_date'] if possible (:gh:`8400` **by new contributor** |Tod Flak|_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 813b5e1992c..b28dfe35dfe 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -342,4 +342,6 @@ .. _Hongjiang Ye: https://github.com/rubyyhj +- .. _Jeff Stout: https://megcore.nih.gov/index.php/Staff + .. 
_Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774 diff --git a/mne/simulation/raw.py b/mne/simulation/raw.py index 4d07829f17e..6717ad725f9 100644 --- a/mne/simulation/raw.py +++ b/mne/simulation/raw.py @@ -524,7 +524,7 @@ def _add_exg(raw, kind, head_pos, interp, n_jobs, random_state): proc_lims = np.concatenate([np.arange(0, len(used), 10000), [len(used)]]) for start, stop in zip(proc_lims[:-1], proc_lims[1:]): fwd, _ = interper.feed(stop - start) - data[picks, start:stop] = einsum( + data[picks, start:stop] += einsum( 'svt,vt->st', fwd, exg_data[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True @@ -580,7 +580,7 @@ def add_chpi(raw, head_pos=None, interp='cos2', n_jobs=1, verbose=None): lims = np.concatenate([offsets, [len(raw.times)]]) for start, stop in zip(lims[:-1], lims[1:]): fwd, = interper.feed(stop - start) - data[meg_picks, start:stop] = einsum( + data[meg_picks, start:stop] += einsum( 'svt,vt->st', fwd, sinusoids[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py index ba097b5b6a8..ac4338fe759 100644 --- a/mne/simulation/tests/test_raw.py +++ b/mne/simulation/tests/test_raw.py @@ -24,13 +24,13 @@ from mne.tests.test_chpi import _assert_quats from mne.datasets import testing from mne.simulation import (simulate_sparse_stc, simulate_raw, add_eog, - add_ecg, add_chpi) + add_ecg, add_chpi, add_noise) from mne.source_space import _compare_source_spaces from mne.surface import _get_ico_surface from mne.io import read_raw_fif, RawArray from mne.io.constants import FIFF from mne.time_frequency import psd_welch -from mne.utils import run_tests_if_main, catch_logging, check_version +from mne.utils import catch_logging, check_version base_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname_short = op.join(base_path, 'test_raw.fif') @@ -479,4 +479,39 @@ def test_simulate_raw_chpi(): vel_atol=0.03) # velocity huge because of t_step_min above -run_tests_if_main() +@testing.requires_testing_data +def test_simulation_cascade(): + """Test that cascading operations do not overwrite data.""" + # Create a 1-second raw dataset with zeros in the data matrix + raw_null = read_raw_fif(raw_chpi_fname, allow_maxshield='yes') + raw_null.crop(0, 1).pick_types(meg=True).load_data() + raw_null.apply_function(lambda x: np.zeros_like(x)) + assert_array_equal(raw_null.get_data(), 0.) + + # Calculate independent signal additions + raw_eog = raw_null.copy() + add_eog(raw_eog, random_state=0) + + raw_ecg = raw_null.copy() + add_ecg(raw_ecg, random_state=0) + + raw_noise = raw_null.copy() + cov = make_ad_hoc_cov(raw_null.info) + add_noise(raw_noise, cov, random_state=0) + + raw_chpi = raw_null.copy() + add_chpi(raw_chpi) + + # Calculate cascading signal additions + raw_cascade = raw_null.copy() + add_eog(raw_cascade, random_state=0) + add_ecg(raw_cascade, random_state=0) + add_chpi(raw_cascade) + add_noise(raw_cascade, cov, random_state=0) + + cascade_data = raw_cascade.get_data() + serial_data = 0. 
+ for raw_other in (raw_eog, raw_ecg, raw_noise, raw_chpi): + serial_data += raw_other.get_data() + + assert_allclose(cascade_data, serial_data, atol=1e-20) From ae9ed2332cf24209ec2a178e61c0d02fdd35b03c Mon Sep 17 00:00:00 2001 From: Mikolaj Magnuski Date: Wed, 16 Dec 2020 17:40:40 +0100 Subject: [PATCH 019/387] MRG, FIX: prettier "head" extrapolation (#8645) * FIX: first step towards prettier head extrapolation * FIX: move extrapolation points further away * FIX: fix test, do not go below 12 extrapolation points * TST: Adjust test * DOC: Latest * DOC: Leading _ Co-authored-by: Eric Larson --- doc/changes/latest.inc | 2 ++ mne/viz/tests/test_topomap.py | 18 +++++++++--------- mne/viz/topomap.py | 7 ++++--- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 5776c8e3033..8b571306a4f 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -143,6 +143,8 @@ Bugs - Fix bug with :class:`mne.preprocessing.ICA` where projections were not tracked properly (:gh:`8343` by `Eric Larson`_) +- Fix bug where extrapolation points created artifacts in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8425` by `Mikołaj Magnuski`_) + - Fix bug with :func:`mne.preprocessing.read_ica_eeglab` where full-rank data were not handled properly (:gh:`8326` by `Eric Larson`_) - Fix bug with :ref:`somato-dataset` where the BEM was not included (:gh:`8317` by `Eric Larson`_) diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 6ee9b2af9fa..f58b3b16a97 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -203,14 +203,14 @@ def test_plot_topomap_basic(monkeypatch): # border=0 and border='mean': # --------------------------- - ch_names = list('abcde') - ch_pos = np.array([[0, 0, 1], [1, 0, 0], [-1, 0, 0], - [0, -1, 0], [0, 1, 0]]) - ch_pos_dict = {name: pos for name, pos in zip(ch_names, ch_pos)} + ch_pos = np.array(sum(([[0, 0, r], [r, 0, 0], [-r, 0, 0], + [0, -r, 0], [0, r, 0]] + for r in np.linspace(0.2, 1.0, 5)), [])) + rng = np.random.RandomState(23) + data = np.full(len(ch_pos), 5) + rng.randn(len(ch_pos)) + info = create_info(len(ch_pos), 250, 'eeg') + ch_pos_dict = {name: pos for name, pos in zip(info['ch_names'], ch_pos)} dig = make_dig_montage(ch_pos_dict, coord_frame='head') - - data = np.full(5, 5) + np.random.RandomState(23).randn(5) - info = create_info(ch_names, 250, ['eeg'] * 5) info.set_montage(dig) # border=0 @@ -218,7 +218,7 @@ def test_plot_topomap_basic(monkeypatch): img_data = ax.get_array().data assert np.abs(img_data[31, 31] - data[0]) < 0.12 - assert np.abs(img_data[10, 55]) < 0.3 + assert np.abs(img_data[0, 0]) < 1.5 # border='mean' ax, _ = plot_topomap(data, info, extrapolate='head', border='mean', @@ -226,7 +226,7 @@ def test_plot_topomap_basic(monkeypatch): img_data = ax.get_array().data assert np.abs(img_data[31, 31] - data[0]) < 0.12 - assert img_data[10, 54] > 5 + assert img_data[0, 0] > 5 # error when not numeric or str: error_msg = 'border must be an instance of numeric or str' diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index cbcc3dd3c20..c2a3b82b093 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -567,9 +567,10 @@ def _get_extra_points(pos, extrapolate, origin, radii): else: assert extrapolate == 'head' # return points on the head circle - angle = np.arcsin(distance / 2 / np.mean(radii)) - points_l = np.arange(0, 2 * np.pi, angle) - use_radii = radii * 1.1 + angle = np.arcsin(distance / np.mean(radii)) + n_pnts = max(12, 
int(np.round(2 * np.pi / angle))) + points_l = np.linspace(0, 2 * np.pi, n_pnts, endpoint=False) + use_radii = radii * 1.1 + distance points_x = np.cos(points_l) * use_radii[0] + x points_y = np.sin(points_l) * use_radii[1] + y new_pos = np.stack([points_x, points_y], axis=1) From c23df983dbd0e8f5bd481af07ceeb5453de8ca84 Mon Sep 17 00:00:00 2001 From: Marijn van Vliet Date: Thu, 17 Dec 2020 02:47:12 +0200 Subject: [PATCH 020/387] MRG: Fix DICS rank handling (#8594) * Fix DICS rank handling * Fix case when no noise_csd is given * TST: Add test * DOC: latest * FIX: Tol * FIX: Tol * FIX: Full rank Co-authored-by: Eric Larson --- doc/changes/latest.inc | 2 + mne/beamformer/_dics.py | 25 +++++++++-- mne/beamformer/tests/test_dics.py | 75 ++++++++++++++++++++++++++++--- 3 files changed, 93 insertions(+), 9 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 8b571306a4f..bd50ad4589b 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -119,6 +119,8 @@ Bugs - Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) +- Fix bug with :func:`mne.beamformer.make_dics` where the ``rank`` parameter was not properly handled (:gh:`8594` by `Marijn van Vliet`_ and `Eric Larson`_) + - Fix bug with :func:`mne.beamformer.apply_dics` where the whitener was not properly applied (:gh:`8610` by `Eric Larson`_) - Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_ ) diff --git a/mne/beamformer/_dics.py b/mne/beamformer/_dics.py index 52c5b512dc9..28f9b0c5e29 100644 --- a/mne/beamformer/_dics.py +++ b/mne/beamformer/_dics.py @@ -15,6 +15,7 @@ _check_option, _validate_type) from ..forward import _subject_from_forward from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth +from ..rank import compute_rank from ..source_estimate import _make_stc, _get_src_type from ..time_frequency import csd_fourier, csd_multitaper, csd_morlet from ._compute_beamformer import (_prepare_beamformer_input, @@ -166,7 +167,8 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies] n_freqs = len(frequencies) - _check_one_ch_type('dics', info, forward, csd, noise_csd) + _, _, allow_mismatch = _check_one_ch_type('dics', info, forward, csd, + noise_csd) # remove bads so that equalize_channels only keeps all good info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) info, forward, csd = equalize_channels([info, forward, csd]) @@ -181,6 +183,23 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, _prepare_beamformer_input( info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank, pca=False, **depth) + + # Compute ranks + csd_int_rank = [] + if not allow_mismatch: + noise_rank = compute_rank(noise_csd, info=info, rank=rank) + for i in range(len(frequencies)): + csd_rank = compute_rank(csd.get_data(index=i, as_cov=True), + info=info, rank=rank) + if not allow_mismatch: + for key in csd_rank: + if key not in noise_rank or csd_rank[key] != noise_rank[key]: + raise ValueError('%s data rank (%s) did not match the ' + 'noise rank (%s)' + % (key, csd_rank[key], + noise_rank.get(key, None))) + csd_int_rank.append(sum(csd_rank.values())) + del noise_csd ch_names = list(info['ch_names']) @@ -203,8 +222,8 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, n_orient = 3 if is_free_ori else 1 W, 
max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, - rank=rank, inversion=inversion, nn=nn, orient_std=orient_std, - whitener=whitener) + rank=csd_int_rank[i], inversion=inversion, nn=nn, + orient_std=orient_std, whitener=whitener) Ws.append(W) max_oris.append(max_power_ori) diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 38127a8be65..19ae2977781 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -18,7 +18,7 @@ from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.beamformer._dics import _prepare_noise_csd from mne.time_frequency import csd_morlet -from mne.utils import object_diff, requires_h5py +from mne.utils import object_diff, requires_h5py, catch_logging from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest from mne.beamformer.tests.test_lcmv import _assert_weight_norm @@ -93,7 +93,7 @@ def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default evoked = epochs.average() # Compute the cross-spectral density matrix - csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10) + csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5) labels = mne.read_labels_from_annot( 'sample', hemi='lh', subjects_dir=subjects_dir) @@ -122,6 +122,19 @@ def _rand_csd(rng, info): return data +def _make_rand_csd(info, csd): + rng = np.random.RandomState(0) + data = _rand_csd(rng, info) + # now we need to have the same null space as the data csd + s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0])) + mask = np.abs(s) >= s[-1] * 1e-7 + rank = mask.sum() + assert rank == len(data) == len(info['ch_names']) + noise_csd = CrossSpectralDensity( + _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft) + return noise_csd, rank + + @pytest.mark.slowtest @testing.requires_testing_data @requires_h5py @@ -138,10 +151,8 @@ def test_make_dics(tmpdir, _load_forward, idx, whiten): with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: - rng = np.random.RandomState(0) - data = _rand_csd(rng, epochs.info) - noise_csd = CrossSpectralDensity( - _sym_mat_to_vector(data), epochs.ch_names, 0., csd.n_fft) + noise_csd, rank = _make_rand_csd(epochs.info, csd) + assert rank == len(epochs.info['ch_names']) == 62 else: noise_csd = None epochs.pick_types(meg='grad') @@ -724,3 +735,55 @@ def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, # Compute the percentage of sources for which there is no loc bias: perc = (want == np.argmax(loc, axis=0)).mean() * 100 assert lower <= perc <= upper + + +@testing.requires_testing_data +@idx_param +@pytest.mark.parametrize('whiten', (False, True)) +def test_make_dics_rank(_load_forward, idx, whiten): + """Test making DICS beamformer filters with rank param.""" + _, fwd_surf, fwd_fixed, _ = _load_forward + epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx) + if whiten: + noise_csd, want_rank = _make_rand_csd(epochs.info, csd) + kind = 'mag + grad' + else: + noise_csd = None + epochs.pick_types(meg='grad') + want_rank = len(epochs.ch_names) + assert want_rank == 41 + kind = 'grad' + + with catch_logging() as log: + filters = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + verbose=True) + log = log.getvalue() + assert f'Estimated rank ({kind}): {want_rank}' in log, log + stc, _ = apply_dics_csd(csd, 
filters)
+    other_rank = want_rank - 1  # shouldn't make a huge difference
+    use_rank = dict(meg=other_rank)
+    if not whiten:
+        # XXX it's a bug that our rank functions don't treat "meg"
+        # properly here...
+        use_rank['grad'] = use_rank.pop('meg')
+    with catch_logging() as log:
+        filters_2 = make_dics(
+            epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
+            rank=use_rank, verbose=True)
+    log = log.getvalue()
+    assert f'Computing rank from covariance with rank={use_rank}' in log, log
+    stc_2, _ = apply_dics_csd(csd, filters_2)
+    corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1]
+    assert 0.8 < corr < 0.99999
+
+    # degenerate conditions
+    if whiten:
+        # make rank deficient
+        data = noise_csd.get_data(0.)
+        data[0] = data[:, 0] = 0
+        noise_csd._data[:, 0] = _sym_mat_to_vector(data)
+        with pytest.raises(ValueError, match='meg data rank.*the noise rank'):
+            filters = make_dics(
+                epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
+                verbose=True)

From 1b9d2228bc40f817ef1765686bc2ec6e81079d13 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 17 Dec 2020 05:42:03 -0500
Subject: [PATCH 021/387] MRG, MAINT: Speed up tests (#8665)

* FIX: 10 sec of EGI

* MAINT: io

* WIP

* MAINT: Beamformer

* TST: Speed up preprocessing

* STY: Docstring

* FIX: Tols

* FIX: No need
---
 mne/beamformer/tests/test_dics.py        |   6 +-
 mne/beamformer/tests/test_lcmv.py        | 260 ++++++++----------
 mne/io/ctf/tests/test_ctf.py             |   3 +-
 mne/io/eeglab/tests/test_eeglab.py       |  64 ++--
 mne/io/egi/tests/test_egi.py             |  14 +-
 mne/io/fieldtrip/tests/test_fieldtrip.py |   5 +
 mne/io/fiff/tests/test_raw_fiff.py       |  41 +--
 mne/preprocessing/tests/test_ecg.py      |  28 +-
 mne/preprocessing/tests/test_ica.py      | 356 ++++++++++++-----------
 mne/preprocessing/tests/test_ssp.py      | 241 +++++++--------
 mne/tests/test_epochs.py                 |   9 +-
 11 files changed, 539 insertions(+), 488 deletions(-)

diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py
index 19ae2977781..98d8e4b9817 100644
--- a/mne/beamformer/tests/test_dics.py
+++ b/mne/beamformer/tests/test_dics.py
@@ -566,12 +566,12 @@ def test_apply_dics_timeseries(_load_forward, idx):
 
 @pytest.mark.slowtest
 @testing.requires_testing_data
-@idx_param
-def test_tf_dics(_load_forward, idx):
+def test_tf_dics(_load_forward):
     """Test 5D time-frequency beamforming based on DICS."""
     fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
+    # idx isn't really used so let's just simulate one
     epochs, _, _, source_vertno, label, vertices, source_ind = \
-        _simulate_data(fwd_fixed, idx)
+        _simulate_data(fwd_fixed, idx=0)
 
     reg = 1  # Lots of regularization for our toy dataset
     tmin = 0
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index 9799b52ea87..774bcda04e5 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -6,8 +6,7 @@
 from scipy import linalg
 from scipy.spatial.distance import cdist
 from numpy.testing import (assert_array_almost_equal, assert_array_equal,
-                           assert_almost_equal, assert_allclose,
-                           assert_array_less)
+                           assert_allclose, assert_array_less)
 
 import mne
 from mne.transforms import apply_trans, invert_transform
@@ -130,9 +129,9 @@ def test_lcmv_vector():
     forward = mne.read_forward_solution(fname_fwd)
     forward = mne.pick_channels_forward(forward, info['ch_names'])
 
-    vertices = [s['vertno'][::100] for s in forward['src']]
+    vertices = [s['vertno'][::200] for s in forward['src']]
     n_vertices = sum(len(v) for v in vertices)
-    assert 5 < n_vertices < 20
+    assert n_vertices == 4
 
     amplitude = 100e-9
     stc = 
mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices, @@ -207,92 +206,89 @@ def test_lcmv_vector(): @pytest.mark.slowtest @requires_h5py @testing.requires_testing_data -@pytest.mark.parametrize('reg', (0.01, 0.)) -@pytest.mark.parametrize('proj', (True, False)) -def test_make_lcmv(tmpdir, reg, proj): +@pytest.mark.parametrize('reg, proj, kind', [ + (0.01, True, 'volume'), + (0., False, 'volume'), + (0.01, False, 'surface'), + (0., True, 'surface'), +]) +def test_make_lcmv_bem(tmpdir, reg, proj, kind): """Test LCMV with evoked data and single trials.""" raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj) - for fwd in [forward, forward_vol]: - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov) - stc = apply_lcmv(evoked, filters, max_ori_out='signed') - stc.crop(0.02, None) + if kind == 'surface': + fwd = forward + else: + fwd = forward_vol + assert kind == 'volume' - stc_pow = np.sum(np.abs(stc.data), axis=1) - idx = np.argmax(stc_pow) - max_stc = stc.data[idx] - tmax = stc.times[np.argmax(max_stc)] - - assert 0.08 < tmax < 0.15, tmax - assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) - - if fwd is forward: - # Test picking normal orientation (surface source space only). - filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, - reg=reg, noise_cov=noise_cov, - pick_ori='normal', weight_norm=None) - stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_normal.crop(0.02, None) - - stc_pow = np.sum(np.abs(stc_normal.data), axis=1) - idx = np.argmax(stc_pow) - max_stc = stc_normal.data[idx] - tmax = stc_normal.times[np.argmax(max_stc)] - - lower = 0.04 if proj else 0.025 - assert lower < tmax < 0.14, tmax - lower = 3e-7 if proj else 2e-7 - assert lower < np.max(max_stc) < 3e-6, np.max(max_stc) - - # No weight normalization was applied, so the amplitude of normal - # orientation results should always be smaller than free - # orientation results. - assert (np.abs(stc_normal.data) <= stc.data).all() - - # Test picking source orientation maximizing output source power - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov, pick_ori='max-power') - stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_max_power.crop(0.02, None) - stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov) + stc = apply_lcmv(evoked, filters, max_ori_out='signed') + stc.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = stc.data[idx] + tmax = stc.times[np.argmax(max_stc)] + + assert 0.08 < tmax < 0.15, tmax + assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) + + if kind == 'surface': + # Test picking normal orientation (surface source space only). 
+ filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, + reg=reg, noise_cov=noise_cov, + pick_ori='normal', weight_norm=None) + stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed') + stc_normal.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc_normal.data), axis=1) idx = np.argmax(stc_pow) - max_stc = np.abs(stc_max_power.data[idx]) - tmax = stc.times[np.argmax(max_stc)] - - lower = 0.08 if proj else 0.04 - assert lower < tmax < 0.15, tmax - assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) - - stc_max_power.data[:, :] = np.abs(stc_max_power.data) - - if fwd is forward: - # Maximum output source power orientation results should be - # similar to free orientation results in areas with channel - # coverage - label = mne.read_label(fname_label) - mean_stc = stc.extract_label_time_course(label, fwd['src'], - mode='mean') - mean_stc_max_pow = \ - stc_max_power.extract_label_time_course(label, fwd['src'], - mode='mean') - assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) - - # Test NAI weight normalization: - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov, pick_ori='max-power', - weight_norm='nai') - stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_nai.crop(0.02, None) - - # Test whether unit-noise-gain solution is a scaled version of NAI - pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)), - np.concatenate(stc_max_power.data)) - assert_almost_equal(pearsoncorr[0, 1], 1.) + max_stc = stc_normal.data[idx] + tmax = stc_normal.times[np.argmax(max_stc)] + + lower = 0.04 if proj else 0.025 + assert lower < tmax < 0.14, tmax + lower = 3e-7 if proj else 2e-7 + assert lower < np.max(max_stc) < 3e-6, np.max(max_stc) + + # No weight normalization was applied, so the amplitude of normal + # orientation results should always be smaller than free + # orientation results. 
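        # (Editorial aside, not part of this patch: the bound asserted next
        # follows because projecting a vector onto a single axis can never
        # exceed its norm, e.g. abs(np.dot([1., 2., 2.], [0., 0., 1.])) == 2.
        # while np.linalg.norm([1., 2., 2.]) == 3.)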
+ assert (np.abs(stc_normal.data) <= stc.data).all() + + # Test picking source orientation maximizing output source power + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov, pick_ori='max-power') + stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed') + stc_max_power.crop(0.02, None) + stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = np.abs(stc_max_power.data[idx]) + tmax = stc.times[np.argmax(max_stc)] + + lower = 0.08 if proj else 0.04 + assert lower < tmax < 0.15, tmax + assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) + + stc_max_power.data[:, :] = np.abs(stc_max_power.data) + + if kind == 'surface': + # Maximum output source power orientation results should be + # similar to free orientation results in areas with channel + # coverage + label = mne.read_label(fname_label) + mean_stc = stc.extract_label_time_course( + label, fwd['src'], mode='mean') + mean_stc_max_pow = \ + stc_max_power.extract_label_time_course( + label, fwd['src'], mode='mean') + assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) # Test if spatial filter contains src_type - assert 'src_type' in filters + assert filters['src_type'] == kind # __repr__ assert len(evoked.ch_names) == 22 @@ -301,7 +297,7 @@ def test_make_lcmv(tmpdir, reg, proj): rank = 17 if proj else 20 assert 'LCMV' in repr(filters) assert 'unknown subject' not in repr(filters) - assert '4157 vert' in repr(filters) + assert f'{fwd["nsource"]} vert' in repr(filters) assert '20 ch' in repr(filters) assert 'rank %s' % rank in repr(filters) @@ -317,6 +313,9 @@ def test_make_lcmv(tmpdir, reg, proj): filters['rank'] = int(filters['rank']) assert object_diff(filters, filters_read) == '' + if kind != 'surface': + return + # Test if fixed forward operator is detected when picking normal or # max-power orientation pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov, @@ -357,9 +356,8 @@ def test_make_lcmv(tmpdir, reg, proj): # this channel from the data # also test here that no warnings are thrown - implemented to check whether # src should not be None warning occurs - with pytest.warns(None) as w: - stc = apply_lcmv(evoked, filters, max_ori_out='signed') - assert len(w) == 0 + stc = apply_lcmv(evoked, filters, max_ori_out='signed') + # the result should be equal to applying this filter to a dataset without # this channel: stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed') @@ -367,11 +365,16 @@ def test_make_lcmv(tmpdir, reg, proj): # Test if non-matching SSP projection is detected in application of filter if proj: - raw_proj = deepcopy(raw) - raw_proj.del_proj() + raw_proj = raw.copy().del_proj() with pytest.raises(ValueError, match='do not match the projections'): apply_lcmv_raw(raw_proj, filters, max_ori_out='signed') + # Test apply_lcmv_raw + use_raw = raw.copy().crop(0, 1) + stc = apply_lcmv_raw(use_raw, filters) + assert_allclose(stc.times, use_raw.times) + assert_array_equal(stc.vertices[0], forward_vol['src'][0]['vertno']) + # Test if spatial filter contains src_type assert 'src_type' in filters @@ -433,8 +436,13 @@ def test_make_lcmv(tmpdir, reg, proj): @testing.requires_testing_data @pytest.mark.slowtest -@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain', 'nai')) -@pytest.mark.parametrize('pick_ori', (None, 'max-power', 'vector')) +@pytest.mark.parametrize('weight_norm, pick_ori', [ + ('unit-noise-gain', 'max-power'), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', None), + ('nai', 'vector'), + (None, 
'max-power'), +]) def test_make_lcmv_sphere(pick_ori, weight_norm): """Test LCMV with sphere head model.""" # unit-noise gain beamformer and orientation @@ -479,33 +487,6 @@ def test_make_lcmv_sphere(pick_ori, weight_norm): assert min_ < np.max(max_stc) < max_, (min_, np.max(max_stc), max_) -@testing.requires_testing_data -def test_lcmv_raw(): - """Test LCMV with raw data.""" - raw, _, _, _, noise_cov, label, forward, _, _, _ =\ - _get_data(all_forward=False, epochs=False, data_cov=False) - - tmin, tmax = 0, 20 - start, stop = raw.time_as_index([tmin, tmax]) - - # use only the left-temporal MEG channels for LCMV - data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax) - filters = make_lcmv(raw.info, forward, data_cov, reg=0.01, - noise_cov=noise_cov, label=label) - stc = apply_lcmv_raw(raw, filters, start=start, stop=stop, - max_ori_out='signed') - - assert_array_almost_equal(np.array([tmin, tmax]), - np.array([stc.times[0], stc.times[-1]]), - decimal=2) - - # make sure we get an stc with vertices only in the lh - vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']] - assert len(stc.vertices[0]) == len(np.intersect1d(vertno[0], - label.vertices)) - assert len(stc.vertices[1]) == 0 - - @testing.requires_testing_data @pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain')) @pytest.mark.parametrize('pick_ori', ('max-power', 'normal')) @@ -542,16 +523,15 @@ def test_lcmv_ctf_comp(): ctf_dir = op.join(testing.data_path(download=False), 'CTF') raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds') raw = mne.io.read_raw_ctf(raw_fname, preload=True) + raw.pick(raw.ch_names[:70]) events = mne.make_fixed_length_events(raw, duration=0.2)[:2] epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.2) evoked = epochs.average() - with pytest.warns(RuntimeWarning, - match='Too few samples .* estimate may be unreliable'): - data_cov = mne.compute_covariance(epochs) + data_cov = mne.compute_covariance(epochs) fwd = mne.make_forward_solution(evoked.info, None, - mne.setup_volume_source_space(pos=15.0), + mne.setup_volume_source_space(pos=30.0), mne.make_sphere_model()) with pytest.raises(ValueError, match='reduce_rank'): make_lcmv(evoked.info, fwd, data_cov) @@ -566,8 +546,12 @@ def test_lcmv_ctf_comp(): @testing.requires_testing_data -@pytest.mark.parametrize('proj', [False, True]) -@pytest.mark.parametrize('weight_norm', (None, 'nai', 'unit-noise-gain')) +@pytest.mark.parametrize('proj, weight_norm', [ + (True, 'unit-noise-gain'), + (False, 'unit-noise-gain'), + (True, None), + (True, 'nai'), +]) def test_lcmv_reg_proj(proj, weight_norm): """Test LCMV with and without proj.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) @@ -777,8 +761,12 @@ def test_orientation_max_power(bias_params_fixed, bias_params_free, assert lower_ori < got < upper_ori -@pytest.mark.parametrize('weight_norm', ('nai', 'unit-noise-gain')) -@pytest.mark.parametrize('pick_ori', ('vector', 'max-power', None)) +@pytest.mark.parametrize('weight_norm, pick_ori', [ + ('nai', 'max-power'), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', 'max-power'), + ('unit-noise-gain', None), +]) def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori): """Test that depth weighting does not matter for normalized filters.""" evoked, fwd, noise_cov, data_cov, _ = bias_params_free @@ -814,12 +802,24 @@ def test_lcmv_maxfiltered(): make_lcmv(epochs.info, fwd, data_cov, rank=use_rank) +# To reduce test time, only test combinations that should matter rather than +# all of them @testing.requires_testing_data 
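# (Editorial aside, not part of this patch: a minimal hypothetical sketch of
# the parametrization pattern this patch applies throughout; the test names
# and values below are placeholders, not from the MNE suite. Stacked
# parametrize decorators collect the full Cartesian product of values, while
# a single decorator listing explicit tuples collects only those cases:
#
#     import pytest
#
#     @pytest.mark.parametrize('reg', (0.05, 0.))
#     @pytest.mark.parametrize('inversion', ['matrix', 'single'])
#     def test_product(reg, inversion):  # 2 * 2 = 4 cases collected
#         assert inversion in ('matrix', 'single')
#
#     @pytest.mark.parametrize('reg, inversion', [
#         (0.05, 'matrix'),  # only the listed pairs run: 2 cases
#         (0., 'single'),
#     ])
#     def test_subset(reg, inversion):
#         assert reg in (0.05, 0.)
# )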
-@pytest.mark.parametrize('pick_ori', ['vector', 'max-power', 'normal']) -@pytest.mark.parametrize( - 'weight_norm', ['unit-noise-gain', 'nai', 'unit-noise-gain-invariant']) -@pytest.mark.parametrize('reg', (0.05, 0.)) -@pytest.mark.parametrize('inversion', ['matrix', 'single']) +@pytest.mark.parametrize('pick_ori, weight_norm, reg, inversion', [ + ('vector', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('vector', 'unit-noise-gain-invariant', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.05, 'matrix'), + ('vector', 'unit-noise-gain', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.0, 'matrix'), + ('vector', 'unit-noise-gain', 0.0, 'single'), + ('vector', 'nai', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.0, 'single'), + ('max-power', 'unit-noise-gain', 0.05, 'single'), + ('max-power', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('normal', 'unit-noise-gain', 0.05, 'matrix'), + ('normal', 'nai', 0.0, 'matrix'), +]) def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): """Test unit-noise-gain filter against formula.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) diff --git a/mne/io/ctf/tests/test_ctf.py b/mne/io/ctf/tests/test_ctf.py index ee2630f8bc8..b7dd0eb6067 100644 --- a/mne/io/ctf/tests/test_ctf.py +++ b/mne/io/ctf/tests/test_ctf.py @@ -212,8 +212,7 @@ def test_read_ctf(tmpdir): assert_allclose(raw_read[pick_ch, sl_time][0], raw_c[pick_ch, sl_time][0]) # all data / preload - with pytest.warns(None): # sometimes MISC - raw = read_raw_ctf(fname, preload=True) + raw.load_data() assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15) # test bad segment annotations if 'testdata_ctf_short.ds' in fname: diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index 3ed15af5ec2..c0c9a22846d 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -20,7 +20,7 @@ from mne.io import read_raw_eeglab from mne.io.tests.test_raw import _test_raw_reader from mne.datasets import testing -from mne.utils import requires_h5py, run_tests_if_main +from mne.utils import check_version from mne.annotations import events_from_annotations, read_annotations from mne.io.eeglab.tests._utils import _read_eeglab_montage @@ -42,25 +42,18 @@ raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5] epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5] -raw_fnames = [raw_fname_mat, raw_fname_onefile_mat, - raw_fname_h5, raw_fname_onefile_h5] montage_path = op.join(base_dir, 'test_chans.locs') -def _check_h5(fname): - if fname.endswith('_h5.set'): - try: - import h5py # noqa, analysis:ignore - except Exception: - raise SkipTest('h5py module required') +needs_h5 = pytest.mark.skipif(not check_version('h5py'), reason='Needs h5py') -@requires_h5py @testing.requires_testing_data -@pytest.mark.slowtest -@pytest.mark.parametrize( - 'fname', [raw_fname_mat, raw_fname_h5, raw_fname_chanloc], ids=op.basename -) +@pytest.mark.parametrize('fname', [ + raw_fname_mat, + pytest.param(raw_fname_h5, marks=needs_h5), + raw_fname_chanloc, +], ids=op.basename) def test_io_set_raw(fname): """Test importing EEGLAB .set files.""" montage = _read_eeglab_montage(montage_path) @@ -72,22 +65,19 @@ def test_io_set_raw(fname): if fname.endswith('test_raw_chanloc.set'): with pytest.warns(RuntimeWarning, match="The data contains 'boundary' events"): - _test_raw_reader(**kws) + raw0 = _test_raw_reader(**kws) + elif '_h5' in fname: # should be safe enough, and much faster + raw0 = 
read_raw_eeglab(fname, preload=True) else: - _test_raw_reader(**kws) + raw0 = _test_raw_reader(**kws) # test that preloading works - read_raw_kws = dict(input_fname=fname, preload=True) if fname.endswith('test_raw_chanloc.set'): - with pytest.warns(RuntimeWarning, - match="The data contains 'boundary' events"): - raw0 = read_raw_eeglab(**read_raw_kws) - raw0.set_montage(montage, on_missing='ignore') - # crop to check if the data has been properly preloaded; we cannot - # filter as the snippet of raw data is very short - raw0.crop(0, 1) + raw0.set_montage(montage, on_missing='ignore') + # crop to check if the data has been properly preloaded; we cannot + # filter as the snippet of raw data is very short + raw0.crop(0, 1) else: - raw0 = read_raw_eeglab(**read_raw_kws) raw0.set_montage(montage) raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', phase='zero') @@ -103,6 +93,12 @@ def test_io_set_raw(fname): raw0 = read_raw_eeglab(**read_raw_kws) raw0.set_montage(montage) + # Annotations + if fname != raw_fname_chanloc: + assert len(raw0.annotations) == 154 + assert set(raw0.annotations.description) == {'rt', 'square'} + assert_array_equal(raw0.annotations.duration, 0.) + @testing.requires_testing_data def test_io_set_raw_more(tmpdir): @@ -247,11 +243,12 @@ def test_io_set_raw_more(tmpdir): np.array([np.nan, np.nan, np.nan])) -@pytest.mark.slowtest # slow-ish on Travis OSX @pytest.mark.timeout(60) # ~60 sec on Travis OSX -@requires_h5py @testing.requires_testing_data -@pytest.mark.parametrize('fnames', [epochs_mat_fnames, epochs_h5_fnames]) +@pytest.mark.parametrize('fnames', [ + epochs_mat_fnames, + pytest.param(epochs_h5_fnames, marks=[needs_h5, pytest.mark.slowtest]), +]) def test_io_set_epochs(fnames): """Test importing EEGLAB .set epochs files.""" epochs_fname, epochs_fname_onefile = fnames @@ -306,12 +303,16 @@ def test_degenerate(tmpdir): bad_epochs_fname) -@pytest.mark.parametrize("fname", raw_fnames) +@pytest.mark.parametrize("fname", [ + raw_fname_mat, + raw_fname_onefile_mat, + # We don't test the h5 varaints here because they are implicitly tested + # in test_io_set_raw +]) @pytest.mark.filterwarnings('ignore: Complex objects') @testing.requires_testing_data def test_eeglab_annotations(fname): """Test reading annotations in EEGLAB files.""" - _check_h5(fname) annotations = read_annotations(fname) assert len(annotations) == 154 assert set(annotations.description) == {'rt', 'square'} @@ -421,6 +422,3 @@ def test_position_information(one_chanpos_fname): _assert_array_allclose_nan(np.array([ch['loc'] for ch in raw.info['chs']]), EXPECTED_LOCATIONS_FROM_MONTAGE) - - -run_tests_if_main() diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index 40e667cf006..01486ab98a4 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -59,11 +59,15 @@ ]) def test_egi_mff_pause(fname, skip_times, event_times): """Test EGI MFF with pauses.""" - with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): - raw = _test_raw_reader(read_raw_egi, input_fname=fname, - test_scaling=False, # XXX probably some bug - test_rank='less', - ) + if fname == egi_pause_w1337_fname: + # too slow to _test_raw_reader + raw = read_raw_egi(fname).load_data() + else: + with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + raw = _test_raw_reader(read_raw_egi, input_fname=fname, + test_scaling=False, # XXX probably some bug + test_rank='less', + ) assert raw.info['sfreq'] == 250. 
# true for all of these files assert len(raw.annotations) == len(skip_times) diff --git a/mne/io/fieldtrip/tests/test_fieldtrip.py b/mne/io/fieldtrip/tests/test_fieldtrip.py index b4890d794a3..2e654d4608a 100644 --- a/mne/io/fieldtrip/tests/test_fieldtrip.py +++ b/mne/io/fieldtrip/tests/test_fieldtrip.py @@ -33,6 +33,11 @@ all_test_params_epochs = list(itertools.product(all_systems_epochs, all_versions, use_info)) +# just for speed we skip some slowest ones -- the coverage should still +# be sufficient +for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]: + all_test_params_epochs.pop(all_test_params_epochs.index(key)) + all_test_params_raw.pop(all_test_params_raw.index(key)) no_info_warning = {'expected_warning': RuntimeWarning, 'match': NOINFO_WARNING} diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 51c1137464b..e3dbfb99358 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -606,7 +606,7 @@ def test_io_raw(tmpdir): def test_io_raw_additional(fname_in, fname_out, tmpdir): """Test IO for raw data (Neuromag + CTF + gz).""" fname_out = tmpdir.join(fname_out) - raw = read_raw_fif(fname_in) + raw = read_raw_fif(fname_in).crop(0, 2) nchan = raw.info['nchan'] ch_names = raw.info['ch_names'] @@ -1067,16 +1067,21 @@ def test_resample_equiv(): @testing.requires_testing_data -@pytest.mark.parametrize('preload', (True, False)) -def test_resample(tmpdir, preload): +@pytest.mark.parametrize('preload, n, npad', [ + (True, 512, 'auto'), + (False, 512, 0), +]) +def test_resample(tmpdir, preload, n, npad): """Test resample (with I/O and multiple files).""" - raw = read_raw_fif(fif_fname).crop(0, 3) + raw = read_raw_fif(fif_fname) + raw.crop(0, raw.times[n - 1]) + assert len(raw.times) == n if preload: raw.load_data() raw_resamp = raw.copy() sfreq = raw.info['sfreq'] # test parallel on upsample - raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto') + raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad) assert raw_resamp.n_times == len(raw_resamp.times) raw_resamp.save(tmpdir.join('raw_resamp-raw.fif')) raw_resamp = read_raw_fif(tmpdir.join('raw_resamp-raw.fif'), @@ -1086,7 +1091,7 @@ def test_resample(tmpdir, preload): assert raw_resamp.get_data().shape[1] == raw_resamp.n_times assert raw.get_data().shape[0] == raw_resamp._data.shape[0] # test non-parallel on downsample - raw_resamp.resample(sfreq, n_jobs=1, npad='auto') + raw_resamp.resample(sfreq, n_jobs=1, npad=npad) assert raw_resamp.info['sfreq'] == sfreq assert raw.get_data().shape == raw_resamp._data.shape assert raw.first_samp == raw_resamp.first_samp @@ -1109,9 +1114,9 @@ def test_resample(tmpdir, preload): raw3 = raw.copy() raw4 = raw.copy() raw1 = concatenate_raws([raw1, raw2]) - raw1.resample(10., npad='auto') - raw3.resample(10., npad='auto') - raw4.resample(10., npad='auto') + raw1.resample(10., npad=npad) + raw3.resample(10., npad=npad) + raw4.resample(10., npad=npad) raw3 = concatenate_raws([raw3, raw4]) assert_array_equal(raw1._data, raw3._data) assert_array_equal(raw1._first_samps, raw3._first_samps) @@ -1129,12 +1134,12 @@ def test_resample(tmpdir, preload): # basic decimation stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(8., npad='auto')._data, + assert_allclose(raw.resample(8., npad=npad)._data, [[1, 1, 0, 0, 1, 1, 0, 0]]) # decimation of multiple stim channels raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim'])) - 
assert_allclose(raw.resample(8., npad='auto', verbose='error')._data, + assert_allclose(raw.resample(8., npad=npad, verbose='error')._data, [[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0]]) @@ -1142,20 +1147,20 @@ def test_resample(tmpdir, preload): # done naively stim = [0, 0, 0, 1, 1, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(4., npad='auto')._data, + assert_allclose(raw.resample(4., npad=npad)._data, [[0, 1, 1, 0]]) # two events are merged in this case (warning) stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) with pytest.warns(RuntimeWarning, match='become unreliable'): - raw.resample(8., npad='auto') + raw.resample(8., npad=npad) # events are dropped in this case (warning) stim = [0, 1, 1, 0, 0, 1, 1, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) with pytest.warns(RuntimeWarning, match='become unreliable'): - raw.resample(4., npad='auto') + raw.resample(4., npad=npad) # test resampling events: this should no longer give a warning # we often have first_samp != 0, include it here too @@ -1167,7 +1172,7 @@ def test_resample(tmpdir, preload): raw = RawArray([stim], create_info(1, o_sfreq, ['stim']), first_samp=first_samp) events = find_events(raw) - raw, events = raw.resample(n_sfreq, events=events, npad='auto') + raw, events = raw.resample(n_sfreq, events=events, npad=npad) # Try index into raw.times with resampled events: raw.times[events[:, 0] - raw.first_samp] n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py @@ -1183,15 +1188,15 @@ def test_resample(tmpdir, preload): # test copy flag stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - raw_resampled = raw.copy().resample(4., npad='auto') + raw_resampled = raw.copy().resample(4., npad=npad) assert (raw_resampled is not raw) - raw_resampled = raw.resample(4., npad='auto') + raw_resampled = raw.resample(4., npad=npad) assert (raw_resampled is raw) # resample should still work even when no stim channel is present raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg'])) raw.info['lowpass'] = 50. - raw.resample(10, npad='auto') + raw.resample(10, npad=npad) assert raw.info['lowpass'] == 5. assert len(raw) == 10 diff --git a/mne/preprocessing/tests/test_ecg.py b/mne/preprocessing/tests/test_ecg.py index 1f62a1522c1..16cfdfdbebb 100644 --- a/mne/preprocessing/tests/test_ecg.py +++ b/mne/preprocessing/tests/test_ecg.py @@ -4,7 +4,6 @@ from mne.io import read_raw_fif from mne import pick_types from mne.preprocessing import find_ecg_events, create_ecg_epochs -from mne.utils import run_tests_if_main data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(data_path, 'test_raw.fif') @@ -15,7 +14,9 @@ def test_find_ecg(): """Test find ECG peaks.""" # Test if ECG analysis will work on data that is not preloaded - raw = read_raw_fif(raw_fname, preload=False) + raw = read_raw_fif(raw_fname, preload=False).pick_types(meg=True) + raw.pick(raw.ch_names[::10] + ['MEG 2641']) + raw.info.normalize_proj() # once with mag-trick # once with characteristic channel @@ -24,14 +25,16 @@ def test_find_ecg(): raw_bad._data[ecg_idx, :1] = 1e6 # this will break the detector raw_bad.annotations.append(raw.first_samp / raw.info['sfreq'], 1. 
/ raw.info['sfreq'], 'BAD_values') + raw_noload = raw.copy() + raw.resample(100) - for ch_name, tstart in zip(['MEG 1531', None, None], - [raw.times[-1] / 2, raw.times[-1] / 2, 0]): + for ch_name, tstart in zip(['MEG 1531', None], + [raw.times[-1] / 2, 0]): events, ch_ECG, average_pulse, ecg = find_ecg_events( raw, event_id=999, ch_name=ch_name, tstart=tstart, return_ecg=True) assert raw.n_times == ecg.shape[-1] - assert 55 < average_pulse < 60 + assert 40 < average_pulse < 60 n_events = len(events) # with annotations @@ -44,11 +47,6 @@ def test_find_ecg(): reject_by_annotation=True)[2] assert 55 < average_pulse < 60 - average_pulse = find_ecg_events(raw_bad, ch_name='MEG 2641', - reject_by_annotation=False)[2] - assert 55 < average_pulse < 65 - del raw_bad - picks = pick_types( raw.info, meg='grad', eeg=False, stim=False, eog=False, ecg=True, emg=False, ref_meg=False, @@ -58,10 +56,11 @@ def test_find_ecg(): # tested assert 'ecg' not in raw - ecg_epochs = create_ecg_epochs(raw, picks=picks, keep_ecg=True) + ecg_epochs = create_ecg_epochs(raw_noload, picks=picks, keep_ecg=True) assert len(ecg_epochs.events) == n_events assert 'ECG-SYN' not in raw.ch_names assert 'ECG-SYN' in ecg_epochs.ch_names + assert len(ecg_epochs) == 23 picks = pick_types( ecg_epochs.info, meg=False, eeg=False, stim=False, @@ -74,15 +73,14 @@ def test_find_ecg(): # test with user provided ecg channel raw.info['projs'] = list() + assert 'MEG 2641' in raw.ch_names with pytest.warns(RuntimeWarning, match='unit for channel'): raw.set_channel_types({'MEG 2641': 'ecg'}) create_ecg_epochs(raw) - raw.load_data().pick_types(meg=True) # remove ECG + raw.pick_types(meg=True) # remove ECG + assert 'MEG 2641' not in raw.ch_names ecg_epochs = create_ecg_epochs(raw, keep_ecg=False) assert len(ecg_epochs.events) == n_events assert 'ECG-SYN' not in raw.ch_names assert 'ECG-SYN' not in ecg_epochs.ch_names - - -run_tests_if_main() diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index fc21a218d59..78a504a2684 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -515,27 +515,37 @@ def test_ica_core(method, n_components, noise_cov, n_pca_components): ica.fit(epochs, picks=[0, 1]) -@requires_sklearn -@pytest.mark.slowtest -@pytest.mark.parametrize("method", ["picard", "fastica"]) -def test_ica_additional(method, tmpdir): - """Test additional ICA functionality.""" - _skip_check_picard(method) - - stop2 = 500 - raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() +@pytest.fixture +def short_raw_epochs(): + """Get small data.""" + raw = read_raw_fif(raw_fname).crop(0, 5).load_data() + raw.pick_channels(set(raw.ch_names[::10]) | set( + ['EOG 061', 'MEG 1531', 'MEG 1441', 'MEG 0121'])) + assert 'eog' in raw raw.del_proj() # avoid warnings raw.set_annotations(Annotations([0.5], [0.5], ['BAD'])) + raw.resample(100) # XXX This breaks the tests :( # raw.info['bads'] = [raw.ch_names[1]] - test_cov = read_cov(test_cov_name) - events = read_events(event_name) - picks = pick_types(raw.info, meg=True, stim=False, ecg=False, - eog=False, exclude='bads')[1::4] + # Create epochs that have different channels from raw + events = make_fixed_length_events(raw) + picks = pick_types(raw.info, meg=True, eeg=True, eog=False)[:-1] epochs = Epochs(raw, events, None, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, proj=False) - epochs.decimate(3, verbose='error') - assert len(epochs) == 4 + assert len(epochs) == 3 + epochs_eog = Epochs(raw, epochs.events, event_id, tmin, 
tmax, + picks=('meg', 'eog'), baseline=(None, 0), preload=True) + return raw, epochs, epochs_eog + + +@requires_sklearn +@pytest.mark.slowtest +@pytest.mark.parametrize("method", ["picard", "fastica"]) +def test_ica_additional(method, tmpdir, short_raw_epochs): + """Test additional ICA functionality.""" + _skip_check_picard(method) + raw, epochs, epochs_eog = short_raw_epochs + few_picks = np.arange(5) # test if n_components=None works ica = ICA(n_components=None, n_pca_components=None, method=method, @@ -543,17 +553,12 @@ def test_ica_additional(method, tmpdir): with pytest.warns(UserWarning, match='did not converge'): ica.fit(epochs) _assert_ica_attributes(ica, epochs.get_data('data'), limits=(0.05, 20)) - # for testing eog functionality - picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)]) - epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2, - baseline=(None, 0), preload=True) - del picks2 - - test_cov2 = test_cov.copy() - ica = ICA(noise_cov=test_cov2, n_components=3, method=method) + + test_cov = read_cov(test_cov_name) + ica = ICA(noise_cov=test_cov, n_components=3, method=method) assert (ica.info is None) with pytest.warns(RuntimeWarning, match='normalize_proj'): - ica.fit(raw, picks[:5]) + ica.fit(raw, picks=few_picks) _assert_ica_attributes(ica, raw.get_data(np.arange(5)), limits=(1, 90)) assert (isinstance(ica.info, Info)) assert (ica.n_components_ < 5) @@ -563,12 +568,12 @@ def test_ica_additional(method, tmpdir): ica.save('') with pytest.warns(Warning, match='converge'): - ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2) + ica.fit(raw, np.arange(1, 6)) _assert_ica_attributes( - ica, raw.get_data(np.arange(1, 6), start=start, stop=stop2)) + ica, raw.get_data(np.arange(1, 6))) # check Kuiper index threshold - assert_equal(ica._get_ctps_threshold(), 0.21) + assert_allclose(ica._get_ctps_threshold(), 0.5) with pytest.raises(TypeError, match='str or numeric'): ica.find_bads_ecg(raw, threshold=None) with pytest.warns(RuntimeWarning, match='is longer than the signal'): @@ -581,6 +586,7 @@ def test_ica_additional(method, tmpdir): # check passing a ch_name to find_bads_ecg with pytest.warns(RuntimeWarning, match='longer'): _, scores_1 = ica.find_bads_ecg(raw, threshold='auto') + with pytest.warns(RuntimeWarning, match='longer'): _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1], threshold='auto') assert scores_1[0] != scores_2[0] @@ -616,7 +622,7 @@ def test_ica_additional(method, tmpdir): assert 'No maps selected' in log # make sure a single threshold in a list works - corrmap([ica, ica3], template, threshold=[0.5], label='blinks', plot=True, + corrmap([ica, ica3], template, threshold=[0.5], label='blinks', plot=False, ch_type="mag") ica_different_channels = ICA(n_components=2, max_iter=1) @@ -638,7 +644,7 @@ def test_ica_additional(method, tmpdir): raw_.append(raw_) n_samples = raw_._data.shape[1] with pytest.warns(UserWarning, match='did not converge'): - ica.fit(raw, picks=picks[:5], decim=3) + ica.fit(raw, picks=few_picks) _assert_ica_attributes(ica) assert raw_._data.shape[1] == n_samples @@ -649,7 +655,7 @@ def test_ica_additional(method, tmpdir): ICA(n_components=1, method=method) ica = ICA(n_components=4, method=method, max_iter=1) with pytest.warns(UserWarning, match='did not converge'): - ica.fit(raw, picks=None, decim=3) + ica.fit(raw) _assert_ica_attributes(ica) assert ica.n_components_ == 4 ica_var = _ica_explained_variance(ica, raw, normalize=True) @@ -664,92 +670,63 @@ def test_ica_additional(method, tmpdir): # 
epochs extraction from raw fit pytest.raises(RuntimeError, ica.get_sources, epochs) - # test reading and writing + + # test filtering + ica_raw = ica.get_sources(raw) + d1 = ica_raw._data[0].copy() + ica_raw.filter(4, 20, fir_design='firwin2') + assert_equal(ica_raw.info['lowpass'], 20.) + assert_equal(ica_raw.info['highpass'], 4.) + assert ((d1 != ica_raw._data[0]).any()) + d1 = ica_raw._data[0].copy() + ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') + assert ((d1 != ica_raw._data[0]).any()) + test_ica_fname = tmpdir.join('test-ica.fif') - kwargs = dict(n_pca_components=4) - for cov in (None, test_cov): - ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1) - with pytest.warns(None): # ICA does not converge - ica.fit(raw, picks=picks[:10], start=start, stop=stop2) - _assert_ica_attributes(ica) - sources = ica.get_sources(epochs).get_data() - assert (ica.mixing_matrix_.shape == (2, 2)) - assert (ica.unmixing_matrix_.shape == (2, 2)) - assert (ica.pca_components_.shape == (10, 10)) - assert (sources.shape[1] == ica.n_components_) - - for exclude in [[], [0], np.array([1, 2, 3])]: - ica.exclude = exclude - ica.labels_ = {'foo': [0]} - ica.save(test_ica_fname) - ica_read = read_ica(test_ica_fname) - assert (list(ica.exclude) == ica_read.exclude) - assert_equal(ica.labels_, ica_read.labels_) - ica.apply(raw.copy(), **kwargs) - ica.exclude = [] - ica.apply(raw.copy(), exclude=[1], **kwargs) - assert (ica.exclude == []) - - ica.exclude = [0, 1] - ica.apply(raw.copy(), exclude=[1], **kwargs) - assert (ica.exclude == [0, 1]) - - ica_raw = ica.get_sources(raw) - assert (ica.exclude == [ica_raw.ch_names.index(e) for e in - ica_raw.info['bads']]) - - # test filtering - d1 = ica_raw._data[0].copy() - ica_raw.filter(4, 20, fir_design='firwin2') - assert_equal(ica_raw.info['lowpass'], 20.) - assert_equal(ica_raw.info['highpass'], 4.) 
- assert ((d1 != ica_raw._data[0]).any()) - d1 = ica_raw._data[0].copy() - ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') - assert ((d1 != ica_raw._data[0]).any()) - - ica.n_pca_components = 2 - ica.method = 'fake' - ica.save(test_ica_fname) - ica_read = read_ica(test_ica_fname) - assert (ica.n_pca_components == ica_read.n_pca_components) - assert_equal(ica.method, ica_read.method) - assert_equal(ica.labels_, ica_read.labels_) + ica.n_pca_components = 2 + ica.method = 'fake' + ica.save(test_ica_fname) + ica_read = read_ica(test_ica_fname) + assert (ica.n_pca_components == ica_read.n_pca_components) + assert_equal(ica.method, ica_read.method) + assert_equal(ica.labels_, ica_read.labels_) - # check type consistency - attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ ' - 'pca_explained_variance_ pre_whitener_') + # check type consistency + attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ ' + 'pca_explained_variance_ pre_whitener_') - def f(x, y): - return getattr(x, y).dtype + def f(x, y): + return getattr(x, y).dtype - for attr in attrs.split(): - assert_equal(f(ica_read, attr), f(ica, attr)) + for attr in attrs.split(): + assert_equal(f(ica_read, attr), f(ica, attr)) - ica.n_pca_components = 4 - ica_read.n_pca_components = 4 + ica.n_pca_components = 4 + ica_read.n_pca_components = 4 - ica.exclude = [] - ica.save(test_ica_fname) - ica_read = read_ica(test_ica_fname) - for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_', - 'pca_mean_', 'pca_explained_variance_', - 'pre_whitener_']: - assert_array_almost_equal(getattr(ica, attr), - getattr(ica_read, attr)) + ica.exclude = [] + ica.save(test_ica_fname) + ica_read = read_ica(test_ica_fname) + for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_', + 'pca_mean_', 'pca_explained_variance_', + 'pre_whitener_']: + assert_array_almost_equal(getattr(ica, attr), getattr(ica_read, attr)) - assert (ica.ch_names == ica_read.ch_names) - assert (isinstance(ica_read.info, Info)) + assert (ica.ch_names == ica_read.ch_names) + assert (isinstance(ica_read.info, Info)) - sources = ica.get_sources(raw)[:, :][0] - sources2 = ica_read.get_sources(raw)[:, :][0] - assert_array_almost_equal(sources, sources2) + sources = ica.get_sources(raw)[:, :][0] + sources2 = ica_read.get_sources(raw)[:, :][0] + assert_array_almost_equal(sources, sources2) - _raw1 = ica.apply(raw.copy(), exclude=[1]) - _raw2 = ica_read.apply(raw.copy(), exclude=[1]) - assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0]) + _raw1 = ica.apply(raw.copy(), exclude=[1]) + _raw2 = ica_read.apply(raw.copy(), exclude=[1]) + assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0]) + + ica = ICA(n_components=2, method=method, max_iter=1) + with pytest.warns(None): # ICA does not converge + ica.fit(raw, picks=few_picks) - os.remove(test_ica_fname) # check score funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: @@ -892,8 +869,8 @@ def f(x, y): ica = ICA(method=method) with pytest.warns(None): # sometimes does not converge - ica.fit(raw, picks=picks[:5]) - _assert_ica_attributes(ica, raw.get_data(picks[:5])) + ica.fit(raw, picks=few_picks) + _assert_ica_attributes(ica, raw.get_data(few_picks)) with pytest.warns(RuntimeWarning, match='longer'): ica.find_bads_ecg(raw, threshold='auto') ica.find_bads_eog(epochs, ch_name='MEG 0121') @@ -908,6 +885,55 @@ def f(x, y): ica.find_bads_ecg(raw, threshold='auto') +@requires_sklearn +@pytest.mark.slowtest +@pytest.mark.parametrize('method, cov', [ + ('picard', 
None), + ('picard', test_cov_name), + ('fastica', None), +]) +def test_ica_cov(method, cov, tmpdir, short_raw_epochs): + """Test ICA with cov.""" + _skip_check_picard(method) + raw, epochs, epochs_eog = short_raw_epochs + if cov is not None: + cov = read_cov(cov) + + # test reading and writing + test_ica_fname = tmpdir.join('test-ica.fif') + kwargs = dict(n_pca_components=4) + + ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1) + with pytest.warns(None): # ICA does not converge + ica.fit(raw, picks=np.arange(10)) + _assert_ica_attributes(ica) + sources = ica.get_sources(epochs).get_data() + assert (ica.mixing_matrix_.shape == (2, 2)) + assert (ica.unmixing_matrix_.shape == (2, 2)) + assert (ica.pca_components_.shape == (10, 10)) + assert (sources.shape[1] == ica.n_components_) + + for exclude in [[], [0], np.array([1, 2, 3])]: + ica.exclude = exclude + ica.labels_ = {'foo': [0]} + ica.save(test_ica_fname) + ica_read = read_ica(test_ica_fname) + assert (list(ica.exclude) == ica_read.exclude) + assert_equal(ica.labels_, ica_read.labels_) + ica.apply(raw.copy(), **kwargs) + ica.exclude = [] + ica.apply(raw.copy(), exclude=[1], **kwargs) + assert (ica.exclude == []) + + ica.exclude = [0, 1] + ica.apply(raw.copy(), exclude=[1], **kwargs) + assert (ica.exclude == [0, 1]) + + ica_raw = ica.get_sources(raw) + assert (ica.exclude == [ica_raw.ch_names.index(e) for e in + ica_raw.info['bads']]) + + @requires_sklearn @pytest.mark.parametrize("method", ("fastica", "picard", "infomax")) @pytest.mark.parametrize("idx", (None, -1, slice(2), [0, 1])) @@ -949,15 +975,22 @@ def test_ica_twice(method): """Test running ICA twice.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() + raw.pick(raw.ch_names[::10]) picks = pick_types(raw.info, meg='grad', exclude='bads') - n_components = 0.9 + n_components = 0.99 n_pca_components = 0.9999 + if method == 'fastica': + ctx = pytest.warns(None) # convergence, sometimes + else: + ctx = nullcontext() ica1 = ICA(n_components=n_components, method=method) - ica1.fit(raw, picks=picks, decim=3) + with ctx: + ica1.fit(raw, picks=picks, decim=3) raw_new = ica1.apply(raw, n_pca_components=n_pca_components) ica2 = ICA(n_components=n_components, method=method) - ica2.fit(raw_new, picks=picks, decim=3) + with ctx: + ica2.fit(raw_new, picks=picks, decim=3) assert_equal(ica1.n_components_, ica2.n_components_) @@ -1167,6 +1200,9 @@ def test_ica_ctf(): """Test run ICA computation on ctf data with/without compensation.""" method = 'fastica' raw = read_raw_ctf(ctf_fname, preload=True) + picks = sorted(set(range(0, len(raw.ch_names), 10)) | + set(pick_types(raw.info, ref_meg=True))) + raw.pick(picks) events = make_fixed_length_events(raw, 99999) for comp in [0, 1]: raw.apply_gradient_compensation(comp) @@ -1208,9 +1244,13 @@ def test_ica_labels(): # The CTF data are uniquely well suited to testing the ICA.find_bads_ # methods raw = read_raw_ctf(ctf_fname, preload=True) + raw.pick_channels(raw.ch_names[:300:10] + raw.ch_names[300:]) # set the appropriate EEG channels to EOG and ECG - raw.set_channel_types({'EEG057': 'eog', 'EEG058': 'eog', 'EEG059': 'ecg'}) + rename = {'EEG057': 'eog', 'EEG058': 'eog', 'EEG059': 'ecg'} + for key in rename: + assert key in raw.ch_names + raw.set_channel_types(rename) ica = ICA(n_components=4, max_iter=2, method='fastica', allow_ref_meg=True) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) @@ -1262,64 +1302,52 @@ def test_ica_labels(): @requires_sklearn 
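# (Editorial aside, not part of this patch: a brief hypothetical sketch of
# the noise-covariance pre-whitening that test_ica_cov above exercises. When
# noise_cov is given, ICA whitens the data with that covariance before the
# decomposition instead of scaling each channel type; the file name below is
# a placeholder.
#
#     import mne
#     from mne.preprocessing import ICA
#
#     raw = mne.io.read_raw_fif('sample-raw.fif', preload=True)
#     cov = mne.compute_raw_covariance(raw, tmax=10.)
#     ica = ICA(n_components=2, noise_cov=cov, random_state=0)
#     ica.fit(raw, picks='meg')
#     print(ica.pre_whitener_.shape)  # square matrix when noise_cov is used
# )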
@testing.requires_testing_data -def test_ica_eeg(): +@pytest.mark.parametrize('fname, grade', [ + (fif_fname, None), + (eeglab_fname, None), + (ctf_fname2, 0), + (ctf_fname2, 1), +]) +def test_ica_eeg(fname, grade): """Test ICA on EEG.""" method = 'fastica' - raw_fif = read_raw_fif(fif_fname, preload=True) - raw_eeglab = read_raw_eeglab(input_fname=eeglab_fname, - preload=True) - for raw in [raw_fif, raw_eeglab]: - events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, - duration=0.1) - picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2] - picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2] - picks_all = [] - picks_all.extend(picks_meg) - picks_all.extend(picks_eeg) - epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True, proj=False) - evoked = epochs.average() + if fname.endswith('.fif'): + raw = read_raw_fif(fif_fname) + raw.pick(raw.ch_names[::5]).load_data() + raw.info.normalize_proj() + elif fname.endswith('.set'): + raw = read_raw_eeglab(input_fname=eeglab_fname, preload=True) + else: + with pytest.warns(RuntimeWarning, match='MISC channel'): + raw = read_raw_ctf(ctf_fname2) + raw.pick(raw.ch_names[:30] + raw.ch_names[30::10]).load_data() + if grade is not None: + raw.apply_gradient_compensation(grade) - for picks in [picks_meg, picks_eeg, picks_all]: - if len(picks) == 0: - continue - # test fit - for inst in [raw, epochs]: - ica = ICA(n_components=2, max_iter=2, method=method) - with pytest.warns(None): - ica.fit(inst, picks=picks, verbose=True) - _assert_ica_attributes(ica) - - # test apply and get_sources - for inst in [raw, epochs, evoked]: - ica.apply(inst) - ica.get_sources(inst) - - with pytest.warns(RuntimeWarning, match='MISC channel'): - raw = read_raw_ctf(ctf_fname2, preload=True) - events = make_fixed_length_events(raw, 99999, start=0, stop=0.2, + events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, duration=0.1) - picks_meg = pick_types(raw.info, meg=True, eeg=False)[:2] + picks_meg = pick_types(raw.info, meg=True, eeg=False, ref_meg=False)[:2] picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2] - picks_all = picks_meg + picks_eeg - for comp in [0, 1]: - raw.apply_gradient_compensation(comp) - epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True) - evoked = epochs.average() + picks_all = [] + picks_all.extend(picks_meg) + picks_all.extend(picks_eeg) + epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True, proj=False) + evoked = epochs.average() - for picks in [picks_meg, picks_eeg, picks_all]: - if len(picks) == 0: - continue - # test fit - for inst in [raw, epochs]: - ica = ICA(n_components=2, max_iter=2, method=method) - with pytest.warns(None): - ica.fit(inst) - _assert_ica_attributes(ica) - - # test apply and get_sources - for inst in [raw, epochs, evoked]: - ica.apply(inst) - ica.get_sources(inst) + for picks in [picks_meg, picks_eeg, picks_all]: + if len(picks) == 0: + continue + # test fit + for inst in [raw, epochs]: + ica = ICA(n_components=2, max_iter=2, method=method) + with pytest.warns(None): + ica.fit(inst, picks=picks, verbose=True) + _assert_ica_attributes(ica) + + # test apply and get_sources + for inst in [raw, epochs, evoked]: + ica.apply(inst) + ica.get_sources(inst) @testing.requires_testing_data diff --git a/mne/preprocessing/tests/test_ssp.py b/mne/preprocessing/tests/test_ssp.py index 858e800e267..b16f5190a16 100644 --- a/mne/preprocessing/tests/test_ssp.py +++ b/mne/preprocessing/tests/test_ssp.py @@ -7,7 +7,6 @@ from mne.io import read_raw_fif, read_raw_ctf from mne.io.proj import 
make_projector, activate_proj from mne.preprocessing.ssp import compute_proj_ecg, compute_proj_eog -from mne.utils import run_tests_if_main from mne.datasets import testing from mne import pick_types @@ -20,98 +19,107 @@ 'testdata_ctf.ds') -def test_compute_proj_ecg(): +@pytest.fixture() +def short_raw(): + """Create a short, picked raw instance.""" + raw = read_raw_fif(raw_fname).crop(0, 7).pick_types( + meg=True, eeg=True, eog=True) + raw.pick(raw.ch_names[:306:10] + raw.ch_names[306:]).load_data() + raw.info.normalize_proj() + return raw + + +@pytest.mark.parametrize('average', (True, False)) +def test_compute_proj_ecg(short_raw, average): """Test computation of ECG SSP projectors.""" - raw = read_raw_fif(raw_fname).crop(0, 10) - raw.load_data() - for average in [False, True]: - # For speed, let's not filter here (must also not reject then) + raw = short_raw + + # For speed, let's not filter here (must also not reject then) + with pytest.warns(RuntimeWarning, match='Attenuation'): projs, events = compute_proj_ecg( raw, n_mag=2, n_grad=2, n_eeg=2, ch_name='MEG 1531', bads=['MEG 2443'], average=average, avg_ref=True, no_proj=True, l_freq=None, h_freq=None, reject=None, tmax=dur_use, - qrs_threshold=0.5, filter_length=6000) - assert len(projs) == 7 - # heart rate at least 0.5 Hz, but less than 3 Hz - assert (events.shape[0] > 0.5 * dur_use and - events.shape[0] < 3 * dur_use) - ssp_ecg = [proj for proj in projs if proj['desc'].startswith('ECG')] - # check that the first principal component have a certain minimum - ssp_ecg = [proj for proj in ssp_ecg if 'PCA-01' in proj['desc']] - thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1 - for proj in ssp_ecg: - if 'planar' in proj['desc']: - assert proj['explained_var'] > thresh_planar - elif 'axial' in proj['desc']: - assert proj['explained_var'] > thresh_axial - elif 'eeg' in proj['desc']: - assert proj['explained_var'] > thresh_eeg - # XXX: better tests - - # without setting a bad channel, this should throw a warning - with pytest.warns(RuntimeWarning, match='No good epochs found'): - projs, events, drop_log = compute_proj_ecg( - raw, n_mag=2, n_grad=2, n_eeg=2, ch_name='MEG 1531', bads=[], - average=average, avg_ref=True, no_proj=True, l_freq=None, - h_freq=None, tmax=dur_use, return_drop_log=True) - assert projs is None - assert len(events) == len(drop_log) - - -def test_compute_proj_eog(): + qrs_threshold=0.5, filter_length=1000) + assert len(projs) == 7 + # heart rate at least 0.5 Hz, but less than 3 Hz + assert (events.shape[0] > 0.5 * dur_use and + events.shape[0] < 3 * dur_use) + ssp_ecg = [proj for proj in projs if proj['desc'].startswith('ECG')] + # check that the first principal component have a certain minimum + ssp_ecg = [proj for proj in ssp_ecg if 'PCA-01' in proj['desc']] + thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1 + for proj in ssp_ecg: + if 'planar' in proj['desc']: + assert proj['explained_var'] > thresh_planar + elif 'axial' in proj['desc']: + assert proj['explained_var'] > thresh_axial + elif 'eeg' in proj['desc']: + assert proj['explained_var'] > thresh_eeg + # XXX: better tests + + # without setting a bad channel, this should throw a warning + with pytest.warns(RuntimeWarning, match='No good epochs found'): + projs, events, drop_log = compute_proj_ecg( + raw, n_mag=2, n_grad=2, n_eeg=2, ch_name='MEG 1531', bads=[], + average=average, avg_ref=True, no_proj=True, l_freq=None, + h_freq=None, tmax=dur_use, return_drop_log=True) + assert projs is None + assert len(events) == len(drop_log) + + 
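# (Editorial aside, not part of this patch: a minimal usage sketch of the
# workflow the test above covers. compute_proj_ecg finds heartbeats, averages
# the epochs around them when average=True, and returns SSP vectors per
# channel type; the file name and projector counts are placeholders.)
import mne
from mne.preprocessing import compute_proj_ecg

demo_raw = mne.io.read_raw_fif('sample-raw.fif', preload=True)
demo_projs, demo_events = compute_proj_ecg(
    demo_raw, n_mag=2, n_grad=2, n_eeg=2, average=True, no_proj=True)
demo_raw.add_proj(demo_projs).apply_proj()  # project out the ECG subspace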
+@pytest.mark.parametrize('average', [True, False])
+def test_compute_proj_eog(average, short_raw):
     """Test computation of EOG SSP projectors."""
-    raw = read_raw_fif(raw_fname).crop(0, 10)
-    raw.load_data()
-    for average in [False, True]:
-        n_projs_init = len(raw.info['projs'])
-        projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
-                                         bads=['MEG 2443'], average=average,
-                                         avg_ref=True, no_proj=False,
-                                         l_freq=None, h_freq=None,
-                                         reject=None, tmax=dur_use,
-                                         filter_length=6000)
-        assert (len(projs) == (7 + n_projs_init))
-        assert (np.abs(events.shape[0] -
-                       np.sum(np.less(eog_times, dur_use))) <= 1)
-        ssp_eog = [proj for proj in projs if proj['desc'].startswith('EOG')]
-        # check that the first principal component have a certain minimum
-        ssp_eog = [proj for proj in ssp_eog if 'PCA-01' in proj['desc']]
-        thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1
-        for proj in ssp_eog:
-            if 'planar' in proj['desc']:
-                assert (proj['explained_var'] > thresh_planar)
-            elif 'axial' in proj['desc']:
-                assert (proj['explained_var'] > thresh_axial)
-            elif 'eeg' in proj['desc']:
-                assert (proj['explained_var'] > thresh_eeg)
-        # XXX: better tests
-
-        with pytest.warns(RuntimeWarning, match='longer'):
-            projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
-                                             average=average, bads=[],
-                                             avg_ref=True, no_proj=False,
-                                             l_freq=None, h_freq=None,
-                                             tmax=dur_use)
-        assert projs is None
+    raw = short_raw
+
+    n_projs_init = len(raw.info['projs'])
+    with pytest.warns(RuntimeWarning, match='Attenuation'):
+        projs, events = compute_proj_eog(
+            raw, n_mag=2, n_grad=2, n_eeg=2, bads=['MEG 2443'],
+            average=average, avg_ref=True, no_proj=False, l_freq=None,
+            h_freq=None, reject=None, tmax=dur_use, filter_length=1000)
+    assert (len(projs) == (7 + n_projs_init))
+    assert (np.abs(events.shape[0] -
+                   np.sum(np.less(eog_times, dur_use))) <= 1)
+    ssp_eog = [proj for proj in projs if proj['desc'].startswith('EOG')]
+    # check that the first principal component has a certain minimum
+    ssp_eog = [proj for proj in ssp_eog if 'PCA-01' in proj['desc']]
+    thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1
+    for proj in ssp_eog:
+        if 'planar' in proj['desc']:
+            assert (proj['explained_var'] > thresh_planar)
+        elif 'axial' in proj['desc']:
+            assert (proj['explained_var'] > thresh_axial)
+        elif 'eeg' in proj['desc']:
+            assert (proj['explained_var'] > thresh_eeg)
+    # XXX: better tests
+
+    with pytest.warns(RuntimeWarning, match='longer'):
+        projs, events = compute_proj_eog(
+            raw, n_mag=2, n_grad=2, n_eeg=2, average=average, bads=[],
+            avg_ref=True, no_proj=False, l_freq=None, h_freq=None,
+            tmax=dur_use)
+        assert projs is None
 
 
 @pytest.mark.slowtest  # can be slow on OSX
-def test_compute_proj_parallel():
+def test_compute_proj_parallel(short_raw):
     """Test computation of ExG projectors using parallelization."""
-    raw_0 = read_raw_fif(raw_fname).crop(0, 10)
-    raw_0.load_data()
-    raw = raw_0.copy()
-    projs, _ = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2,
-                                bads=['MEG 2443'], average=False,
-                                avg_ref=True, no_proj=False, n_jobs=1,
-                                l_freq=None, h_freq=None, reject=None,
-                                tmax=dur_use, filter_length=6000)
-    raw_2 = raw_0.copy()
-    projs_2, _ = compute_proj_eog(raw_2, n_mag=2, n_grad=2, n_eeg=2,
-                                  bads=['MEG 2443'], average=False,
-                                  avg_ref=True, no_proj=False, n_jobs=2,
-                                  l_freq=None, h_freq=None, reject=None,
-                                  tmax=dur_use, filter_length=6000)
+    short_raw = short_raw.copy().pick(('eeg', 'eog')).resample(100)
+    raw = short_raw.copy()
+    with pytest.warns(RuntimeWarning, match='Attenuation'):
+        projs, _ = 
compute_proj_eog( + raw, n_eeg=2, bads=raw.ch_names[1:2], average=False, + avg_ref=True, no_proj=False, n_jobs=1, l_freq=None, h_freq=None, + reject=None, tmax=dur_use, filter_length=100) + raw_2 = short_raw.copy() + with pytest.warns(RuntimeWarning, match='Attenuation'): + projs_2, _ = compute_proj_eog( + raw_2, n_eeg=2, bads=raw.ch_names[1:2], + average=False, avg_ref=True, no_proj=False, n_jobs=2, + l_freq=None, h_freq=None, reject=None, tmax=dur_use, + filter_length=100) projs = activate_proj(projs) projs_2 = activate_proj(projs_2) projs, _, _ = make_projector(projs, raw_2.info['ch_names'], @@ -122,6 +130,7 @@ def test_compute_proj_parallel(): def _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs): + assert projs is not None for p in projs: if 'planar' in p['desc']: assert len(p['data']['col_names']) == n_grads @@ -135,41 +144,39 @@ def _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs): @testing.requires_testing_data def test_compute_proj_ctf(): """Test to show that projector code completes on CTF data.""" - raw = read_raw_ctf(ctf_fname) - raw.load_data() + raw = read_raw_ctf(ctf_fname, preload=True) # expected channels per projector type - n_mags = len(pick_types(raw.info, meg='mag', ref_meg=False, - exclude='bads')) - n_grads = len(pick_types(raw.info, meg='grad', ref_meg=False, - exclude='bads')) - n_eegs = len(pick_types(raw.info, meg=False, eeg=True, ref_meg=False, - exclude='bads')) + mag_picks = pick_types( + raw.info, meg='mag', ref_meg=False, exclude='bads')[::10] + n_mags = len(mag_picks) + grad_picks = pick_types(raw.info, meg='grad', ref_meg=False, + exclude='bads')[::10] + n_grads = len(grad_picks) + eeg_picks = pick_types(raw.info, meg=False, eeg=True, ref_meg=False, + exclude='bads')[2::3] + n_eegs = len(eeg_picks) + ref_picks = pick_types(raw.info, meg=False, ref_meg=True) + raw.pick(np.sort(np.concatenate( + [mag_picks, grad_picks, eeg_picks, ref_picks]))) + del mag_picks, grad_picks, eeg_picks, ref_picks # Test with and without gradient compensation - for c in [0, 1]: - raw.apply_gradient_compensation(c) - for average in [False, True]: - n_projs_init = len(raw.info['projs']) - projs, events = compute_proj_eog(raw, n_mag=2, n_grad=2, n_eeg=2, - average=average, - ch_name='EEG059', - avg_ref=True, no_proj=False, - l_freq=None, h_freq=None, - reject=None, tmax=dur_use, - filter_length=6000) - _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs) - assert len(projs) == (5 + n_projs_init) - - projs, events = compute_proj_ecg(raw, n_mag=1, n_grad=1, n_eeg=2, - average=average, - ch_name='EEG059', - avg_ref=True, no_proj=False, - l_freq=None, h_freq=None, - reject=None, tmax=dur_use, - filter_length=6000) - _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs) - assert len(projs) == (4 + n_projs_init) - - -run_tests_if_main() + raw.apply_gradient_compensation(0) + n_projs_init = len(raw.info['projs']) + with pytest.warns(RuntimeWarning, match='Attenuation'): + projs, _ = compute_proj_eog( + raw, n_mag=2, n_grad=2, n_eeg=2, average=True, ch_name='EEG059', + avg_ref=True, no_proj=False, l_freq=None, h_freq=None, + reject=None, tmax=dur_use, filter_length=1000) + _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs) + assert len(projs) == (5 + n_projs_init) + + raw.apply_gradient_compensation(1) + with pytest.warns(RuntimeWarning, match='Attenuation'): + projs, _ = compute_proj_ecg( + raw, n_mag=1, n_grad=1, n_eeg=2, average=True, ch_name='EEG059', + avg_ref=True, no_proj=False, l_freq=None, h_freq=None, + 
reject=None, tmax=dur_use, filter_length=1000) + _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs) + assert len(projs) == (4 + n_projs_init) diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index bece65c669d..c1686db5c25 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -310,9 +310,16 @@ def _assert_drop_log_types(drop_log): def test_reject(): """Test epochs rejection.""" - raw, events, picks = _get_data() + raw, events, _ = _get_data() + names = raw.ch_names[::5] + assert 'MEG 2443' in names + raw.pick(names).load_data() + assert 'eog' in raw + raw.info.normalize_proj() + picks = np.arange(len(raw.ch_names)) # cull the list just to contain the relevant event events = events[events[:, 2] == event_id, :] + assert len(events) == 7 selection = np.arange(3) drop_log = ((),) * 3 + (('MEG 2443',),) * 4 _assert_drop_log_types(drop_log) From d26170c0424331e2d9c0b59a4011d0d5bbeee46d Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 17 Dec 2020 07:58:25 -0500 Subject: [PATCH 022/387] DOC: Cited --- doc/cited.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/cited.rst b/doc/cited.rst index 9dfb7b2b409..bf4a37c6064 100644 --- a/doc/cited.rst +++ b/doc/cited.rst @@ -5,7 +5,7 @@ Publications by users ===================== -Estimates provided by Google Scholar as of July 9th 2020: +Estimates provided by Google Scholar as of December 17th 2020: -- `MNE (838) `_ -- `MNE-Python (655) `_ +- `MNE (893) `_ +- `MNE-Python (726) `_ From 0317b7de9827fb00a6e9361d133c8bd51153eb9b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 17 Dec 2020 08:27:38 -0500 Subject: [PATCH 023/387] DOC: Update author list --- .mailmap | 8 +++++- doc/changes/latest.inc | 65 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 60 insertions(+), 13 deletions(-) diff --git a/.mailmap b/.mailmap index 72ef02e1f4a..3b5489bffea 100644 --- a/.mailmap +++ b/.mailmap @@ -90,7 +90,7 @@ Guillaume Dumas deep-introspection Guillaume Dumas Félix Raimundo Felix Raimundo Asish Panda kaichogami -Mikolaj Magnuski mmagnuski +Mikołaj Magnuski mmagnuski Alexandre Barachant alexandre barachant Lorenzo Alfine lorrandal Paul Pasler ppasler @@ -154,3 +154,9 @@ Demetres Kostas <40433000+kostasde@users.noreply.github.com> kostasde <40433000+ Mohammad Daneshzand <55800429+mdaneshzand@users.noreply.github.com> mdaneshzand <55800429+mdaneshzand@users.noreply.github.com> Mohamed Sherif mohdsherif Dmitrii Altukhov dmalt +Jeff Stout jstout211 +Eduard Ort examplename +Eduard Ort eort +Tod Flak <45362686+todflak@users.noreply.github.com> todflak <45362686+todflak@users.noreply.github.com> +Hongjiang Ye YE Hongjiang +Victoria Peterson vpeterson diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index bd50ad4589b..e71fcb33e05 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -7,30 +7,30 @@ - "Bugs" for bug fixes - "API changes" for backward-incompatible changes -.. _current: +.. _changes_0_22: -Current (0.22.dev0) -------------------- +Version 0.22.0 +-------------- -.. |Eduard Ort| replace:: **Eduard Ort** +.. |Austin Hurst| replace:: **Austin Hurst** .. |Aniket Pradhan| replace:: **Aniket Pradhan** -.. |Tod Flak| replace:: **Tod Flak** - -.. |Victoria Peterson| replace:: **Victoria Peterson** - -.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** +.. |Eduard Ort| replace:: **Eduard Ort** .. |Evan Hathaway| replace:: **Evan Hathaway** -.. |Austin Hurst| replace:: **Austin Hurst** - .. |Hongjiang Ye| replace:: **Hongjiang Ye** +.. 
|Jeff Stout| replace:: **Jeff Stout** + +.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** + .. |Qianliang Li| replace:: **Qianliang Li** -.. |Jeff Stout| replace:: **Jeff Stout** +.. |Tod Flak| replace:: **Tod Flak** + +.. |Victoria Peterson| replace:: **Victoria Peterson** Enhancements @@ -229,3 +229,44 @@ API changes - Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) - Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) + +Authors +~~~~~~~ + +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Alexandre Gramfort +* Aniket Pradhan + +* Austin Hurst + +* Christian Brodbeck +* Clemens Brunner +* Daniel McCloy +* Denis A. Engemann +* Eduard Ort + +* Eric Larson +* Evan Hathaway + +* Evgenii Kalenkovich +* Fede Raimondo +* Guillaume Favelier +* Hongjiang Ye + +* Jean-Remi King +* Jeff Stout + +* Jonathan Kuziek + +* Jussi Nurminen +* Justus Schwabedal +* Keith Doelling +* Kyle Mathewson +* Mads Jensen +* Mainak Jas +* Marijn van Vliet +* Mikolaj Magnuski +* Olaf Hauk +* Qianliang Li + +* Richard Höchenberger +* Robert Luke +* Stefan Appelhoff +* Thomas Hartmann +* Tod Flak + +* Victoria Peterson + From d4912b8264a50bb1bd6b41ebcc33555f9f499341 Mon Sep 17 00:00:00 2001 From: Quanliang Li Date: Thu, 17 Dec 2020 08:29:14 -0500 Subject: [PATCH 024/387] Empty commit for credit From f997905a4cec4923fb69baef0d7d6af3052908a1 Mon Sep 17 00:00:00 2001 From: Austin Hurst Date: Thu, 17 Dec 2020 08:30:33 -0500 Subject: [PATCH 025/387] Empty commit for credit From 894606e2aed652af7d5d8cda4233fefd58981a6f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 17 Dec 2020 08:37:27 -0500 Subject: [PATCH 026/387] MAINT: Version --- .circleci/config.yml | 4 ++-- SECURITY.md | 6 +++--- doc/_templates/navbar.html | 3 ++- doc/changes/{latest.inc => 0.22.inc} | 0 doc/whats_new.rst | 2 +- mne/_version.py | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) rename doc/changes/{latest.inc => 0.22.inc} (100%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5e93be99fc0..3cd2fb5392a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -305,7 +305,7 @@ jobs: name: Deploy docs command: | set -e; - if [ "${CIRCLE_BRANCH}" == "master" ] || [ "${CIRCLE_BRANCH}" == "maint/0.21" ]; then + if [ "${CIRCLE_BRANCH}" == "master" ] || [ "${CIRCLE_BRANCH}" == "maint/0.22" ]; then git config --global user.email "circle@mne.com"; git config --global user.name "Circle CI"; cd ~/mne-tools.github.io; @@ -384,7 +384,7 @@ workflows: branches: only: - master - - maint/0.21 + - maint/0.22 # interactive_test weekly: diff --git a/SECURITY.md b/SECURITY.md index 4390f1a9c13..292e7e8e357 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,9 @@ without a proper 6-month deprecation cycle. 
| Version | Supported | | ------- | ------------------------ | -| 0.22.x | :heavy_check_mark: (dev) | -| 0.21.x | :heavy_check_mark: | -| < 0.21 | :x: | +| 0.23.x | :heavy_check_mark: (dev) | +| 0.22.x | :heavy_check_mark: | +| < 0.22 | :x: | ## Reporting a Vulnerability diff --git a/doc/_templates/navbar.html b/doc/_templates/navbar.html index 841891a9400..7c871f1c264 100644 --- a/doc/_templates/navbar.html +++ b/doc/_templates/navbar.html @@ -18,7 +18,8 @@ - ' - elif fname.endswith(tuple(VALID_EXTENSIONS + - ['bem', 'custom'])): + elif fname.endswith(VALID_EXTENSIONS + ('bem', 'custom')): html_toc += toc_list.substitute(div_klass=div_klass, id=global_id, tooltip=tooltip, @@ -1830,7 +1857,11 @@ def _render_raw(self, raw_fname, data_path): """Render raw (only text).""" global_id = self._get_id() - raw = read_raw_fif(raw_fname, allow_maxshield='yes') + kwargs = dict(fname=raw_fname, preload=False) + if raw_fname.endswith(('.fif', '.fif.gz')): + kwargs['allow_maxshield'] = True + raw = read_raw(**kwargs) + extra = '(MaxShield on)' if raw.info.get('maxshield', False) else '' caption = self._gen_caption(prefix='Raw', suffix=extra, fname=raw_fname, data_path=data_path) @@ -2147,7 +2178,7 @@ def _recursive_search(path, pattern): for f in fnmatch.filter(files, pattern): # only the following file types are supported # this ensures equitable distribution of jobs - if f.endswith(tuple(VALID_EXTENSIONS)): + if f.endswith(VALID_EXTENSIONS): filtered_files.append(op.realpath(op.join(dirpath, f))) return filtered_files diff --git a/mne/tests/test_report.py b/mne/tests/test_report.py index 31df6831460..77e9b9766d3 100644 --- a/mne/tests/test_report.py +++ b/mne/tests/test_report.py @@ -15,7 +15,6 @@ import pathlib import numpy as np -from numpy.testing import assert_equal import pytest from matplotlib import pyplot as plt @@ -41,6 +40,10 @@ inv_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') +bdf_fname = op.realpath(op.join(op.dirname(__file__), '..', 'io', + 'edf', 'tests', 'data', 'test.bdf')) +edf_fname = op.realpath(op.join(op.dirname(__file__), '..', 'io', + 'edf', 'tests', 'data', 'test.edf')) base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')) @@ -57,7 +60,7 @@ def _get_example_figures(): @pytest.mark.slowtest @testing.requires_testing_data def test_render_report(renderer, tmpdir): - """Test rendering -*.fif files for mne report.""" + """Test rendering *.fif files for mne report.""" tempdir = str(tmpdir) raw_fname_new = op.join(tempdir, 'temp_raw.fif') raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif') @@ -105,9 +108,9 @@ def test_render_report(renderer, tmpdir): [op.basename(x) for x in report.fnames]) assert (''.join(report.html).find(op.basename(fname)) != -1) - assert_equal(len(report.fnames), len(fnames)) - assert_equal(len(report.html), len(report.fnames)) - assert_equal(len(report.fnames), len(report)) + assert len(report.fnames) == len(fnames) + assert len(report.html) == len(report.fnames) + assert len(report.fnames) == len(report) # Check saving functionality report.data_path = tempdir @@ -126,8 +129,8 @@ def test_render_report(renderer, tmpdir): assert 'Topomap (ch_type =' in html assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html - assert_equal(len(report.html), len(fnames)) - assert_equal(len(report.html), len(report.fnames)) + assert len(report.html) == len(fnames) + assert len(report.html) == len(report.fnames) # Check saving same report to new 
filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) @@ -170,6 +173,42 @@ def test_render_report(renderer, tmpdir): report.add_figs_to_section(['foo'], 'caption', 'section') +@testing.requires_testing_data +def test_render_non_fiff(tmpdir): + """Test rendering non-FIFF files for mne report.""" + tempdir = str(tmpdir) + fnames_in = [bdf_fname, edf_fname] + fnames_out = [] + for fname in fnames_in: + basename = op.basename(fname) + basename, ext = op.splitext(basename) + fname_out = f'{basename}_raw{ext}' + outpath = op.join(tempdir, fname_out) + shutil.copyfile(fname, outpath) + fnames_out.append(fname_out) + + report = Report() + report.parse_folder(data_path=tempdir, render_bem=False, on_error='raise') + + # Check correct paths and filenames + for fname in fnames_out: + assert (op.basename(fname) in + [op.basename(x) for x in report.fnames]) + + assert len(report.fnames) == len(fnames_out) + assert len(report.html) == len(report.fnames) + assert len(report.fnames) == len(report) + + report.data_path = tempdir + fname = op.join(tempdir, 'report.html') + report.save(fname=fname, open_browser=False) + with open(fname, 'rb') as fid: + html = fid.read().decode('utf-8') + + assert '
Raw: test_raw.bdf' in html
+    assert 'Raw: test_raw.edf
' in html + + @testing.requires_testing_data def test_report_raw_psd_and_date(tmpdir): """Test report raw PSD and DATE_NONE functionality.""" @@ -396,7 +435,7 @@ def test_validate_input(): comments=comments[:-1]) values = report._validate_input(items, captions, section, comments=None) items_new, captions_new, comments_new = values - assert_equal(len(comments_new), len(items)) + assert len(comments_new) == len(items) @requires_h5py diff --git a/server_environment.yml b/server_environment.yml index 86c3fda05b6..ce41c0c774d 100644 --- a/server_environment.yml +++ b/server_environment.yml @@ -15,7 +15,7 @@ dependencies: - pyvista - nilearn - nibabel -- nbformat +- nbformat <5.1 # XXX remove pinning once https://github.com/jupyter/nbformat/issues/206 has been fixed - nbclient - mffpy>=0.5.7 - pip: From 3a83e2cc6b5323e7b8bf97d1f9b0daa3698c1f1d Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 15 Jan 2021 22:04:12 +0100 Subject: [PATCH 066/387] MRG, VIZ: Fix title position in plot_sensors (#8752) * Fix title position in plot_sensors * Move title to window title * Add changelog entry * Set window title only in new figure * Adjust top border accordingly * Apply suggestions from code review Co-authored-by: Daniel McCloy Co-authored-by: Daniel McCloy --- doc/changes/latest.inc | 2 ++ mne/viz/utils.py | 9 ++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index eb624417375..125a40c2e35 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -45,6 +45,8 @@ Bugs - Fix anonymization issue of FIF files after IO round trip (:gh:`8731` by `Alex Gramfort`_) +- Fix title not shown in :func:`mne.viz.plot_montage` (:gh:`8752` by `Clemens Brunner`_) + API changes ~~~~~~~~~~~ diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 4eec3cd8cd4..9c88fdb4a76 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -1045,9 +1045,8 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, # Equal aspect for 3D looks bad, so only use for 2D ax.set(aspect='equal') - if axes_was_none: - fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, - hspace=None) + if axes_was_none: # we'll show the plot title as the window title + fig.subplots_adjust(left=0, bottom=0, right=1, top=1) ax.axis("off") # remove border around figure del sphere @@ -1069,8 +1068,8 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos, ch_names=ch_names, show_names=show_names) fig.canvas.mpl_connect('pick_event', picker) - - ax.set(title=title) + if axes_was_none: + _set_window_title(fig, title) closed = partial(_close_event, fig=fig) fig.canvas.mpl_connect('close_event', closed) plt_show(show, block=block) From e4fcd77a60bc50085640aec34e9fba1f1c74a8b3 Mon Sep 17 00:00:00 2001 From: Richard Koehler Date: Fri, 15 Jan 2021 22:25:03 +0100 Subject: [PATCH 067/387] Add "dbs" as new channel type (#8739) * Add current commit fiff-constants to test_constants.py * Add DBS as channel type * Add DBS as channel type * Add DBS as channel type * Add "dbs" as new channel type * Update latest.inc * Update doc/changes/latest.inc Adedd gh pull number Co-authored-by: Alexandre Gramfort * Fix tests and API compatibility * Add dbs to topomap and bugfixes * Update test_topo.py * bugfix documentation and added test * Bugfix tests * Add missing space to mne/io/constants.py Co-authored-by: Alexandre Gramfort * Apply suggestions from code review Co-authored-by: Alexandre 
Gramfort Co-authored-by: Eric Larson * Update test_pick.py Co-authored-by: Alexandre Gramfort Co-authored-by: Eric Larson --- codemeta.json | 3 +- doc/_includes/channel_types.rst | 2 + doc/_includes/units.rst | 2 +- doc/changes/latest.inc | 3 ++ doc/changes/names.inc | 2 + mne/channels/channels.py | 26 +++++++------ mne/channels/montage.py | 8 ++-- mne/channels/tests/test_channels.py | 10 +++-- mne/channels/tests/test_montage.py | 4 +- mne/cov.py | 16 +++++--- mne/defaults.py | 32 ++++++++-------- mne/epochs.py | 4 +- mne/evoked.py | 10 +++-- mne/io/array/array.py | 2 +- mne/io/array/tests/test_array.py | 6 ++- mne/io/constants.py | 2 + mne/io/fiff/tests/test_raw_fiff.py | 8 ++-- mne/io/meas_info.py | 6 +-- mne/io/nicolet/nicolet.py | 2 +- mne/io/pick.py | 37 ++++++++++++------- mne/io/reference.py | 10 ++--- mne/io/tests/test_constants.py | 2 +- mne/io/tests/test_pick.py | 25 +++++++++++++ mne/io/tests/test_reference.py | 28 +++++++++++--- mne/preprocessing/ica.py | 13 ++++--- mne/preprocessing/tests/test_ica.py | 24 ++++++++++++ mne/source_estimate.py | 10 ++--- mne/tests/test_epochs.py | 29 ++++++++++----- mne/tests/test_filter.py | 6 +-- mne/utils/docs.py | 4 +- mne/viz/_3d.py | 17 +++++---- mne/viz/raw.py | 2 +- mne/viz/tests/test_topo.py | 8 ++++ mne/viz/topomap.py | 3 ++ mne/viz/utils.py | 11 +++--- tools/generate_codemeta.py | 3 +- tutorials/raw/plot_10_raw_overview.py | 6 +-- .../plot_creating_data_structures.py | 2 +- 38 files changed, 259 insertions(+), 129 deletions(-) diff --git a/codemeta.json b/codemeta.json index 64cec7f5211..2ff4460bd9e 100644 --- a/codemeta.json +++ b/codemeta.json @@ -19,7 +19,8 @@ "EEG", "fNIRS", "ECoG", - "sEEG" + "sEEG", + "DBS" ], "programmingLanguage": [ "Python" diff --git a/doc/_includes/channel_types.rst b/doc/_includes/channel_types.rst index fc370fb36bb..be2abe2a50c 100644 --- a/doc/_includes/channel_types.rst +++ b/doc/_includes/channel_types.rst @@ -36,6 +36,8 @@ ecg Electrocardiography (ECG) Volts seeg Stereotactic EEG channels Volts +dbs Deep brain stimulation (DBS) Volts + ecog Electrocorticography (ECoG) Volts fnirs (hbo) Functional near-infrared spectroscopy Moles/liter diff --git a/doc/_includes/units.rst b/doc/_includes/units.rst index f1319c3d281..f37f03ebaf4 100644 --- a/doc/_includes/units.rst +++ b/doc/_includes/units.rst @@ -14,7 +14,7 @@ Irrespective of the units used in your manufacturer's format, when importing data, MNE-Python will always convert measurements to the same standard units. Thus the in-memory representation of data are always in: -- Volts (eeg, eog, seeg, emg, ecg, bio, ecog) +- Volts (eeg, eog, seeg, emg, ecg, bio, ecog, dbs) - Teslas (magnetometers) - Teslas/meter (gradiometers) - Amperes*meter (dipole fits, minimum-norm estimates, etc.) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 125a40c2e35..47f4726929e 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -14,9 +14,12 @@ Current (0.23.dev0) .. |New Contributor| replace:: **New Contributor** +.. 
|Richard Koehler| replace:: **Richard Koehler** Enhancements ~~~~~~~~~~~~ +- Add dbs as new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_) + - Update the ``notebook`` 3d backend to use ``ipyvtk_simple`` for a better integration within ``Jupyter`` (:gh:`8503` by `Guillaume Favelier`_) - Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to :meth:`mne.Report.add_bem_to_section` (:gh:`8723` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index b28dfe35dfe..5d7865085eb 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -345,3 +345,5 @@ .. _Jeff Stout: https://megcore.nih.gov/index.php/Staff .. _Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774 + +.. _Richard Koehler: https://github.com/richardkoehler diff --git a/mne/channels/channels.py b/mne/channels/channels.py index cb187842e3d..2b3eef06dad 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -82,7 +82,7 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr', - 'ecog', 'seeg'] + 'ecog', 'seeg', 'dbs'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: if isinstance(inst, Info): @@ -257,7 +257,7 @@ def get_montage(self): # get the channel names and chs data structure ch_names, chs = self.info['ch_names'], self.info['chs'] picks = pick_types(self.info, meg=False, eeg=True, - seeg=True, ecog=True) + seeg=True, ecog=True, dbs=True) # channel positions from dig do not match ch_names one to one, # so use loc[:3] instead @@ -410,8 +410,8 @@ def set_channel_types(self, mapping, verbose=None): ----- The following sensor types are accepted: - ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, - hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, + ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, + ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, fnirs_fd_phase, fnirs_od .. versionadded:: 0.9.0 @@ -446,7 +446,7 @@ def set_channel_types(self, mapping, verbose=None): unit_changes[this_change] = list() unit_changes[this_change].append(ch_name) self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] - if ch_type in ['eeg', 'seeg', 'ecog']: + if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']: coil_type = FIFF.FIFFV_COIL_EEG elif ch_type == 'hbo': coil_type = FIFF.FIFFV_COIL_FNIRS_HBO @@ -509,9 +509,9 @@ def plot_sensors(self, kind='topomap', ch_type=None, title=None, figure instance. Defaults to 'topomap'. ch_type : None | str The channel type to plot. Available options 'mag', 'grad', 'eeg', - 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, - eeg, seeg and ecog channels are plotted. If None (default), then - channels are chosen in the order given above. + 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag, + grad, eeg, seeg, dbs, and ecog channels are plotted. If + None (default), then channels are chosen in the order given above. title : str | None Title for the figure. If None (default), equals to ``'Sensor positions (%%s)' %% ch_type``. 
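# A short sketch of the new channel type from the user side, assuming this
# patch is applied; the channel names are invented. Re-typing 'eeg' -> 'dbs'
# keeps Volts as the unit, so no unit-change warning is expected here.
import numpy as np

import mne

info = mne.create_info(['EEG 001', 'EEG 002', 'LFP 01', 'LFP 02'],
                       sfreq=1000., ch_types='eeg')
raw = mne.io.RawArray(np.zeros((4, 1000)), info)
raw.set_channel_types({'LFP 01': 'dbs', 'LFP 02': 'dbs'})
assert raw.get_channel_types()[2:] == ['dbs', 'dbs']
# pick_types gains a matching ``dbs`` switch (see the hunk below):
picks = mne.pick_types(raw.info, eeg=False, dbs=True)
assert list(picks) == [2, 3]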
@@ -656,9 +656,9 @@ class UpdateChannelsMixin(object): def pick_types(self, meg=False, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=False, dipole=False, gof=False, bio=False, ecog=False, - fnirs=False, csd=False, include=(), exclude='bads', - selection=None, verbose=None): + seeg=False, dipole=False, gof=False, bio=False, + ecog=False, fnirs=False, csd=False, dbs=False, include=(), + exclude='bads', selection=None, verbose=None): """Pick some channels by type and names. Parameters @@ -712,6 +712,8 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, include channels measuring deoxyhemoglobin). csd : bool EEG-CSD channels. + dbs : bool + Deep brain stimulation channels. include : list of str List of additional channels to include. If empty do not include any. @@ -739,7 +741,7 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio, - ecog=ecog, fnirs=fnirs, include=include, exclude=exclude, + ecog=ecog, fnirs=fnirs, dbs=dbs, include=include, exclude=exclude, selection=selection) self._pick_drop_channels(idx) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 5dc268a062c..c184779f711 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -707,16 +707,16 @@ def _backcompat_value(pos, ref_pos): # get the channels in the montage in head ch_pos = mnt_head._get_ch_pos() - # only get the eeg, seeg, ecog channels + # only get the eeg, seeg, dbs, ecog channels _pick_chs = partial( - pick_types, exclude=[], eeg=True, seeg=True, ecog=True, meg=False, - ) + pick_types, exclude=[], eeg=True, seeg=True, dbs=True, ecog=True, + meg=False) # get the reference position from the loc[3:6] chs = info['chs'] ref_pos = [chs[ii]['loc'][3:6] for ii in _pick_chs(info)] - # keep reference location from EEG/ECoG/SEEG channels if they + # keep reference location from EEG/ECoG/SEEG/DBS channels if they # already exist and are all the same. 
custom_eeg_ref_dig = False # Note: ref position is an empty list for fieldtrip data diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 06dec3155ec..922020cb7d5 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -121,9 +121,9 @@ def test_set_channel_types(): with pytest.raises(ValueError, match='cannot change to this channel type'): raw.set_channel_types(mapping) # Test changing type if in proj - mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog', - 'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg', - 'MEG 2442': 'hbo'} + mapping = {'EEG 057': 'dbs', 'EEG 058': 'ecog', 'EEG 059': 'ecg', + 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', + 'MEG 2443': 'eeg', 'MEG 2442': 'hbo'} raw2 = read_raw_fif(raw_fname) raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): @@ -132,6 +132,10 @@ def test_set_channel_types(): with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): raw2 = raw2.set_channel_types(mapping) info = raw2.info + assert info['chs'][371]['ch_name'] == 'EEG 057' + assert info['chs'][371]['kind'] == FIFF.FIFFV_DBS_CH + assert info['chs'][371]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][371]['coil_type'] == FIFF.FIFFV_COIL_EEG assert info['chs'][372]['ch_name'] == 'EEG 058' assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index bca551db139..6af3592f1a6 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -1201,7 +1201,7 @@ def test_set_montage_with_sub_super_set_of_ch_names(): def test_heterogeneous_ch_type(): """Test ch_names matching criteria with heterogeneous ch_type.""" - VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg') + VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg', 'dbs') montage = _make_toy_dig_montage( n_channels=len(VALID_MONTAGE_NAMED_CHS), @@ -1210,7 +1210,7 @@ def test_heterogeneous_ch_type(): # Montage and info match info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS)) - RawArray(np.zeros((3, 1)), info, copy=None).set_montage(montage) + RawArray(np.zeros((4, 1)), info, copy=None).set_montage(montage) def test_set_montage_coord_frame_in_head_vs_unknown(): diff --git a/mne/cov.py b/mne/cov.py index 72e33bec09f..6a679258bc5 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1252,10 +1252,11 @@ def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, class _RegCovariance(BaseEstimator): """Aux class.""" - def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, + def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, + ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, - csd=0.1, store_precision=False, assume_centered=False): + csd=0.1, dbs=0.1, store_precision=False, + assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) 
be combined into # a single dictionary @@ -1263,6 +1264,7 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.mag = mag self.eeg = eeg self.seeg = seeg + self.dbs = dbs self.ecog = ecog self.hbo = hbo self.hbr = hbr @@ -1289,7 +1291,7 @@ def fit(self, X): cov_ = regularize( cov_, self.info, proj=False, exclude='bads', grad=self.grad, mag=self.mag, eeg=self.eeg, - ecog=self.ecog, seeg=self.seeg, + ecog=self.ecog, seeg=self.seeg, dbs=self.dbs, hbo=self.hbo, hbr=self.hbr, rank='full') self.estimator_.covariance_ = self.covariance_ = cov_.data return self @@ -1549,7 +1551,7 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, - fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1600,6 +1602,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', Regularization factor for fNIRS optical density signals. csd : float (default 0.1) Regularization factor for EEG-CSD signals. + dbs : float (default 0.1) + Regularization factor for DBS signals. %(rank_None)s .. versionadded:: 0.17 @@ -1625,7 +1629,7 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', cov = cov.copy() info._check_consistency() scalings = _handle_default('scalings_cov_rank', scalings) - regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, + regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) diff --git a/mne/defaults.py b/mne/defaults.py index 9e86df26191..0ef75308edf 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -9,38 +9,38 @@ DEFAULTS = dict( color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m', emg='k', ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', - exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', - gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', + exci='k', ias='k', syst='k', seeg='saddlebrown', dbs='seagreen', + dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', fnirs_fd_phase='k', fnirs_od='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', - misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', - ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', + misc='AU', seeg='mV', dbs='µV', dipole='nAm', gof='GOF', + bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, - misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, - hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1., - fnirs_od=1.0, csd=1e3), + misc=1.0, seeg=1e3, dbs=1e6, ecog=1e6, dipole=1e9, gof=1.0, + bio=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, + fnirs_cw_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, - seeg=1e-4, 
bio=1e-6, ecog=1e-4, hbo=10e-6, + seeg=1e-4, dbs=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings - seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), + seeg=1e1, dbs=1e4, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), - misc=(-5., 5.), seeg=(-20., 20.), dipole=(-100., 100.), - gof=(0., 1.), bio=(-500., 500.), ecog=(-200., 200.), hbo=(0, 20), - hbr=(0, 20), csd=(-50., 50.)), + misc=(-5., 5.), seeg=(-20., 20.), dbs=(-200., 200.), + dipole=(-100., 100.), gof=(0., 1.), bio=(-500., 500.), + ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.)), titles=dict(mag='Magnetometers', grad='Gradiometers', eeg='EEG', eog='EOG', - ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', bio='BIO', - dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', + ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', dbs='DBS', + bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', @@ -63,6 +63,7 @@ eeg_scale=4e-3, eegp_scale=20e-3, eegp_height=0.1, ecog_scale=5e-3, seeg_scale=5e-3, + dbs_scale=5e-3, fnirs_scale=5e-3, source_scale=5e-3, detector_scale=5e-3, @@ -73,6 +74,7 @@ extra_color=(1., 1., 1.), eeg_color=(1., 0.596, 0.588), eegp_color=(0.839, 0.15, 0.16), ecog_color=(1., 1., 1.), + dbs_color=(0.82, 0.455, 0.659), seeg_color=(1., 1., .3), fnirs_color=(1., .647, 0.), source_color=(1., .05, 0.), diff --git a/mne/epochs.py b/mne/epochs.py index d2a0fff1350..2785174954e 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -828,7 +828,7 @@ def _detrend_offset_decim(self, epoch, verbose=None): picks = pick_types(self.info, meg=True, eeg=True, stim=False, ref_meg=True, eog=True, ecg=True, seeg=True, emg=True, bio=True, ecog=True, fnirs=True, - exclude=[]) + dbs=True, exclude=[]) epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline, copy=False, verbose=False) @@ -2299,7 +2299,7 @@ class EpochsArray(BaseEpochs): ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr diff --git a/mne/evoked.py b/mne/evoked.py index f9181476cd6..821be2ffc80 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -563,7 +563,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. 
versionadded:: 0.16 """ # noqa: E501 - supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', + supported = ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'misc', 'None') + _FNIRS_CH_TYPES_SPLIT types_used = self.get_channel_types(unique=True, only_data_chs=True) @@ -585,7 +585,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, raise ValueError('Negative mode (mode=neg) does not make ' 'sense with merge_grads=True') - meg = eeg = misc = seeg = ecog = fnirs = False + meg = eeg = misc = seeg = dbs = ecog = fnirs = False picks = None if ch_type in ('mag', 'grad'): meg = ch_type @@ -595,6 +595,8 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, misc = True elif ch_type == 'seeg': seeg = True + elif ch_type == 'dbs': + dbs = True elif ch_type == 'ecog': ecog = True elif ch_type in _FNIRS_CH_TYPES_SPLIT: @@ -606,7 +608,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, else: picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc, seeg=seeg, ecog=ecog, ref_meg=False, - fnirs=fnirs) + fnirs=fnirs, dbs=dbs) data = self.data ch_names = self.ch_names @@ -732,7 +734,7 @@ class EvokedArray(Evoked): Notes ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr diff --git a/mne/io/array/array.py b/mne/io/array/array.py index 47477c4fbf0..945938541fe 100644 --- a/mne/io/array/array.py +++ b/mne/io/array/array.py @@ -42,7 +42,7 @@ class RawArray(BaseRaw): Notes ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index 4d6966ed67e..58a4c8512aa 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -83,8 +83,9 @@ def test_array_raw(): types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) - types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels + types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels types.extend(['stim'] * 9) + types.extend(['dbs']) # really eeg channel types.extend(['eeg'] * 60) picks = np.concatenate([pick_types(raw.info, meg=True)[::20], pick_types(raw.info, meg=False, stim=True), @@ -139,7 +140,8 @@ def test_array_raw(): # plotting raw2.plot() - raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False) + raw2.plot_psd(tmax=2., average=True, n_fft=1024, + spatial_colors=False) plt.close('all') # epoching diff --git a/mne/io/constants.py b/mne/io/constants.py index 4f791feba93..4dfd3e1cc1f 100644 --- a/mne/io/constants.py +++ b/mne/io/constants.py @@ -176,6 +176,7 @@ FIFF.FIFFV_MISC_CH = 502 FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG +FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only) FIFF.FIFFV_ECOG_CH = 902 FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only) @@ -196,6 +197,7 @@ FIFF.FIFFV_MISC_CH, FIFF.FIFFV_RESP_CH, FIFF.FIFFV_SEEG_CH, + FIFF.FIFFV_DBS_CH, FIFF.FIFFV_SYST_CH, FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_IAS_CH, diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 38dfacdfd57..1d140cdc075 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -973,16 +973,16 @@ def test_filter(): def test_filter_picks(): """Test filtering default channel picks.""" - ch_types = 
['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog', 'hbo', - 'hbr'] + ch_types = ['mag', 'grad', 'eeg', 'seeg', 'dbs', 'misc', 'stim', 'ecog', + 'hbo', 'hbr'] info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256) raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info) # -- Deal with meg mag grad and fnirs exceptions - ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog') + ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'dbs', 'ecog') # -- Filter data channels - for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr'): + for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'hbo', 'hbr'): picks = {ch: ch == ch_type for ch in ch_types} picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 04106bf8931..5022d444194 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -1948,8 +1948,8 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): Channel types, default is ``'misc'`` which is not a :term:`data channel `. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', - 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'. - If str, then all channels are assumed to be of the same type. + 'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' + or 'hbo'. If str, then all channels are assumed to be of the same type. %(verbose)s Returns @@ -1968,7 +1968,7 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): be initialized to the identity transform. Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr diff --git a/mne/io/nicolet/nicolet.py b/mne/io/nicolet/nicolet.py index 50aba9f5f33..2ae8869f095 100644 --- a/mne/io/nicolet/nicolet.py +++ b/mne/io/nicolet/nicolet.py @@ -29,7 +29,7 @@ def read_raw_nicolet(input_fname, ch_type, eog=(), Path to the data file. ch_type : str Channel type to designate to the data channels. Supported data types - include 'eeg', 'seeg'. + include 'eeg', 'dbs'. eog : list | tuple | 'auto' Names of channels or list of indices that should be designated EOG channels. If 'auto', the channel names beginning with diff --git a/mne/io/pick.py b/mne/io/pick.py index 4a7ae650994..e78dc91bf7d 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -49,6 +49,9 @@ def get_channel_type_constants(include_defaults=False): seeg=dict(kind=FIFF.FIFFV_SEEG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG), + dbs=dict(kind=FIFF.FIFFV_DBS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), ecog=dict(kind=FIFF.FIFFV_ECOG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG), @@ -127,6 +130,7 @@ def get_channel_type_constants(include_defaults=False): FIFF.FIFFV_IAS_CH: 'ias', FIFF.FIFFV_SYST_CH: 'syst', FIFF.FIFFV_SEEG_CH: 'seeg', + FIFF.FIFFV_DBS_CH: 'dbs', FIFF.FIFFV_BIO_CH: 'bio', FIFF.FIFFV_QUAT_0: 'chpi', FIFF.FIFFV_QUAT_1: 'chpi', @@ -181,8 +185,8 @@ def channel_type(info, idx): Type of channel. Will be one of:: {'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg', - 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'bio', - 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'} + 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs', + 'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'} """ # This is faster than the original _channel_type_old now in test_pick.py # because it uses (at most!) 
two dict lookups plus one conditional @@ -366,7 +370,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=False, dipole=False, gof=False, bio=False, ecog=False, fnirs=False, csd=False, - include=(), exclude='bads', selection=None): + dbs=False, include=(), exclude='bads', selection=None): """Pick channels by type and names. Parameters @@ -421,6 +425,8 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, include channels measuring deoxyhemoglobin). csd : bool Current source density channels. + dbs : bool + Deep brain stimulation channels. include : list of str List of additional channels to include. If empty do not include any. exclude : list of str | str @@ -449,7 +455,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, len(info['comps']) > 0 and meg is not False) for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci, - ias, syst, seeg, dipole, gof, bio, ecog, csd): + ias, syst, seeg, dipole, gof, bio, ecog, csd, dbs): if not isinstance(param, bool): w = ('Parameters for all channel types (with the exception of ' '"meg", "ref_meg" and "fnirs") must be of type bool, not {}.') @@ -457,7 +463,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, misc=misc, resp=resp, chpi=chpi, exci=exci, - ias=ias, syst=syst, seeg=seeg, dipole=dipole, + ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole, gof=gof, bio=bio, ecog=ecog, csd=csd) # avoid triage if possible if isinstance(meg, bool): @@ -705,7 +711,7 @@ def pick_channels_forward(orig, include=[], exclude=[], ordered=False, def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, - ecog=False, include=[], exclude=[]): + ecog=False, dbs=False, include=[], exclude=[]): """Pick by channel type and names from a forward operator. Parameters @@ -724,6 +730,8 @@ def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, If True include stereotactic EEG channels. ecog : bool If True include electrocorticography channels. + dbs : bool + If True include deep brain stimulation channels. include : list of str List of additional channels to include. If empty do not include any. exclude : list of str | str @@ -736,8 +744,8 @@ def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, Forward solution restricted to selected channel types. 
""" info = orig['info'] - sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog, - include=include, exclude=exclude) + sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, + ecog=ecog, dbs=dbs, include=include, exclude=exclude) if len(sel) == 0: raise ValueError('No valid channels found') include_ch_names = [info['ch_names'][k] for k in sel] @@ -956,21 +964,22 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): _PICK_TYPES_DATA_DICT = dict( meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False, misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) + seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True, + dbs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') _FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') _DATA_CH_TYPES_ORDER_DEFAULT = ( 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', - 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', - 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) + 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', + 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ( - 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', - 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') + 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', + 'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') _DATA_CH_TYPES_SPLIT = ( - 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog') + _FNIRS_CH_TYPES_SPLIT + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT def _pick_data_channels(info, exclude='bads', with_ref_meg=True): diff --git a/mne/io/reference.py b/mne/io/reference.py index ce9c6418ebd..f1733a1f719 100644 --- a/mne/io/reference.py +++ b/mne/io/reference.py @@ -113,8 +113,8 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, data[..., ref_to, :] -= ref_data ref_data = ref_data[..., 0, :] - # If the reference touches EEG/ECoG/sEEG electrodes, note in the info - # that a non-CAR has been applied. + # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the + # info that a non-CAR has been applied. if len(np.intersect1d(ref_to, eeg_idx)) > 0: inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON # REST @@ -355,11 +355,11 @@ def set_eeg_reference(inst, ref_channels='average', copy=True, def _get_ch_type(inst, ch_type): _validate_type(ch_type, str, 'ch_type') - _check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg')) + _check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg', 'dbs')) # if ch_type is 'auto', search through list to find first reasonable # reference-able channel type. if ch_type == 'auto': - for type_ in ['eeg', 'ecog', 'seeg']: + for type_ in ['eeg', 'ecog', 'seeg', 'dbs']: if type_ in inst: ch_type = type_ logger.info('%s channel type selected for ' @@ -367,7 +367,7 @@ def _get_ch_type(inst, ch_type): break # if auto comes up empty, or the user specifies a bad ch_type. 
else: - raise ValueError('No EEG, ECoG or sEEG channels found ' + raise ValueError('No EEG, ECoG, sEEG or DBS channels found ' 'to rereference.') return ch_type diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index 020217c0ba0..ca5f613e321 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -19,7 +19,7 @@ # https://github.com/mne-tools/fiff-constants/commits/master -commit = '198d943d0ff92ecdfb947b84af6289a0e79ad060' +commit = '5bd84d224de502bee66f70b7867b8f45b45264c1' # These are oddities that we won't address: iod_dups = (355, 359) # these are in both MEGIN and MNE files diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py index 732dae85b91..14689bff189 100644 --- a/mne/io/tests/test_pick.py +++ b/mne/io/tests/test_pick.py @@ -244,6 +244,31 @@ def test_pick_seeg_ecog(): assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0) +def test_pick_dbs(): + """Test picking with DBS.""" + # gh-8739 + names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split() + types = 'mag mag eeg eeg dbs dbs dbs'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('dbs', [4, 5, 6])] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6]) + for i, t in enumerate(types): + assert channel_type(info, i) == types[i] + raw = RawArray(np.zeros((len(names), 7)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs(raw, events=events, event_id={'event': 0}, + tmin=-1e-5, tmax=1e-5, + baseline=(0, 0)) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True)) + e_dbs = evoked.copy().pick_types(meg=False, dbs=True) + for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]): + assert lt == rt + raw = read_raw_fif(op.join(io_dir, 'tests', 'data', + 'test_chpi_raw_sss.fif')) + assert len(pick_types(raw.info, meg=False, dbs=True)) == 0 + + def test_pick_chpi(): """Test picking cHPI.""" # Make sure we don't mis-classify cHPI channels diff --git a/mne/io/tests/test_reference.py b/mne/io/tests/test_reference.py index 0cdc6bb1e73..d2e0faeea43 100644 --- a/mne/io/tests/test_reference.py +++ b/mne/io/tests/test_reference.py @@ -238,21 +238,37 @@ def test_set_eeg_reference(): set_eeg_reference(raw, ['EEG 001'], True, True) -@pytest.mark.parametrize('ch_type', ('auto', 'ecog')) +@pytest.mark.parametrize('ch_type', ('auto', 'ecog', 'dbs')) def test_set_eeg_reference_ch_type(ch_type): - """Test setting EEG reference for ECoG.""" + """Test setting EEG reference for ECoG or DBS.""" # gh-6454 + # gh-8739 added DBS + ch_names = ['ECOG01', 'ECOG02', 'DBS01', 'DBS02', 'MISC'] rng = np.random.RandomState(0) - data = rng.randn(3, 1000) - raw = RawArray(data, create_info(3, 1000., ['ecog'] * 2 + ['misc'])) + data = rng.randn(5, 1000) + raw = RawArray(data, create_info(ch_names, 1000., ['ecog'] * 2 + + ['dbs'] * 2 + ['misc'])) + if ch_type == 'auto': + + ref_ch = ch_names[:2] + else: + ref_ch = raw.copy().pick(picks=ch_type).ch_names with catch_logging() as log: reref, ref_data = set_eeg_reference(raw.copy(), ch_type=ch_type, verbose=True) - assert 'Applying a custom ECoG' in log.getvalue() + if ch_type in ['auto', 'ecog']: + assert 'Applying a custom ECoG' in log.getvalue() + else: + assert 'Applying a custom DBS' in log.getvalue() assert reref.info['custom_ref_applied'] # gh-7350 - _test_reference(raw, reref, ref_data, ['0', '1']) + _test_reference(raw, reref, ref_data, ref_ch) with pytest.raises(ValueError, 
match='No channels supplied'): set_eeg_reference(raw, ch_type='eeg') + # gh-8739 + raw2 = RawArray(data, create_info(5, 1000., ['mag'] * 4 + ['misc'])) + with pytest.raises(ValueError, match='No EEG, ECoG, sEEG or DBS channels ' + 'found to rereference.'): + set_eeg_reference(raw2, ch_type='auto') @testing.requires_testing_data diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index 888144a6296..fa6b1795348 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -108,7 +108,8 @@ def _check_for_unsupported_ica_channels(picks, info, allow_ref_meg=False): """Check for channels in picks that are not considered valid channels. Accepted channels are the data channels - ('seeg','ecog','eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog' and 'ref_meg' + ('seeg', 'dbs', 'ecog', 'eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog' + and 'ref_meg'. This prevents the program from crashing without feedback when a bad channel is provided to ICA whitening. """ @@ -461,8 +462,8 @@ def fit(self, inst, picks=None, start=None, stop=None, decim=None, within ``start`` and ``stop`` are used. reject : dict | None Rejection parameters based on peak-to-peak amplitude. - Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg', - 'hbo', 'hbr'. + Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog', + 'ecg', 'hbo', 'hbr'. If reject is None then no rejection is done. Example:: reject = dict(grad=4000e-13, # T / m (gradiometers) @@ -474,8 +475,8 @@ def fit(self, inst, picks=None, start=None, stop=None, decim=None, It only applies if ``inst`` is of type Raw. flat : dict | None Rejection parameters based on flatness of signal. - Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg', - 'hbo', 'hbr'. + Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog', + 'ecg', 'hbo', 'hbr'. Values are floats that set the minimum acceptable peak-to-peak amplitude. If flat is None then no rejection is done. It only applies if ``inst`` is of type Raw. 
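# A hedged sketch of fitting ICA on DBS-only data with the newly valid
# ``reject`` key; the data are synthetic and the threshold is arbitrary.
import numpy as np

import mne
from mne.preprocessing import ICA

info = mne.create_info(10, sfreq=1000., ch_types='dbs')
raw = mne.io.RawArray(np.random.RandomState(0).randn(10, 5000) * 1e-5, info)
ica = ICA(n_components=5, method='infomax', max_iter=200, random_state=0)
ica.fit(raw, reject=dict(dbs=1e-3))  # peak-to-peak rejection on DBS channels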
@@ -603,6 +604,8 @@ def _compute_pre_whitener(self, data): if _contains_ch_type(info, ch_type): if ch_type == 'seeg': this_picks = pick_types(info, meg=False, seeg=True) + elif ch_type == 'dbs': + this_picks = pick_types(info, meg=False, dbs=True) elif ch_type == 'ecog': this_picks = pick_types(info, meg=False, ecog=True) elif ch_type == 'eeg': diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index 7971c674a26..96bd5f72fe8 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -1357,4 +1357,28 @@ def _assert_ica_attributes(ica, data=None, limits=(1.0, 70)): assert norms.max() < limits[1], 'Not roughly unity' +@pytest.mark.parametrize("ch_type", ["dbs", "seeg"]) +def test_ica_ch_types(ch_type): + """Test ica with different channel types.""" + # gh-8739 + data = np.random.RandomState(0).randn(10, 1000) + info = create_info(10, 1000., ch_type) + raw = RawArray(data, info) + events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, + duration=0.1) + epochs = Epochs(raw, events, None, -0.1, 0.1, preload=True, proj=False) + evoked = epochs.average() + # test fit + method = 'infomax' + for inst in [raw, epochs]: + ica = ICA(n_components=2, max_iter=2, method=method) + with pytest.warns(None): + ica.fit(inst, verbose=True) + _assert_ica_attributes(ica) + # test apply and get_sources + for inst in [raw, epochs, evoked]: + ica.apply(inst) + ica.get_sources(inst) + + run_tests_if_main() diff --git a/mne/source_estimate.py b/mne/source_estimate.py index de9ccb98034..08f66eee61f 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3210,12 +3210,12 @@ def extract_label_time_course(stcs, labels, src, mode='auto', @verbose def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', project=True, subjects_dir=None, src=None, verbose=None): - """Create a STC from ECoG and sEEG sensor data. + """Create a STC from ECoG, sEEG and DBS sensor data. Parameters ---------- evoked : instance of Evoked - The evoked data. Must contain ECoG, or sEEG channels. + The evoked data. Must contain ECoG, sEEG or DBS channels. %(trans)s subject : str The subject name. @@ -3271,7 +3271,7 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', ``distance`` (beyond which it is zero). If creating a Volume STC, ``src`` must be passed in, and this - function will project sEEG sensors to nearby surrounding vertices. + function will project sEEG and DBS sensors to nearby surrounding vertices. Then the activation at each volume vertex is given by the mode in the same way as ECoG surface projections. 
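
Only ECoG, sEEG and DBS channels are used by ``stc_near_sensors``; a minimal
sketch of the pick step it relies on (synthetic data, made-up channel
names)::

    import numpy as np
    import mne

    info = mne.create_info(['EEG 01', 'SE 01', 'SE 02', 'DBS 01'], 1000.,
                           ['eeg', 'seeg', 'seeg', 'dbs'])
    evoked = mne.EvokedArray(np.zeros((4, 100)), info)
    # mirrors the internal copy: everything but ECoG/sEEG/DBS is dropped
    picked = evoked.copy().pick_types(ecog=True, seeg=True, dbs=True)
    assert picked.ch_names == ['SE 01', 'SE 02', 'DBS 01']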
@@ -3284,8 +3284,8 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', _validate_type(src, (None, SourceSpaces), 'src') _check_option('mode', mode, ('sum', 'single', 'nearest')) - # create a copy of Evoked using ecog and seeg - evoked = evoked.copy().pick_types(ecog=True, seeg=True) + # create a copy of Evoked using ecog, seeg and dbs + evoked = evoked.copy().pick_types(ecog=True, seeg=True, dbs=True) # get channel positions that will be used to pinpoint where # in the Source space we will use the evoked data diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index c1686db5c25..a586fe71f93 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -2219,19 +2219,30 @@ def test_contains(): 'proj_name'): seeg.info[key] = raw.info[key] raw.add_channels([seeg]) - tests = [(('mag', False, False), ('grad', 'eeg', 'seeg')), - (('grad', False, False), ('mag', 'eeg', 'seeg')), - ((False, True, False), ('grad', 'mag', 'seeg')), - ((False, False, True), ('grad', 'mag', 'eeg'))] - - for (meg, eeg, seeg), others in tests: - picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg) + # Add dbs channel + dbs = RawArray(np.zeros((1, len(raw.times))), + create_info(['DBS 001'], raw.info['sfreq'], 'dbs')) + for key in ('dev_head_t', 'highpass', 'lowpass', + 'dig', 'description', 'acq_pars', 'experimenter', + 'proj_name'): + dbs.info[key] = raw.info[key] + raw.add_channels([dbs]) + tests = [(('mag', False, False, False), ('grad', 'eeg', 'seeg', 'dbs')), + (('grad', False, False, False), ('mag', 'eeg', 'seeg', 'dbs')), + ((False, True, False, False), ('grad', 'mag', 'seeg', 'dbs')), + ((False, False, True, False), ('grad', 'mag', 'eeg', 'dbs'))] + + for (meg, eeg, seeg, dbs), others in tests: + picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg, + dbs=dbs) epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks_contains) if eeg: test = 'eeg' elif seeg: test = 'seeg' + elif dbs: + test = 'dbs' else: test = meg assert (test in epochs) @@ -2619,12 +2630,12 @@ def test_add_channels(): def test_seeg_ecog(): - """Test the compatibility of the Epoch object with SEEG and ECoG data.""" + """Test compatibility of the Epoch object with SEEG, DBS and ECoG data.""" n_epochs, n_channels, n_times, sfreq = 5, 10, 20, 1000. data = np.ones((n_epochs, n_channels, n_times)) events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T pick_dict = dict(meg=False, exclude=[]) - for key in ('seeg', 'ecog'): + for key in ('seeg', 'dbs', 'ecog'): info = create_info(n_channels, sfreq, key) epochs = EpochsArray(data, info, events) pick_dict.update({key: True}) diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index e3e9dc8148e..bb46ec51d02 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -733,9 +733,9 @@ def test_filter_picks(): fs = 1000. kwargs = dict(l_freq=None, h_freq=40.) 
     filt = filter_data(data, fs, **kwargs)
     # don't include seeg or stim in this list because they are in the one below
     # to ensure default cases are treated properly
-    for kind in ('eeg', 'grad', 'emg', 'misc'):
+    for kind in ('eeg', 'grad', 'emg', 'misc', 'dbs'):
         for picks in (None, [-2], kind, 'k'):
             # With always at least one data channel
             info = create_info(['s', 'k', 't'], fs, ['seeg', kind, 'stim'])
diff --git a/mne/utils/docs.py b/mne/utils/docs.py
index 26cdefc232d..4d377f91341 100644
--- a/mne/utils/docs.py
+++ b/mne/utils/docs.py
@@ -679,9 +679,9 @@
     must be set to ``False`` (the default in this case).
 """
 docdict['set_eeg_reference_ch_type'] = """
-ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg'
+ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg' | 'dbs'
     The name of the channel type to apply the reference to. If 'auto',
-    the first channel type of eeg, ecog or seeg that is found (in that
+    the first channel type of eeg, ecog, seeg or dbs that is found (in that
     order) will be selected.
 
     .. versionadded:: 0.19
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
index 15e87488206..4af33fa3005 100644
--- a/mne/viz/_3d.py
+++ b/mne/viz/_3d.py
@@ -424,8 +424,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
                    surfaces='auto', coord_frame='head',
                    meg=None, eeg='original', fwd=None,
                    dig=False, ecog=True, src=None, mri_fiducials=False,
-                   bem=None, seeg=True, fnirs=True, show_axes=False, fig=None,
-                   interaction='trackball', verbose=None):
+                   bem=None, seeg=True, fnirs=True, show_axes=False, dbs=True,
+                   fig=None, interaction='trackball', verbose=None):
     """Plot head, sensor, and source space alignment in 3D.
 
     Parameters
     ----------
@@ -520,6 +520,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
         * MEG in blue (if MEG sensors are present).
 
         .. versionadded:: 0.16
+    dbs : bool
+        If True (default), show DBS (deep brain stimulation) electrodes.
     fig : mayavi.mlab.Figure | None
         Mayavi Scene in which to plot the alignment.
         If ``None``, creates a new 600x600 pixel figure with black background.
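
A rough sketch of exercising the new ``dbs`` flag (made-up contact positions
written directly into the channel locations; needs a working 3D backend)::

    import mne

    info = mne.create_info(['DBS01', 'DBS02', 'DBS03'], 1000., 'dbs')
    for ii, ch in enumerate(info['chs']):
        ch['loc'][:3] = (0., 0.01 * (ii + 1), 0.04)  # meters, head frame
    # sensors only, no MRI surfaces, so no subject or trans is needed
    mne.viz.plot_alignment(info, surfaces=[], dbs=True, coord_frame='head')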
@@ -655,13 +657,13 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, ref_meg = 'ref' in meg meg_picks = pick_types(info, meg=True, ref_meg=ref_meg) eeg_picks = pick_types(info, meg=False, eeg=True, ref_meg=False) - fnirs_picks = pick_types(info, meg=False, eeg=False, - ref_meg=False, fnirs=True) - other_bools = dict(ecog=ecog, seeg=seeg, + fnirs_picks = pick_types(info, meg=False, eeg=False, ref_meg=False, + fnirs=True) + other_bools = dict(ecog=ecog, seeg=seeg, dbs=dbs, fnirs=(('channels' in fnirs) | ('sources' in fnirs) | ('detectors' in fnirs))) - del ecog, seeg + del ecog, seeg, dbs other_keys = sorted(other_bools.keys()) other_picks = {key: pick_types(info, meg=False, ref_meg=False, **{key: True}) for key in other_keys} @@ -866,7 +868,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, skull_alpha = dict() skull_colors = dict() hemi_val = 0.5 - max_alpha = 1.0 if len(other_picks['seeg']) == 0 else 0.75 + no_deep = all(len(other_picks[key]) == 0 for key in ('dbs', 'seeg')) + max_alpha = 1.0 if no_deep else 0.75 if src is None or (brain and any(s['type'] == 'surf' for s in src)): hemi_val = max_alpha alphas = np.linspace(max_alpha / 2., 0, 5)[:len(skull) + 1] diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 22247c9a6df..630a8629593 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -567,7 +567,7 @@ def _setup_channel_selections(raw, kind, order): misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True, ecg=True, emg=True, ref_meg=False, misc=True, resp=True, chpi=True, exci=True, ias=True, syst=True, - seeg=False, bio=True, ecog=False, fnirs=False, + seeg=False, bio=True, ecog=False, fnirs=False, dbs=False, exclude=()) if len(misc) and np.in1d(misc, order).any(): selections_dict['Misc'] = misc diff --git a/mne/viz/tests/test_topo.py b/mne/viz/tests/test_topo.py index 45d42642af2..f0ff1bdfad7 100644 --- a/mne/viz/tests/test_topo.py +++ b/mne/viz/tests/test_topo.py @@ -115,6 +115,14 @@ def return_inds(d): # to test function kwarg to zorder arg of evoked.plot evoked.set_channel_types(mapping) evoked.plot_joint() + # test DBS (gh:8739) + evoked = _get_epochs().average().pick_types('mag') + mapping = {ch_name: 'dbs' for ch_name in evoked.ch_names} + with pytest.warns(RuntimeWarning, match='The unit for'): + evoked.set_channel_types(mapping) + evoked.plot_joint() + plt.close('all') + def test_plot_topo(): """Test plotting of ERP topography.""" diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index 0b738f9d6c3..bc968392846 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -98,6 +98,9 @@ def _prepare_topomap_plot(inst, ch_type, sphere=None): elif ch_type == 'csd': picks = pick_types(info, meg=False, csd=True, ref_meg=False, exclude='bads') + elif ch_type == 'dbs': + picks = pick_types(info, meg=False, dbs=True, ref_meg=False, + exclude='bads') elif ch_type == 'seeg': picks = pick_types(info, meg=False, seeg=True, ref_meg=False, exclude='bads') diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 9c88fdb4a76..515311b365d 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -46,8 +46,9 @@ _channel_type_prettyprint = {'eeg': "EEG channel", 'grad': "Gradiometer", 'mag': "Magnetometer", 'seeg': "sEEG channel", - 'eog': "EOG channel", 'ecg': "ECG sensor", - 'emg': "EMG sensor", 'ecog': "ECoG channel", + 'dbs': "DBS channel", 'eog': "EOG channel", + 'ecg': "ECG sensor", 'emg': "EMG sensor", + 'ecog': "ECoG channel", 'misc': "miscellaneous sensor"} @@ -821,9 +822,9 @@ def plot_sensors(info, kind='topomap', 
ch_type=None, title=None,
         'topomap'.
     ch_type : None | str
         The channel type to plot. Available options 'mag', 'grad', 'eeg',
-        'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, eeg,
-        seeg and ecog channels are plotted. If None (default), then channels
-        are chosen in the order given above.
+        'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag,
+        grad, eeg, seeg, dbs and ecog channels are plotted. If None (default),
+        then channels are chosen in the order given above.
     title : str | None
         Title for the figure. If None (default), equals to
         ``'Sensor positions (%%s)' %% ch_type``.
diff --git a/tools/generate_codemeta.py b/tools/generate_codemeta.py
index 40b6a75567a..19cf3f918d4 100644
--- a/tools/generate_codemeta.py
+++ b/tools/generate_codemeta.py
@@ -112,7 +112,8 @@ def parse_name(name):
         "EEG",
         "fNIRS",
         "ECoG",
-        "sEEG"
+        "sEEG",
+        "DBS"
     ],
     "programmingLanguage": [
         "Python"
diff --git a/tutorials/raw/plot_10_raw_overview.py b/tutorials/raw/plot_10_raw_overview.py
index 08fbaa81b60..9221aa5149b 100644
--- a/tutorials/raw/plot_10_raw_overview.py
+++ b/tutorials/raw/plot_10_raw_overview.py
@@ -283,9 +283,9 @@
 # inaccurate, you can change the type of any channel with the
 # :meth:`~mne.io.Raw.set_channel_types` method. The method takes a
 # :class:`dictionary <dict>` mapping channel names to types; allowed types are
-# ``ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, hbo,
-# hbr``. A common use case for changing channel type is when using frontal EEG
-# electrodes as makeshift EOG channels:
+# ``ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, ecog,
+# hbo, hbr``. A common use case for changing channel type is when using frontal
+# EEG electrodes as makeshift EOG channels:
 
 raw.set_channel_types({'EEG_001': 'eog'})
 print(raw.copy().pick_types(meg=False, eog=True).ch_names)
diff --git a/tutorials/simulation/plot_creating_data_structures.py b/tutorials/simulation/plot_creating_data_structures.py
index f249e221e21..f7d3e27bee9 100644
--- a/tutorials/simulation/plot_creating_data_structures.py
+++ b/tutorials/simulation/plot_creating_data_structures.py
@@ -99,7 +99,7 @@
 #
 # The expected units for the different channel types are:
 #
-# - Volts: eeg, eog, seeg, emg, ecg, bio, ecog
+# - Volts: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
 # - Teslas: mag
 # - Teslas/meter: grad
 # - Molar: hbo, hbr

From 5feaee815b65ad4bf779987ee2ff9c5b154720c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 18 Jan 2021 12:02:42 +0100
Subject: [PATCH 068/387] BUG, DOC: read_raw_egi didn't support pathlib.Path;
 update read_raw() docstring (#8759)

* WIP, BUG: read_raw_egi didn't support pathlib.Path

* read_raw() should accept path-like; update docs

* Add test

* Update changelog

* Fix doc build & add note to read_raw_egi [skip azp][skip github]

* Fix typo [skip azp][skip github]

* Add missing period [skip azp][skip github]
---
 doc/changes/latest.inc       |  1 +
 mne/io/_read_raw.py          | 34 ++++++++++++++++++++++------------
 mne/io/egi/egi.py            | 14 ++++++++++----
 mne/io/egi/tests/test_egi.py |  6 ++++++
 4 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 47f4726929e..4e942b714c1 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -50,6 +50,7 @@ Bugs
 
 - Fix title not shown in :func:`mne.viz.plot_montage` (:gh:`8752` by `Clemens Brunner`_)
 
+- `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_)
 
 API changes
 ~~~~~~~~~~~
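
In practice the fix means both the EGI reader and the generic dispatcher
accept path-like objects; a sketch with a hypothetical file name::

    from pathlib import Path
    import mne

    fname = Path('data') / 'recording.mff'  # hypothetical EGI file
    raw = mne.io.read_raw_egi(fname)             # Path now accepted
    raw = mne.io.read_raw(fname, preload=False)  # dispatches on the suffix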
diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py index 2bfef58f644..d4ef31c2b7a 100644 --- a/mne/io/_read_raw.py +++ b/mne/io/_read_raw.py @@ -12,7 +12,7 @@ read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi, read_raw_eximia, read_raw_nirx, read_raw_fieldtrip, read_raw_artemis123, read_raw_nicolet, read_raw_kit, - read_raw_ctf) + read_raw_ctf, read_raw_boxy) from ..utils import fill_doc @@ -42,7 +42,8 @@ def _read_unsupported(fname, **kwargs): ".bin": read_raw_artemis123, ".data": read_raw_nicolet, ".sqd": read_raw_kit, - ".ds": read_raw_ctf} + ".ds": read_raw_ctf, + ".txt": read_raw_boxy} # known but unsupported file formats suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"), @@ -56,26 +57,35 @@ def _read_unsupported(fname, **kwargs): def read_raw(fname, *, preload=False, verbose=None, **kwargs): """Read raw file. + This function is a convenient wrapper for readers defined in `mne.io`. The + correct reader is automatically selected based on the detected file format. + All function arguments are passed to the respective reader. + + The following readers are currently supported: + + `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`, + `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`, + `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`, + `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`, + `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`, + `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`, + `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`. + Parameters ---------- - fname : str - File name to load. + fname : path-like + Name of the file to read. %(preload)s %(verbose)s **kwargs - Keyword arguments to pass to the underlying reader. For details, see - the arguments of the reader for the underlying file format. + Additional keyword arguments to pass to the underlying reader. For + details, see the arguments of the reader for the respective file + format. Returns ------- raw : mne.io.Raw Raw object. - - Notes - ----- - This function is a wrapper for specific read_raw_xxx readers defined in the - readers dict. If it does not work with a specific file, try using a - dedicated reader function (read_raw_xxx) instead. """ ext = "".join(Path(fname).suffixes) if ext in readers: diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index 98432a9bb46..7267dcbece2 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -14,7 +14,7 @@ from ..utils import _read_segments_file, _create_chs from ..meas_info import _empty_info from ..constants import FIFF -from ...utils import verbose, logger, warn +from ...utils import verbose, logger, warn, _validate_type def _read_header(fid): @@ -92,9 +92,12 @@ def read_raw_egi(input_fname, eog=None, misc=None, channel_naming='E%d', verbose=None): """Read EGI simple binary as raw object. + .. note:: This function attempts to create a synthetic trigger channel. + See the Notes section below. + Parameters ---------- - input_fname : str + input_fname : path-like Path to the raw file. Files with an extension .mff are automatically considered to be EGI's native MFF format files. eog : list or tuple @@ -135,8 +138,8 @@ def read_raw_egi(input_fname, eog=None, misc=None, Notes ----- The trigger channel names are based on the arbitrary user dependent event - codes used. However this function will attempt to generate a synthetic - trigger channel named ``STI 014`` in accordance with the general + codes used. 
However this function will attempt to generate a **synthetic
+    trigger channel** named ``STI 014`` in accordance with the general
     Neuromag / MNE naming pattern.
 
     The event_id assignment equals ``np.arange(n_events) + 1``. The resulting
@@ -147,6 +150,9 @@ def read_raw_egi(input_fname, eog=None, misc=None,
 
     This step will fail if events are not mutually exclusive.
     """
+    _validate_type(input_fname, 'path-like', 'input_fname')
+    input_fname = str(input_fname)
+
     if input_fname.endswith('.mff'):
         return _read_raw_egi_mff(input_fname, eog, misc, include,
                                  exclude, preload, channel_naming, verbose)
diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py
index 01486ab98a4..af2f38e8851 100644
--- a/mne/io/egi/tests/test_egi.py
+++ b/mne/io/egi/tests/test_egi.py
@@ -3,6 +3,7 @@
 #          simplified BSD-3 license
 
+from pathlib import Path
 import os.path as op
 import os
 import shutil
 
@@ -148,6 +149,11 @@ def test_io_egi():
 
     with pytest.warns(RuntimeWarning, match='Did not find any event code'):
         raw = read_raw_egi(egi_fname, include=None)
+
+    # The reader should accept a Path, too.
+    with pytest.warns(RuntimeWarning, match='Did not find any event code'):
+        raw = read_raw_egi(Path(egi_fname), include=None)
+
     assert 'RawEGI' in repr(raw)
     data_read, t_read = raw[:256]
     assert_allclose(t_read, t)

From 52011825822c9c79ded515c952077176f309e56e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3%B6chenberger?=
Date: Mon, 18 Jan 2021 15:10:52 +0100
Subject: [PATCH 069/387] MRG, ENH: Add Report.add_custom_css (#8762)

* ENH: Add Report.add_custom_css

This new method allows adding user-defined CSS to the report.

* Typo

* Style [skip azp][skip github]

* Add versionadded [skip azp][skip github]
---
 doc/changes/latest.inc   |  2 ++
 mne/report.py            | 16 ++++++++++++++++
 mne/tests/test_report.py | 18 ++++++++++++++++++
 3 files changed, 36 insertions(+)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 4e942b714c1..6c673a45cba 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -28,6 +28,8 @@ Enhancements
 
 - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_)
 
+- `mne.Report` has gained a new method `~mne.Report.add_custom_css` for adding user-defined styles (:gh:`8762` by `Richard Höchenberger`_)
+
 Bugs
 ~~~~
 - Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_)
diff --git a/mne/report.py b/mne/report.py
index 268a93b8f1b..c97e3699131 100644
--- a/mne/report.py
+++ b/mne/report.py
@@ -1024,6 +1024,22 @@ def _validate_input(self, items, captions, section, comments=None):
 
         return items, captions, comments
 
+    def add_custom_css(self, css):
+        """Add custom CSS to the report.
+
+        Parameters
+        ----------
+        css : str
+            Style definitions to add to the report. The content of this string
+            will be embedded between HTML ``<style></style>`` tags.
+
+        Notes
+        -----
+        .. versionadded:: 0.23
+        """
+        style = f'\n<style type="text/css">\n{css}\n</style>'
+        self.include += style
+
     def remove(self, caption, section=None):
         """Remove a figure from the report.
 
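
A minimal usage sketch (the selectors are plain HTML tags, not documented
report CSS classes)::

    import matplotlib.pyplot as plt
    import mne

    report = mne.Report()
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    report.add_figs_to_section(fig, captions='A line', section='demo')
    # restyle any <h1>/<h2> headings present in the rendered HTML
    report.add_custom_css('h1, h2 { color: crimson; }')
    report.save('report.html', open_browser=False, overwrite=True)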
diff --git a/mne/tests/test_report.py b/mne/tests/test_report.py index 77e9b9766d3..674d5b0159b 100644 --- a/mne/tests/test_report.py +++ b/mne/tests/test_report.py @@ -173,6 +173,24 @@ def test_render_report(renderer, tmpdir): report.add_figs_to_section(['foo'], 'caption', 'section') +def test_add_custom_css(tmpdir): + """Test adding custom CSS rules to the report.""" + tempdir = str(tmpdir) + fname = op.join(tempdir, 'report.html') + fig = plt.figure() # Empty figure + + report = Report() + report.add_figs_to_section(figs=fig, captions='Test section') + custom_css = '.report_custom { color: red; }' + report.add_custom_css(css=custom_css) + + assert custom_css in report.include + report.save(fname, open_browser=False) + with open(fname, 'rb') as fid: + html = fid.read().decode('utf-8') + assert custom_css in html + + @testing.requires_testing_data def test_render_non_fiff(tmpdir): """Test rendering non-FIFF files for mne report.""" From a46546ba86bfea5903d306b5598ca20b9aa6296d Mon Sep 17 00:00:00 2001 From: Stefan Appelhoff Date: Mon, 18 Jan 2021 18:21:39 +0100 Subject: [PATCH 070/387] DOC: improve glossary entry about fiducials (#8763) * improve docs for fiducials * add link to fiducials in example * typo * use get_positions in example * fix text * Apply suggestions from code review Co-authored-by: Daniel McCloy * Fix codespell error Co-authored-by: Daniel McCloy --- doc/glossary.rst | 19 ++++++++++++++++--- .../visualization/plot_eeglab_head_sphere.py | 5 ++--- tutorials/intro/plot_40_sensor_locations.py | 3 ++- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/doc/glossary.rst b/doc/glossary.rst index e2074c44188..f7ce9ad1d63 100644 --- a/doc/glossary.rst +++ b/doc/glossary.rst @@ -119,9 +119,22 @@ general neuroimaging concepts. If you think a term is missing, please consider object class, and :ref:`tut-evoked-class` for a narrative overview. fiducial point - There are three fiducial (a.k.a. cardinal) points: the left - preauricular point (LPA), the right preauricular point (RPA) - and the nasion. + Fiducials are objects placed in the field of view of an imaging system + to act as a known spatial reference location that is easy to localize. + In neuroimaging, fiducials are often placed on anatomical landmarks + such as the nasion (NAS) or left/right preauricular points (LPA and + RPA). + + These known reference locations are used to define a coordinate system + used for localization of sensors (hence NAS, LPA and RPA are often + called "cardinal points" because they define the cardinal directions of + the "head" coordinate system). The cardinal points are also useful when + co-registering measurements in different coordinate systems (such as + aligning EEG sensor locations to an MRI of the subject's head). + + Due to the common neuroimaging practice of placing fiducial objects on + anatomical landmarks, the terms "fiducial", "anatomical landmark" and + "cardinal point" are often (erroneously) used interchangeably. first_samp The :attr:`~mne.io.Raw.first_samp` attribute of :class:`~mne.io.Raw` diff --git a/examples/visualization/plot_eeglab_head_sphere.py b/examples/visualization/plot_eeglab_head_sphere.py index 488c14edd57..874c261a58e 100644 --- a/examples/visualization/plot_eeglab_head_sphere.py +++ b/examples/visualization/plot_eeglab_head_sphere.py @@ -59,9 +59,8 @@ # the position of Fpz, T8, Oz and T7 channels available in our montage. 
 
 # first we obtain the 3d positions of selected channels
-check_ch = ['Oz', 'Fpz', 'T7', 'T8']
-ch_idx = [fake_evoked.ch_names.index(ch) for ch in check_ch]
-pos = np.stack([fake_evoked.info['chs'][idx]['loc'][:3] for idx in ch_idx])
+chs = ['Oz', 'Fpz', 'T7', 'T8']
+pos = np.stack([biosemi_montage.get_positions()['ch_pos'][ch] for ch in chs])
 
 # now we calculate the radius from T7 and T8 x position
 # (we could use Oz and Fpz y positions as well)
diff --git a/tutorials/intro/plot_40_sensor_locations.py b/tutorials/intro/plot_40_sensor_locations.py
index 4af9844478f..be20dd93793 100644
--- a/tutorials/intro/plot_40_sensor_locations.py
+++ b/tutorials/intro/plot_40_sensor_locations.py
@@ -127,7 +127,8 @@
 
 ###############################################################################
 # In mne-python the head center and therefore the sphere center are calculated
-# using fiducial points. Because of this the head circle represents head
+# using :term:`fiducial points <fiducial point>`.
+# Because of this the head circle represents head
 # circumference at the nasion and ear level, and not where it is commonly
 # measured in 10-20 EEG system: above nasion at T4/T8, T3/T7, Oz, Fz level.
 # Notice below that by default T7 and Oz channels are placed within the head

From 81051e6eb9ec1a58c1b1f1c956dddfedfcf6b47f Mon Sep 17 00:00:00 2001
From: Tristan Stenner
Date: Tue, 19 Jan 2021 16:16:27 +0100
Subject: [PATCH 071/387] ENH: add reader for NeuroElectrics .nedf files (#8734)

* ENH: add reader for NeuroElectrics .nedf files

* TST: pytest + _test_raw_reader

* FIX: Last samples

* Add changelog entry

* FIX: Link

* FIX: Missed

* FIX: Spelling

Co-authored-by: Eric Larson
---
 doc/changes/latest.inc         |   3 +
 doc/changes/names.inc          |   2 +
 doc/conf.py                    |   2 +-
 doc/python_reference.rst       |   1 +
 mne/datasets/utils.py          |   4 +-
 mne/io/__init__.py             |   1 +
 mne/io/nedf/__init__.py        |   7 ++
 mne/io/nedf/nedf.py            | 217 +++++++++++++++++++++++++++++++++
 mne/io/nedf/tests/__init__.py  |   1 +
 mne/io/nedf/tests/test_nedf.py | 131 ++++++++++++++++++++
 mne/io/utils.py                |   3 +-
 11 files changed, 367 insertions(+), 5 deletions(-)
 create mode 100644 mne/io/nedf/__init__.py
 create mode 100644 mne/io/nedf/nedf.py
 create mode 100644 mne/io/nedf/tests/__init__.py
 create mode 100644 mne/io/nedf/tests/test_nedf.py

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 6c673a45cba..bf559f0cf4b 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -30,6 +30,9 @@ Enhancements
 
 - `mne.Report` has gained a new method `~mne.Report.add_custom_css` for adding user-defined styles (:gh:`8762` by `Richard Höchenberger`_)
 
+- Add :func:`mne.io.read_raw_nedf` for reading StarStim / Enobio NEDF files (:gh:`8734` by `Tristan Stenner`_)
+
+
 Bugs
 ~~~~
 - Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_)
diff --git a/doc/changes/names.inc b/doc/changes/names.inc
index 5d7865085eb..06925300d97 100644
--- a/doc/changes/names.inc
+++ b/doc/changes/names.inc
@@ -347,3 +347,5 @@
 .. _Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774
 
 .. _Richard Koehler: https://github.com/richardkoehler
+
+..
_Tristan Stenner: https://github.com/tstenner/ diff --git a/doc/conf.py b/doc/conf.py index dafe258ff95..204dc860f2b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -678,7 +678,7 @@ def reset_warnings(gallery_conf, fname): # Undocumented (on purpose) 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', - 'RawPersyst', 'RawNihon', + 'RawPersyst', 'RawNihon', 'RawNedf', # sklearn subclasses 'mapping', 'to', 'any', # unlinkable diff --git a/doc/python_reference.rst b/doc/python_reference.rst index be7f114f6bb..5b86abf9793 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -63,6 +63,7 @@ Reading raw data read_raw_bdf read_raw_gdf read_raw_kit + read_raw_nedf read_raw_nicolet read_raw_nirx read_raw_snirf diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index d796d0d0898..ca35718554c 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -247,7 +247,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.112', misc='0.8') + releases = dict(testing='0.113', misc='0.8') # And also update the "md5_hashes['testing']" variable below. # To update any other dataset, update the data archive itself (upload # an updated version) and update the md5 hash. @@ -333,7 +333,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='32fd2f6c8c7eb0784a1de6435273c48b', spm='9f43f67150e3b694b523a21eb929ea75', - testing='8eabd73532dd7df7c155983962c5b1fd', + testing='ce114ad6d5e3dbed06119386e6b1ce0c', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', diff --git a/mne/io/__init__.py b/mne/io/__init__.py index b4535295c0b..cc066822fef 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -43,6 +43,7 @@ from .egi import read_raw_egi, read_evokeds_mff from .kit import read_raw_kit, read_epochs_kit from .fiff import read_raw_fif +from .nedf import read_raw_nedf from .nicolet import read_raw_nicolet from .artemis123 import read_raw_artemis123 from .eeglab import read_raw_eeglab, read_epochs_eeglab diff --git a/mne/io/nedf/__init__.py b/mne/io/nedf/__init__.py new file mode 100644 index 00000000000..9d16c4ce2b5 --- /dev/null +++ b/mne/io/nedf/__init__.py @@ -0,0 +1,7 @@ +"""NEDF file import module.""" + +# Author: Tristan Stenner +# +# License: BSD (3-clause) + +from .nedf import read_raw_nedf, _parse_nedf_header diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py new file mode 100644 index 00000000000..c26a3acf811 --- /dev/null +++ b/mne/io/nedf/nedf.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +"""Import NeuroElectrics DataFormat (NEDF) files.""" + +from copy import deepcopy +from datetime import datetime, timezone +from xml.etree import ElementTree + +import numpy as np + +from ..base import BaseRaw +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...utils import warn, verbose + + +def _getsubnodetext(node, name): + """Get an element from an XML node, raise an error otherwise. 
+
+    Parameters
+    ----------
+    node: Element
+        XML Element
+    name: str
+        Child element name
+
+    Returns
+    -------
+    text: str
+        Text contents of the child node
+    """
+    subnode = node.findtext(name)
+    if not subnode:
+        raise RuntimeError('NEDF header ' + name + ' not found')
+    return subnode
+
+
+def _parse_nedf_header(header):
+    """Read header information from the first 10kB of an .nedf file.
+
+    Parameters
+    ----------
+    header : bytes
+        Null-terminated header data, mostly the file's first 10240 bytes.
+
+    Returns
+    -------
+    info : dict
+        A dictionary with header information.
+    dt : numpy.dtype
+        Structure of the binary EEG/accelerometer/trigger data in the file.
+    n_samples : int
+        The number of data samples.
+    """
+    info = {}
+    # nedf files have three accelerometer channels sampled at 100Hz followed
+    # by five EEG samples + TTL trigger sampled at 500Hz
+    # For 32 EEG channels and no stim channels, the data layout may look like
+    # [ ('acc', '>u2', (3,)),
+    #   ('data', dtype([
+    #       ('eeg', 'u1', (32, 3)),
+    #       ('trig', '>i4', (1,))
+    #   ]), (5,))
+    # ]
+
+    dt = []  # dtype for the binary data block
+    datadt = []  # dtype for a single EEG sample
+
+    headerend = header.find(b'\0')
+    if headerend == -1:
+        raise RuntimeError('End of header null not found')
+    headerxml = ElementTree.fromstring(header[:headerend])
+    nedfversion = headerxml.findtext('NEDFversion', '')
+    if nedfversion not in ['1.3', '1.4']:
+        warn('NEDFversion unsupported, use with caution')
+
+    if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
+        warn('Found Starstim, this hasn\'t been tested extensively!')
+
+    if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
+        raise RuntimeError('Unknown additional channel, aborting.')
+
+    n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
+    if n_acc:
+        # expect one sample of u16 accelerometer data per block
+        dt.append(('acc', '>u2', (n_acc,)))
+
+    eegset = headerxml.find('EEGSettings')
+    if eegset is None:
+        raise RuntimeError('No EEG channels found')
+    nchantotal = int(_getsubnodetext(eegset, 'TotalNumberOfChannels'))
+    info['nchan'] = nchantotal
+
+    info['sfreq'] = int(_getsubnodetext(eegset, 'EEGSamplingRate'))
+    info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
+    if nchantotal != len(info['ch_names']):
+        raise RuntimeError(
+            f"TotalNumberOfChannels ({nchantotal}) != "
+            f"channel count ({len(info['ch_names'])})")
+    # expect nchantotal uint24s
+    datadt.append(('eeg', 'B', (nchantotal, 3)))
+
+    if headerxml.find('STIMSettings') is not None:
+        # 2* -> two stim samples per eeg sample
+        datadt.append(('stim', 'B', (2, nchantotal, 3)))
+        warn('stim channels are currently ignored')
+
+    # Trigger data: 4 bytes in newer versions, 1 byte in older versions
+    trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
+    datadt.append(('trig', trigger_type))
+    # 5 data samples per block
+    dt.append(('data', np.dtype(datadt), (5,)))
+
+    date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', 0)
+    info['meas_date'] = datetime.fromtimestamp(int(date) / 1000, timezone.utc)
+
+    n_samples = int(_getsubnodetext(eegset, 'NumberOfRecordsOfEEG'))
+    n_full, n_last = divmod(n_samples, 5)
+    dt_last = deepcopy(dt)
+    assert dt_last[-1][-1] == (5,)
+    dt_last[-1] = list(dt_last[-1])
+    dt_last[-1][-1] = (n_last,)
+    dt_last[-1] = tuple(dt_last[-1])
+    return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full
+
+
+# the first 10240 bytes are header in XML format, padded with NULL bytes
+_HDRLEN = 10240
+
+
+class RawNedf(BaseRaw):
+
"""Raw object from NeuroElectrics nedf file.""" + + def __init__(self, filename, preload=False, verbose=None): + with open(filename, mode='rb') as fid: + header = fid.read(_HDRLEN) + header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header) + ch_names = header['ch_names'] + ['STI 014'] + ch_types = ['eeg'] * len(ch_names) + ch_types[-1] = 'stim' + info = create_info(ch_names, header['sfreq'], ch_types) + # scaling factor ADC-values -> volts + # taken from the NEDF EEGLAB plugin + # (https://www.neuroelectrics.com/resources/software/): + for ch in info['chs'][:-1]: + ch['cal'] = 2.4 / (6.0 * 8388607) + info['meas_date'] = header['meas_date'] + raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full) + super().__init__( + info, preload=preload, filenames=[filename], verbose=verbose, + raw_extras=[raw_extra], last_samps=[n_samp - 1]) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + dt = self._raw_extras[fi]['dt'] + dt_last = self._raw_extras[fi]['dt_last'] + n_full = self._raw_extras[fi]['n_full'] + n_eeg = dt[1].subdtype[0][0].shape[0] + # data is stored in 5-sample chunks (except maybe the last one!) + # so we have to do some gymnastics to pick the correct parts to + # read + offset = start // 5 * dt.itemsize + _HDRLEN + start_sl = start % 5 + n_samples = stop - start + n_samples_full = min(stop, n_full * 5) - start + last = None + n_chunks = (n_samples_full - 1) // 5 + 1 + n_tot = n_chunks * 5 + with open(self._filenames[fi], 'rb') as fid: + fid.seek(offset, 0) + chunks = np.fromfile(fid, dtype=dt, count=n_chunks) + assert len(chunks) == n_chunks + if n_samples != n_samples_full: + last = np.fromfile(fid, dtype=dt_last, count=1) + eeg = _convert_eeg(chunks, n_eeg, n_tot) + trig = chunks['data']['trig'].reshape(1, n_tot) + if last is not None: + n_last = dt_last['data'].shape[0] + eeg = np.concatenate( + (eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) + trig = np.concatenate( + (trig, last['data']['trig'].reshape(1, n_last)), axis=-1) + one_ = np.concatenate((eeg, trig)) + one = one_[:, start_sl:n_samples + start_sl] + _mult_cal_one(data, one, idx, cals, mult) + + +def _convert_eeg(chunks, n_eeg, n_tot): + # convert uint8-triplet -> int32 + eeg = chunks['data']['eeg'] @ np.array([1 << 16, 1 << 8, 1]) + # convert sign if necessary + eeg[eeg > (1 << 23)] -= 1 << 24 + eeg = eeg.reshape((n_tot, n_eeg)).T + return eeg + + +@verbose +def read_raw_nedf(filename, preload=False, verbose=None): + """Read NeuroElectrics .nedf files. + + NEDF file versions starting from 1.3 are supported. + + Parameters + ---------- + filename : str + Path to the .nedf file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawNedf + A Raw object containing NEDF data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
+    """
+    return RawNedf(filename, preload, verbose)
diff --git a/mne/io/nedf/tests/__init__.py b/mne/io/nedf/tests/__init__.py
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/mne/io/nedf/tests/__init__.py
@@ -0,0 +1 @@
+
diff --git a/mne/io/nedf/tests/test_nedf.py b/mne/io/nedf/tests/test_nedf.py
new file mode 100644
index 00000000000..3df747c81ad
--- /dev/null
+++ b/mne/io/nedf/tests/test_nedf.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+"""Test reading of NEDF format."""
+# Author: Tristan Stenner
+#
+# License: BSD (3-clause)
+
+import os.path as op
+
+import pytest
+from numpy.testing import assert_allclose, assert_array_equal
+
+from mne import find_events
+from mne.io.constants import FIFF
+from mne.io.nedf import read_raw_nedf, _parse_nedf_header
+from mne.datasets import testing
+from mne.io.tests.test_raw import _test_raw_reader
+
+eeg_path = testing.data_path(download=False, verbose=True)
+eegfile = op.join(eeg_path, 'nedf', 'testdata.nedf')
+
+stimhdr = b"""<nedf>
+    <NEDFversion>1.3</NEDFversion>
+    <NumberOfChannelsOfAccelerometer>%d</NumberOfChannelsOfAccelerometer>
+    <EEGSettings>
+        <TotalNumberOfChannels>4</TotalNumberOfChannels>
+        <EEGSamplingRate>500</EEGSamplingRate>
+        <EEGMontage><s>A</s><s>B</s><s>C</s><s>D</s></EEGMontage>
+        <NumberOfRecordsOfEEG>11</NumberOfRecordsOfEEG>
+    </EEGSettings>
+    <STIMSettings/>
+</nedf>\x00"""
+
+
+@pytest.mark.parametrize('nacc', (0, 3))
+def test_nedf_header_parser(nacc):
+    """Test NEDF header parsing and dtype extraction."""
+    with pytest.warns(RuntimeWarning, match='stim channels.*ignored'):
+        info, dt, dt_last, n_samples, n_full = _parse_nedf_header(
+            stimhdr % nacc)
+    assert n_samples == 11
+    assert n_full == 2
+    nchan = 4
+    assert info['nchan'] == nchan
+    assert dt.itemsize == 200 + nacc * 2
+    if nacc:
+        assert dt.names[0] == 'acc'
+        assert dt['acc'].shape == (nacc,)
+
+    assert dt['data'].shape == (5,)  # blocks of 5 EEG samples each
+    assert dt_last['data'].shape == (1,)  # plus one last extra one
+
+    eegsampledt = dt['data'].subdtype[0]
+    assert eegsampledt.names == ('eeg', 'stim', 'trig')
+    assert eegsampledt['eeg'].shape == (nchan, 3)
+    assert eegsampledt['stim'].shape == (2, nchan, 3)
+
+
+def test_invalid_headers():
+    """Test that invalid headers raise exceptions."""
+    tpl = b"""<nedf>
+        <NEDFversion>1.3</NEDFversion>
+        <EEGSettings>
+            %s
+            <EEGMontage><s>A</s><s>B</s><s>C</s><s>D</s></EEGMontage>
+        </EEGSettings>
+    </nedf>\x00"""
+    nchan = b'<TotalNumberOfChannels>4</TotalNumberOfChannels>'
+    sr = b'<EEGSamplingRate>500</EEGSamplingRate>'
+    hdr = {
+        'null':
+            b'No null terminator',
+        'Unknown additional':
+            (b'<nedf><NEDFversion>1.3</NEDFversion>' +
+             b'<AdditionalChannelStatus>???</AdditionalChannelStatus></nedf>\x00'),  # noqa: E501
+        'No EEG channels found':
+            b'<nedf><NEDFversion>1.3</NEDFversion></nedf>\x00',
+        'TotalNumberOfChannels not found':
+            tpl % b'No nchan.',
+        '!= channel count':
+            tpl % (sr + b'<TotalNumberOfChannels>52</TotalNumberOfChannels>'),
+        'EEGSamplingRate not found':
+            tpl % nchan,
+        'NumberOfRecordsOfEEG not found':
+            tpl % (sr + nchan),
+    }
+    for match, invalid_hdr in hdr.items():
+        with pytest.raises(RuntimeError, match=match):
+            _parse_nedf_header(invalid_hdr)
+
+    sus_hdrs = {
+        'unsupported': b'<nedf><NEDFversion>25</NEDFversion></nedf>\x00',
+        'tested': (
+            b'<nedf><NEDFversion>1.3</NEDFversion><stepDetails>' +
+            b'<DeviceClass>STARSTIM</DeviceClass></stepDetails></nedf>\x00'),
+    }
+    for match, sus_hdr in sus_hdrs.items():
+        with pytest.warns(RuntimeWarning, match=match):
+            with pytest.raises(RuntimeError, match='No EEG channels found'):
+                _parse_nedf_header(sus_hdr)
+
+
+@testing.requires_testing_data
+def test_nedf_data():
+    """Test reading raw NEDF files."""
+    raw = read_raw_nedf(eegfile)
+    nsamples = len(raw)
+    assert nsamples == 32538
+
+    events = find_events(raw, shortest_event=1)
+    assert len(events) == 4
+    assert_array_equal(events[:, 2], [1, 1, 1, 1])
+    onsets = events[:, 0] / raw.info['sfreq']
+    assert raw.info['sfreq'] == 500
+
+    data_end = raw.get_data('Fp1', nsamples - 100, nsamples).mean()
+    assert_allclose(data_end, .0176, atol=.01)
+    assert_allclose(raw.get_data('Fpz', 0, 100).mean(), .0185, atol=.01)
+
+    assert_allclose(onsets, [22.384, 38.238, 49.496, 63.15])
+    assert raw.info['meas_date'].year == 2019
+    assert raw.ch_names[2] == 'AF7'
+
+    for ch in raw.info['chs'][:-1]:
+        assert ch['kind'] == FIFF.FIFFV_EEG_CH
+        assert ch['unit'] == FIFF.FIFF_UNIT_V
+    assert raw.info['chs'][-1]['kind'] == FIFF.FIFFV_STIM_CH
+    assert raw.info['chs'][-1]['unit'] == FIFF.FIFF_UNIT_V
+
+    # full tests
+    _test_raw_reader(read_raw_nedf, filename=eegfile)
diff --git a/mne/io/utils.py b/mne/io/utils.py
index a272cd23065..7c9ab92240b 100644
--- a/mne/io/utils.py
+++ b/mne/io/utils.py
@@ -77,8 +77,7 @@ def _find_channels(ch_names, ch_type='EOG'):
 def _mult_cal_one(data_view, one, idx, cals, mult):
     """Take a chunk of raw data, multiply by mult or cals, and store."""
     one = np.asarray(one, dtype=data_view.dtype)
-    assert data_view.shape[1] == one.shape[1], \
-        (data_view.shape[1], one.shape[1])
+    assert data_view.shape[1] == one.shape[1], (data_view.shape[1], one.shape[1])  # noqa: E501
     if mult is not None:
         mult.ndim == one.ndim == 2
         data_view[:] = mult @ one[idx]

From 5f17b357bbd549626d92c73f82daede8751bb711 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 20 Jan 2021 08:18:26 -0500
Subject: [PATCH 072/387] MRG, ENH: Add infant template downloader (#8738)

* ENH: Add infant template downloader

* TST: Add test

* Update tutorials/source-modeling/plot_eeg_no_mri.py

Co-authored-by: Daniel McCloy

Co-authored-by: Daniel McCloy
---
 MANIFEST.in                                   |   1 +
 doc/changes/latest.inc                        |   2 +
 doc/overview/datasets_index.rst               |   6 +
 doc/python_reference.rst                      |   1 +
 doc/references.bib                            |  34 +++++-
 mne/datasets/__init__.py                      |   3 +-
 mne/datasets/_fsaverage/base.py               |  37 ++----
 mne/datasets/_infant/ANTS1-0Months3T.txt      | 117 ++++++++++++++++++
 mne/datasets/_infant/ANTS10-5Months3T.txt     | 115 ++++++++++++++++++
 mne/datasets/_infant/ANTS12-0Months3T.txt     | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS15-0Months3T.txt     | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS18-0Months3T.txt     | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS2-0Weeks3T.txt       | 117 ++++++++++++++++++
 mne/datasets/_infant/ANTS2-0Years3T.txt       | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS3-0Months3T.txt      | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS4-5Months3T.txt      | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS6-0Months3T.txt      | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS7-5Months3T.txt      | 121 +++++++++++++++++++
 mne/datasets/_infant/ANTS9-0Months3T.txt      | 121 +++++++++++++++++++
 mne/datasets/_infant/base.py                  |  94 ++++++++++++++
 mne/datasets/tests/test_datasets.py           |  19 ++-
 mne/datasets/utils.py                         |   6 +-
 setup.py                                      |   2 +
 tutorials/source-modeling/plot_eeg_no_mri.py  |  67 +++++++++-
 24 files changed, 1679 insertions(+), 31 deletions(-)
 create mode 100644 mne/datasets/_infant/ANTS1-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS10-5Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS12-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS15-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS18-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS2-0Weeks3T.txt
 create mode 100644 mne/datasets/_infant/ANTS2-0Years3T.txt
 create mode 100644 mne/datasets/_infant/ANTS3-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS4-5Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS6-0Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS7-5Months3T.txt
 create mode 100644 mne/datasets/_infant/ANTS9-0Months3T.txt
 create mode 100644 mne/datasets/_infant/base.py

diff --git a/MANIFEST.in b/MANIFEST.in
index 70166aa84f8..8e126e4c718 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -19,6 +19,7 @@ recursive-include mne/data/image *
 recursive-include mne/data/fsaverage *
 include mne/datasets/_fsaverage/root.txt
 include mne/datasets/_fsaverage/bem.txt
+include mne/datasets/_infant/*.txt
 
 recursive-include mne/channels/data/layouts *
 recursive-include mne/channels/data/montages *
diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index bf559f0cf4b..b0186e754e9 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -24,6 +24,8 @@ Enhancements
 
 - Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to :meth:`mne.Report.add_bem_to_section` (:gh:`8723` by `Eric Larson`_)
 
+- Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_)
+
 - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_)
 
 - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_)
diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst
index c8248f19f09..86b75db6744 100644
--- a/doc/overview/datasets_index.rst
+++ b/doc/overview/datasets_index.rst
@@ -351,6 +351,12 @@
 For convenience, we provide a function to separately download and extract the
 :ref:`tut-eeg-fsaverage-source-modeling`
 
+Infant template MRIs
+^^^^^^^^^^^^^^^^^^^^
+:func:`mne.datasets.fetch_infant_template`
+
+This function will download an infant template MRI from
+:footcite:`OReillyEtAl2021` along with MNE-specific files.
 
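For example (a sketch: the exact age-string API, e.g. ``'6mo'``, is assumed
here, and the download requires an internet connection)::

    import mne

    # fetches the 6-month ANTS template into subjects_dir and returns the
    # subject name to use with subjects_dir-based functions
    subject = mne.datasets.fetch_infant_template('6mo', verbose=True)
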
+++++++++++++++++++ mne/datasets/_infant/ANTS7-5Months3T.txt | 121 +++++++++++++++++++ mne/datasets/_infant/ANTS9-0Months3T.txt | 121 +++++++++++++++++++ mne/datasets/_infant/base.py | 94 ++++++++++++++ mne/datasets/tests/test_datasets.py | 19 ++- mne/datasets/utils.py | 6 +- setup.py | 2 + tutorials/source-modeling/plot_eeg_no_mri.py | 67 +++++++++- 24 files changed, 1679 insertions(+), 31 deletions(-) create mode 100644 mne/datasets/_infant/ANTS1-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS10-5Months3T.txt create mode 100644 mne/datasets/_infant/ANTS12-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS15-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS18-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS2-0Weeks3T.txt create mode 100644 mne/datasets/_infant/ANTS2-0Years3T.txt create mode 100644 mne/datasets/_infant/ANTS3-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS4-5Months3T.txt create mode 100644 mne/datasets/_infant/ANTS6-0Months3T.txt create mode 100644 mne/datasets/_infant/ANTS7-5Months3T.txt create mode 100644 mne/datasets/_infant/ANTS9-0Months3T.txt create mode 100644 mne/datasets/_infant/base.py diff --git a/MANIFEST.in b/MANIFEST.in index 70166aa84f8..8e126e4c718 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -19,6 +19,7 @@ recursive-include mne/data/image * recursive-include mne/data/fsaverage * include mne/datasets/_fsaverage/root.txt include mne/datasets/_fsaverage/bem.txt +include mne/datasets/_infant/*.txt recursive-include mne/channels/data/layouts * recursive-include mne/channels/data/montages * diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index bf559f0cf4b..b0186e754e9 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -24,6 +24,8 @@ Enhancements - Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to :meth:`mne.Report.add_bem_to_section` (:gh:`8723` by `Eric Larson`_) +- Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) + - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index c8248f19f09..86b75db6744 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -351,6 +351,12 @@ For convenience, we provide a function to separately download and extract the :ref:`tut-eeg-fsaverage-source-modeling` +Infant template MRIs +^^^^^^^^^^^^^^^^^^^^ +:func:`mne.datasets.fetch_infant_template` + +This function will download an infant template MRI from +:footcite:`OReillyEtAl2021` along with MNE-specific files. 
ECoG Dataset ^^^^^^^^^^^^ diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 5b86abf9793..02f6b053ae2 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -196,6 +196,7 @@ Datasets fetch_aparc_sub_parcellation fetch_fsaverage fetch_hcp_mmp_parcellation + fetch_infant_template fnirs_motor.data_path hf_sef.data_path kiloword.data_path diff --git a/doc/references.bib b/doc/references.bib index 12b60939a02..02184fb478a 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -1888,10 +1888,42 @@ @misc{WikipediaSI urldate = "12-October-2020" } - @misc{BIDSdocs, author = "{BIDS} contributors", title = {Brain Imaging Data Structure — Specification}, url = {https://bids-specification.readthedocs.io/en/stable/}, urldate = "12-October-2020" } + + +@article{OReillyEtAl2021, + title = {Structural templates for imaging {EEG} cortical sources in infants}, + volume = {227}, + issn = {1053-8119}, + url = {http://www.sciencedirect.com/science/article/pii/S1053811920311678}, + doi = {10.1016/j.neuroimage.2020.117682}, + language = {en}, + urldate = {2021-01-12}, + journal = {NeuroImage}, + author = {O'Reilly, Christian and Larson, Eric and Richards, John E. and Elsabbagh, Mayada}, + month = feb, + year = {2021}, + keywords = {Electroencephalography, Forward model, Infant, Neurodevelopment, Population template, Source reconstruction}, + pages = {117682} +} + +@article{RichardsEtAl2016, + series = {Sharing the wealth: {Brain} {Imaging} {Repositories} in 2015}, + title = {A database of age-appropriate average {MRI} templates}, + volume = {124}, + issn = {1053-8119}, + url = {http://www.sciencedirect.com/science/article/pii/S1053811915003559}, + doi = {10.1016/j.neuroimage.2015.04.055}, + language = {en}, + journal = {NeuroImage}, + author = {Richards, John E. 
and Sanchez, Carmen and Phillips-Meek, Michelle and Xie, Wanze}, + month = jan, + year = {2016}, + keywords = {Average MRI templates, Brain development, Lifespan MRI, Neurodevelopmental MRI Database}, + pages = {1254--1259} +} diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 98ac5679ab3..39b8ce7df4c 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -26,10 +26,11 @@ from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation, fetch_aparc_sub_parcellation) from ._fsaverage.base import fetch_fsaverage +from ._infant.base import fetch_infant_template __all__ = [ '_download_all_example_data', '_fake', 'brainstorm', 'eegbci', - 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', + 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', 'fetch_infant_template', 'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword', 'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample', 'sleep_physionet', 'somato', 'spm_face', 'testing', 'visual_92_categories', diff --git a/mne/datasets/_fsaverage/base.py b/mne/datasets/_fsaverage/base.py index cfdff437b94..7a412b00e9c 100644 --- a/mne/datasets/_fsaverage/base.py +++ b/mne/datasets/_fsaverage/base.py @@ -6,6 +6,7 @@ import os.path as op +from ..utils import _manifest_check_download, _get_path from ...utils import (verbose, get_subjects_dir, set_config) FSAVERAGE_MANIFEST_PATH = op.dirname(__file__) @@ -64,38 +65,26 @@ def fetch_fsaverage(subjects_dir=None, verbose=None): # with open('fsaverage.txt', 'w') as fid: # fid.write('\n'.join(names)) # - from ..utils import _manifest_check_download subjects_dir = _set_montage_coreg_path(subjects_dir) subjects_dir = op.abspath(subjects_dir) fs_dir = op.join(subjects_dir, 'fsaverage') os.makedirs(fs_dir, exist_ok=True) - - fsaverage_data_parts = { - 'root.zip': dict( - url='https://osf.io/3bxqt/download?revision=2', - hash_='5133fe92b7b8f03ae19219d5f46e4177', - manifest=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), - destination=op.join(subjects_dir), - ), - 'bem.zip': dict( - url='https://osf.io/7ve8g/download?revision=4', - hash_='b31509cdcf7908af6a83dc5ee8f49fb1', - manifest=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), - destination=op.join(subjects_dir, 'fsaverage'), - ), - } - for fname, data in fsaverage_data_parts.items(): - _manifest_check_download( - destination=data['destination'], - manifest_path=data['manifest'], - url=data['url'], - hash_=data['hash_'], - ) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), + destination=op.join(subjects_dir), + url='https://osf.io/3bxqt/download?revision=2', + hash_='5133fe92b7b8f03ae19219d5f46e4177', + ) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), + destination=op.join(subjects_dir, 'fsaverage'), + url='https://osf.io/7ve8g/download?revision=4', + hash_='b31509cdcf7908af6a83dc5ee8f49fb1', + ) return fs_dir def _get_create_subjects_dir(subjects_dir): - from ..utils import _get_path subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) if subjects_dir is None: subjects_dir = _get_path(None, 'MNE_DATA', 'montage coregistration') diff --git a/mne/datasets/_infant/ANTS1-0Months3T.txt b/mne/datasets/_infant/ANTS1-0Months3T.txt new file mode 100644 index 00000000000..fc77acedae1 --- /dev/null +++ b/mne/datasets/_infant/ANTS1-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS1-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS1-0Months3T-5120-5120-5120-bem.fif +bem/ANTS1-0Months3T-fiducials.fif +bem/ANTS1-0Months3T-head.fif 
+bem/ANTS1-0Months3T-oct-6-src.fif +bem/ANTS1-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS10-5Months3T.txt b/mne/datasets/_infant/ANTS10-5Months3T.txt new file mode 100644 index 00000000000..cec0a3e735a --- /dev/null +++ b/mne/datasets/_infant/ANTS10-5Months3T.txt @@ -0,0 +1,115 @@ +bem/ANTS10-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS10-5Months3T-5120-5120-5120-bem.fif +bem/ANTS10-5Months3T-fiducials.fif +bem/ANTS10-5Months3T-head.fif +bem/ANTS10-5Months3T-oct-6-src.fif +bem/ANTS10-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv 
+montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS12-0Months3T.txt b/mne/datasets/_infant/ANTS12-0Months3T.txt new file mode 100644 index 00000000000..d1fdbbc7bb0 --- /dev/null +++ b/mne/datasets/_infant/ANTS12-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS12-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS12-0Months3T-5120-5120-5120-bem.fif +bem/ANTS12-0Months3T-fiducials.fif +bem/ANTS12-0Months3T-head.fif +bem/ANTS12-0Months3T-oct-6-src.fif +bem/ANTS12-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz 
+mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS15-0Months3T.txt b/mne/datasets/_infant/ANTS15-0Months3T.txt new file mode 100644 index 00000000000..50487c06c73 --- /dev/null +++ b/mne/datasets/_infant/ANTS15-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS15-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS15-0Months3T-5120-5120-5120-bem.fif +bem/ANTS15-0Months3T-fiducials.fif +bem/ANTS15-0Months3T-head.fif +bem/ANTS15-0Months3T-oct-6-src.fif +bem/ANTS15-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area 
+surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS18-0Months3T.txt b/mne/datasets/_infant/ANTS18-0Months3T.txt new file mode 100644 index 00000000000..8f386c820f2 --- /dev/null +++ b/mne/datasets/_infant/ANTS18-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS18-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS18-0Months3T-5120-5120-5120-bem.fif +bem/ANTS18-0Months3T-fiducials.fif +bem/ANTS18-0Months3T-head.fif +bem/ANTS18-0Months3T-oct-6-src.fif +bem/ANTS18-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated 
+surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Weeks3T.txt b/mne/datasets/_infant/ANTS2-0Weeks3T.txt new file mode 100644 index 00000000000..e940f24b147 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Weeks3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Weeks3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Weeks3T-5120-5120-5120-bem.fif +bem/ANTS2-0Weeks3T-fiducials.fif +bem/ANTS2-0Weeks3T-head.fif +bem/ANTS2-0Weeks3T-oct-6-src.fif +bem/ANTS2-0Weeks3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Years3T.txt b/mne/datasets/_infant/ANTS2-0Years3T.txt new file mode 100644 index 00000000000..776396919ec --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Years3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz 
+atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS2-0Years3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Years3T-5120-5120-5120-bem.fif +bem/ANTS2-0Years3T-fiducials.fif +bem/ANTS2-0Years3T-head.fif +bem/ANTS2-0Years3T-oct-6-src.fif +bem/ANTS2-0Years3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS3-0Months3T.txt b/mne/datasets/_infant/ANTS3-0Months3T.txt new file mode 100644 index 00000000000..29a7148010b --- /dev/null +++ b/mne/datasets/_infant/ANTS3-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS3-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS3-0Months3T-5120-5120-5120-bem.fif +bem/ANTS3-0Months3T-fiducials.fif +bem/ANTS3-0Months3T-head.fif +bem/ANTS3-0Months3T-oct-6-src.fif +bem/ANTS3-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf 
+bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS4-5Months3T.txt b/mne/datasets/_infant/ANTS4-5Months3T.txt new file mode 100644 index 00000000000..b9188492d50 --- /dev/null +++ b/mne/datasets/_infant/ANTS4-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS4-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS4-5Months3T-5120-5120-5120-bem.fif +bem/ANTS4-5Months3T-fiducials.fif +bem/ANTS4-5Months3T-head.fif +bem/ANTS4-5Months3T-oct-6-src.fif +bem/ANTS4-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif 
+montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS6-0Months3T.txt b/mne/datasets/_infant/ANTS6-0Months3T.txt new file mode 100644 index 00000000000..3235de4c576 --- /dev/null +++ b/mne/datasets/_infant/ANTS6-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS6-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS6-0Months3T-5120-5120-5120-bem.fif +bem/ANTS6-0Months3T-fiducials.fif +bem/ANTS6-0Months3T-head.fif +bem/ANTS6-0Months3T-oct-6-src.fif +bem/ANTS6-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta 
+mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS7-5Months3T.txt b/mne/datasets/_infant/ANTS7-5Months3T.txt new file mode 100644 index 00000000000..8b38563c5b1 --- /dev/null +++ b/mne/datasets/_infant/ANTS7-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS7-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS7-5Months3T-5120-5120-5120-bem.fif +bem/ANTS7-5Months3T-fiducials.fif +bem/ANTS7-5Months3T-head.fif +bem/ANTS7-5Months3T-oct-6-src.fif +bem/ANTS7-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated 
+surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS9-0Months3T.txt b/mne/datasets/_infant/ANTS9-0Months3T.txt new file mode 100644 index 00000000000..8d37f25d0ba --- /dev/null +++ b/mne/datasets/_infant/ANTS9-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS9-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS9-0Months3T-5120-5120-5120-bem.fif +bem/ANTS9-0Months3T-fiducials.fif +bem/ANTS9-0Months3T-head.fif +bem/ANTS9-0Months3T-oct-6-src.fif +bem/ANTS9-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input 
+surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/base.py b/mne/datasets/_infant/base.py new file mode 100644 index 00000000000..b5b6ed67f6e --- /dev/null +++ b/mne/datasets/_infant/base.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson +# License: BSD Style. + +import os +import os.path as op + +from ..utils import _manifest_check_download +from ...utils import verbose, get_subjects_dir, _check_option, _validate_type + +_AGES = '2wk 1mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr' +# https://github.com/christian-oreilly/infant_template_paper/releases +_ORIGINAL_URL = 'https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/{subject}.zip' # noqa: E501 +# Formatted the same way as md5sum *.zip on Ubuntu: +_ORIGINAL_HASHES = """ +851737d5f8f246883f2aef9819c6ec29 ANTS10-5Months3T.zip +32ab6d025f4311433a82e81374f1a045 ANTS1-0Months3T.zip +48ef349e7cc542fdf63ff36d7958ab57 ANTS12-0Months3T.zip +bba22c95aa97988c6e8892d6169ed317 ANTS15-0Months3T.zip +fa7bee6c0985b9cd15ba53820cd72ccd ANTS18-0Months3T.zip +2ad90540cdf42837c09f8ce829458a35 ANTS2-0Weeks3T.zip +73e6a8b2579b7959a96f7d294ffb7393 ANTS2-0Years3T.zip +cb7b9752894e16a4938ddfe220f6286a ANTS3-0Months3T.zip +16b2a6804c7d5443cfba2ad6f7d4ac6a ANTS4-5Months3T.zip +dbdf2a9976121f2b106da96775690da3 ANTS6-0Months3T.zip +75fe37a1bc80ed6793a8abb47681d5ab ANTS7-5Months3T.zip +790f7dba0a264262e6c1c2dfdf216215 ANTS9-0Months3T.zip +""" +_MANIFEST_PATH = op.dirname(__file__) + + +@verbose +def fetch_infant_template(age, subjects_dir=None, verbose=None): + """Fetch and update an infant MRI template. + + Parameters + ---------- + age : str + Age to download. Can be one of ``{'2wk', '1mo', '3mo', '4.5mo', '6mo', + '7.5mo', '9mo', '10.5mo', '12mo', '15mo', '18mo', '2yr'}``. + subjects_dir : str | None + The path to download the template data to. + %(verbose)s + + Returns + ------- + subject : str + The standard subject name, e.g. ``ANTS4-5Months3T``. + + Notes + ----- + If you use these templates in your work, please cite + :footcite:`OReillyEtAl2021` and :footcite:`RichardsEtAl2016`. + + .. versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + # Code used to create the lists: + # + # $ for name in 2-0Weeks 1-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years; do wget https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/ANTS${name}3T.zip; done # noqa: E501 + # $ md5sum ANTS*.zip + # $ python + # >>> import os.path as op + # >>> import zipfile + # >>> names = [f'ANTS{name}3T' for name in '2-0Weeks 1-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years'.split()] # noqa: E501 + # >>> for name in names: + # ... with zipfile.ZipFile(f'{name}.zip', 'r') as zip: + # ... names = sorted(name for name in zip.namelist() if not zipfile.Path(zip, name).is_dir()) # noqa: E501 + # ... with open(f'{name}.txt', 'w') as fid: + # ...
fid.write('\n'.join(names)) + _validate_type(age, str, 'age') + _check_option('age', age, _AGES.split()) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subjects_dir = op.abspath(subjects_dir) + unit = dict(wk='Weeks', mo='Months', yr='Years')[age[-2:]] + first = age[:-2].split('.')[0] + dash = '-5' if '.5' in age else '-0' + subject = f'ANTS{first}{dash}{unit}3T' + # Actually get and create the files + subj_dir = op.join(subjects_dir, subject) + os.makedirs(subj_dir, exist_ok=True) + # .zip -> hash mapping + orig_hashes = dict(line.strip().split()[::-1] + for line in _ORIGINAL_HASHES.strip().splitlines()) + _manifest_check_download( + manifest_path=op.join(_MANIFEST_PATH, f'{subject}.txt'), + destination=subj_dir, + url=_ORIGINAL_URL.format(subject=subject), + hash_=orig_hashes[f'{subject}.zip'], + ) + return subject diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index eba38940f69..85d7a0a6049 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -8,7 +8,8 @@ import pytest from mne import datasets, read_labels_from_annot, write_labels_to_annot -from mne.datasets import testing +from mne.datasets import testing, fetch_infant_template +from mne.datasets._infant import base as infant_base from mne.datasets._fsaverage.base import _set_montage_coreg_path from mne.datasets.utils import _manifest_check_download @@ -196,3 +197,19 @@ def test_manifest_check_download(tmpdir, n_have, monkeypatch): assert op.isdir(destination) for fname in _zip_fnames: assert op.isfile(op.join(destination, fname)) + + +def _fake_mcd(manifest_path, destination, url, hash_): + name = url.split('/')[-1].split('.')[0] + assert name in manifest_path + assert name in destination + assert name in url + assert len(hash_) == 32 + + +def test_infant(tmpdir, monkeypatch): + """Test fetch_infant_template.""" + monkeypatch.setattr(infant_base, '_manifest_check_download', _fake_mcd) + fetch_infant_template('12mo', subjects_dir=tmpdir) + with pytest.raises(ValueError, match='Invalid value for'): + fetch_infant_template('0mo', subjects_dir=tmpdir) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index ca35718554c..506e31ebfe2 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -18,7 +18,6 @@ import numpy as np -from ._fsaverage.base import fetch_fsaverage from .. import __version__ as mne_version from ..label import read_labels_from_annot, Label, write_labels_to_annot from ..utils import (get_config, set_config, _fetch_file, logger, warn, @@ -594,7 +593,8 @@ def _download_all_example_data(verbose=True): from . 
import (sample, testing, misc, spm_face, somato, brainstorm, eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor, refmeg_noise) + fnirs_motor, refmeg_noise, fetch_infant_template, + fetch_fsaverage) sample_path = sample.data_path() testing.data_path() misc.data_path() @@ -622,6 +622,7 @@ def _download_all_example_data(verbose=True): # If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one # (probably on CircleCI, or otherwise advanced user) fetch_fsaverage(None) + fetch_infant_template('6mo') fetch_hcp_mmp_parcellation( subjects_dir=sample_path + '/subjects', accept=True) limo.load_data(subject=1, update_path=True) @@ -810,6 +811,7 @@ def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, def _manifest_check_download(manifest_path, destination, url, hash_): with open(manifest_path, 'r') as fid: names = [name.strip() for name in fid.readlines()] + manifest_path = op.basename(manifest_path) need = list() for name in names: if not op.isfile(op.join(destination, name)): diff --git a/setup.py b/setup.py index bd47736cf99..9a5e4da5ff3 100755 --- a/setup.py +++ b/setup.py @@ -107,6 +107,8 @@ def package_tree(pkgroot): op.join('channels', 'data', 'montages', '*.elc'), op.join('channels', 'data', 'neighbors', '*.mat'), op.join('datasets', 'sleep_physionet', 'SHA1SUMS'), + op.join('datasets', '_fsaverage', '*.txt'), + op.join('datasets', '_infant', '*.txt'), op.join('gui', 'help', '*.json'), op.join('html', '*.js'), op.join('html', '*.css'), diff --git a/tutorials/source-modeling/plot_eeg_no_mri.py b/tutorials/source-modeling/plot_eeg_no_mri.py index 15d78888b74..3fb68898cf6 100644 --- a/tutorials/source-modeling/plot_eeg_no_mri.py +++ b/tutorials/source-modeling/plot_eeg_no_mri.py @@ -16,13 +16,18 @@ :local: :depth: 2 +Adult template MRI (fsaverage) +------------------------------ +First we show how ``fsaverage`` can be used as a surrogate subject. """ # Authors: Alexandre Gramfort # Joan Massich +# Eric Larson # # License: BSD Style. import os.path as op +import numpy as np import mne from mne.datasets import eegbci @@ -40,7 +45,7 @@ ############################################################################## # Load the data -# ------------- +# ^^^^^^^^^^^^^ # # We use here EEG data from the BCI dataset. # @@ -69,7 +74,7 @@ ############################################################################## # Setup source space and compute forward -# -------------------------------------- +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ fwd = mne.make_forward_solution(raw.info, trans=trans, src=src, bem=bem, eeg=True, mindist=5.0, n_jobs=1) @@ -79,3 +84,61 @@ eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed') brain = eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir, clim=dict(lims=[5, 50, 100])) + +############################################################################## +# From here on, standard inverse imaging methods can be used! 
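+# For instance, a minimal sketch of one such computation could look like the
+# following (two assumptions here: an ad hoc diagonal noise covariance stands
+# in for a properly estimated one, and the average EEG reference projection
+# was added when the data were loaded above):
+
+cov = mne.make_ad_hoc_cov(raw.info)
+inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov)
+stc_dspm = mne.minimum_norm.apply_inverse_raw(raw, inv, lambda2=1. / 9.,
+                                              method='dSPM')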
+# +# Infant MRI surrogates +# --------------------- +# We don't have a sample infant dataset for MNE, so let's fake a 10-20 one: + +ch_names = \ + 'Fz Cz Pz Oz Fp1 Fp2 F3 F4 F7 F8 C3 C4 T7 T8 P3 P4 P7 P8 O1 O2'.split() +data = np.random.RandomState(0).randn(len(ch_names), 1000) +info = mne.create_info(ch_names, 1000., 'eeg') +raw = mne.io.RawArray(data, info) + +############################################################################## +# Get an infant MRI template +# ^^^^^^^^^^^^^^^^^^^^^^^^^^ +# To use an infant head model for M/EEG data, you can use +# :func:`mne.datasets.fetch_infant_template` to download an infant template: + +subject = mne.datasets.fetch_infant_template('6mo', subjects_dir, verbose=True) + +############################################################################## +# It comes with several helpful built-in files, including a 10-20 montage +# in the MRI coordinate frame, which can be used to compute the +# MRI<->head transform ``trans``: +fname_1020 = op.join(subjects_dir, subject, 'montages', '10-20-montage.fif') +mon = mne.channels.read_dig_fif(fname_1020) +mon.rename_channels( + {f'EEG{ii:03d}': ch_name for ii, ch_name in enumerate(ch_names, 1)}) +trans = mne.channels.compute_native_head_t(mon) +raw.set_montage(mon) +print(trans) + +############################################################################## +# There are also BEM and source spaces: + +bem_dir = op.join(subjects_dir, subject, 'bem') +fname_src = op.join(bem_dir, f'{subject}-oct-6-src.fif') +src = mne.read_source_spaces(fname_src) +print(src) +fname_bem = op.join(bem_dir, f'{subject}-5120-5120-5120-bem-sol.fif') +bem = mne.read_bem_solution(fname_bem) + +############################################################################## +# You can ensure everything is as expected by plotting the result: +fig = mne.viz.plot_alignment( + raw.info, subject=subject, subjects_dir=subjects_dir, trans=trans, + src=src, bem=bem, coord_frame='mri', mri_fiducials=True, show_axes=True, + surfaces=('white', 'outer_skin', 'inner_skull', 'outer_skull')) +mne.viz.set_3d_view(fig, 25, 70, focalpoint=[0, -0.005, 0.01]) + +############################################################################## +# From here, standard forward and inverse operators can be computed +# +# If you have digitized head positions or MEG data, consider using +# :ref:`mne coreg` to warp a suitable infant template MRI to your +# digitization information. From 94a75a5c6c4124c950d96c9eec7e1d69c068b7ef Mon Sep 17 00:00:00 2001 From: Marijn van Vliet Date: Thu, 21 Jan 2021 15:05:32 +0200 Subject: [PATCH 073/387] Add time unit to the docstring of make_fixed_length_events (#8768) --- mne/event.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mne/event.py b/mne/event.py index 8a76d92e704..ee2a2d84b6f 100644 --- a/mne/event.py +++ b/mne/event.py @@ -858,12 +858,12 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., id : int The id to use (default 1). start : float - Time of first event. + Time of first event (in seconds). stop : float | None - Maximum time of last event. If None, events extend to the end - of the recording. + Maximum time of last event (in seconds). If None, events extend to the + end of the recording. duration : float - The duration to separate events by. + The duration to separate events by (in seconds). first_samp : bool If True (default), times will have raw.first_samp added to them, as in :func:`mne.find_events`. 
This behavior is not desirable if the @@ -871,7 +871,8 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., have ``raw.first_samp`` added to them, e.g. event times that come from :func:`mne.find_events`. overlap : float - The overlap between events. Must be ``0 <= overlap < duration``. + The overlap between events (in seconds). + Must be ``0 <= overlap < duration``. .. versionadded:: 0.18 From 72c8f61e615b04b2f00e9e36202572cd23ae0c47 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Thu, 21 Jan 2021 16:11:09 +0100 Subject: [PATCH 074/387] MRG, ENH: Add a screenshot button to the notebook 3d backend (#8708) * Add screenshot button * Ensure valid filename * Make the name shorter * Use Brain screenshot * Click on all buttons * Count the buttons too * Add a tool bar to the standard _Renderer * Improve testing of standard _Renderer * DRY a little bit * Use concatenate_images * Make it shorter and more complicated * Fix style * Add centered parameter * Comment slicing * make it work on mac * Remove cruft * Update comments * Remove more comments * Generate screenshot filename * Start over and test * Test both qt and notebook * The pragmatic approach * Improve testing * Fix test * ENH: Faster test * BUG: More explicit height * Fix dangling objects issue * Change order * Try #8082 * FIX: Fix sizing * FIX: Use concatenate_images * FIX: dtype * MAINT: Notebook test * FIX: Flake * Speed up test.ipynb * FIX: Bad Qt/VTK combo Co-authored-by: Alexandre Gramfort Co-authored-by: Eric Larson --- mne/datasets/utils.py | 2 +- mne/label.py | 4 +- mne/viz/__init__.py | 2 +- mne/viz/_brain/_brain.py | 142 +++++++++++++++----------- mne/viz/_brain/tests/test.ipynb | 44 +++++++- mne/viz/_brain/tests/test_brain.py | 53 +++++++++- mne/viz/_brain/tests/test_notebook.py | 2 - mne/viz/backends/_notebook.py | 42 ++++++++ mne/viz/backends/_pyvista.py | 20 ++-- mne/viz/utils.py | 44 ++++++++ 10 files changed, 278 insertions(+), 77 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 506e31ebfe2..2320a5ac39c 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -383,7 +383,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, want_version = _FAKE_VERSION if name == 'fake' else want_version if not need_download and want_version is not None: data_version = _dataset_version(folder_path[0], name) - need_download = data_version != want_version + need_download = LooseVersion(data_version) < LooseVersion(want_version) if need_download: logger.info(f'Dataset {name} version {data_version} out of date, ' f'latest version is {want_version}') diff --git a/mne/label.py b/mne/label.py index e0e6a8ccddd..e596b68c7b3 100644 --- a/mne/label.py +++ b/mne/label.py @@ -1874,9 +1874,11 @@ def _cortex_parcellation(subject, n_parcel, hemis, vertices_, graphs, return labels -def _read_annot_cands(dir_name): +def _read_annot_cands(dir_name, raise_error=True): """List the candidate parcellations.""" if not op.isdir(dir_name): + if not raise_error: + return list() raise IOError('Directory for annotation does not exist: %s', dir_name) cands = os.listdir(dir_name) diff --git a/mne/viz/__init__.py b/mne/viz/__init__.py index 9b54741afe7..a64bb4efdb9 100644 --- a/mne/viz/__init__.py +++ b/mne/viz/__init__.py @@ -6,7 +6,7 @@ from .topo import plot_topo_image_epochs, iter_topography from .utils import (tight_layout, mne_analyze_colormap, compare_fiff, ClickableImage, add_background_image, plot_sensors, - centers_to_edges) + centers_to_edges, concatenate_images) 
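The ``concatenate_images`` helper exported above is what this commit uses to stack the 3D scene screenshot and the matplotlib trace canvas into one image. A minimal sketch of the intended behavior (vertical stacking matches the ``np.concatenate([img, trace_img], axis=0)`` call it replaces below; the padding behavior is inferred from the ``bgcolor`` and ``centered`` parameters named in the commit message):

    import numpy as np
    from mne.viz import concatenate_images

    scene = np.zeros((100, 80, 3), dtype=np.uint8)      # e.g. the 3D scene
    traces = np.full((50, 60, 3), 255, dtype=np.uint8)  # e.g. the trace plot
    img = concatenate_images([scene, traces])  # stack along the first axis
    assert img.shape == (150, 80, 3)  # narrower image padded to the max width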
from ._3d import (plot_sparse_source_estimates, plot_source_estimates, plot_vector_source_estimates, plot_evoked_field, plot_dipole_locations, snapshot_brain_montage, diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 142c0320192..a3bff001192 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -9,6 +9,7 @@ import contextlib from functools import partial +from io import BytesIO import os import os.path as op import sys @@ -27,7 +28,7 @@ from .callback import (ShowView, IntSlider, TimeSlider, SmartSlider, BumpColorbarPoints, UpdateColorbarScale) -from ..utils import _show_help, _get_color_list +from ..utils import _show_help, _get_color_list, concatenate_images from .._3d import _process_clim, _handle_time, _check_views from ...externals.decorator import decorator @@ -593,6 +594,7 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): self._configure_picking() self._configure_tool_bar() if self.notebook: + self._renderer._set_tool_bar(state=False) self.show() self._configure_trace_mode() self.toggle_interface() @@ -655,9 +657,20 @@ def ensure_minimum_sizes(self): yield finally: self.splitter.setSizes([sz[1], mpl_h]) + # 1. Process events _process_events(self.plotter) _process_events(self.plotter) - self.mpl_canvas.canvas.setMinimumSize(0, 0) + # 2. Get the window size that accommodates the size + sz = self.plotter.app_window.size() + # 3. Call app_window.setBaseSize and resize (in pyvistaqt) + self.plotter.window_size = (sz.width(), sz.height()) + # 4. Undo the min size setting and process events + self.plotter.interactor.setMinimumSize(0, 0) + _process_events(self.plotter) + _process_events(self.plotter) + # 5. Resize the window (again!) to the correct size + # (not sure why, but this is required on macOS at least) + self.plotter.window_size = (sz.width(), sz.height()) _process_events(self.plotter) _process_events(self.plotter) # sizes could change, update views @@ -1145,15 +1158,16 @@ def _set_annot(annot): from PyQt5.QtWidgets import QComboBox, QLabel dir_name = op.join(self._subjects_dir, self._subject_id, 'label') - cands = _read_annot_cands(dir_name) + cands = _read_annot_cands(dir_name, raise_error=False) self.tool_bar.addSeparator() self.tool_bar.addWidget(QLabel("Annotation")) self._annot_cands_widget = QComboBox() self.tool_bar.addWidget(self._annot_cands_widget) - self._annot_cands_widget.addItem('None') + cands = cands + ['None'] for cand in cands: self._annot_cands_widget.addItem(cand) self.annot = cands[0] + del cands # setup label extraction parameters def _set_label_mode(mode): @@ -1215,7 +1229,14 @@ def _save_movie_noname(self): return self.save_movie(None) def _screenshot(self): - if not self.notebook: + if self.notebook: + from PIL import Image + fname = self.actions.get("screenshot_field").value + fname = self._renderer._get_screenshot_filename() \ + if len(fname) == 0 else fname + img = self.screenshot(fname, time_viewer=True) + Image.fromarray(img).save(fname) + else: self.plotter._qt_screenshot() def _initialize_actions(self): @@ -1223,14 +1244,13 @@ def _initialize_actions(self): self._load_icons() self.tool_bar = self.window.addToolBar("toolbar") - def _add_action(self, name, desc, func, icon_name, qt_icon_name=None, + def _add_button(self, name, desc, func, icon_name, qt_icon_name=None, notebook=True): if self.notebook: if not notebook: return - from ipywidgets import Button - self.actions[name] = Button(description=desc, icon=icon_name) - self.actions[name].on_click(lambda x: func()) + self.actions[name] = 
self._renderer._add_button( + desc, func, icon_name) else: qt_icon_name = name if qt_icon_name is None else qt_icon_name self.actions[name] = self.tool_bar.addAction( @@ -1239,61 +1259,71 @@ def _add_action(self, name, desc, func, icon_name, qt_icon_name=None, func, ) + def _add_text_field(self, name, value, placeholder): + if not self.notebook: + return + self.actions[name] = self._renderer._add_text_field( + value, placeholder) + def _configure_tool_bar(self): self._initialize_actions() - self._add_action( + self._add_button( name="screenshot", desc="Take a screenshot", func=self._screenshot, - icon_name=None, - notebook=False, + icon_name="camera", + ) + self._add_text_field( + name="screenshot_field", + value=None, + placeholder="Type a file name", ) - self._add_action( + self._add_button( name="movie", desc="Save movie...", func=self._save_movie_noname, icon_name=None, notebook=False, ) - self._add_action( + self._add_button( name="visibility", desc="Toggle Visibility", func=self.toggle_interface, icon_name="eye", qt_icon_name="visibility_on", ) - self._add_action( + self._add_button( name="play", desc="Play/Pause", func=self.toggle_playback, icon_name=None, notebook=False, ) - self._add_action( + self._add_button( name="reset", desc="Reset", func=self.reset, icon_name="history", ) - self._add_action( + self._add_button( name="scale", desc="Auto-Scale", func=self.apply_auto_scaling, icon_name="magic", ) - self._add_action( + self._add_button( name="restore", desc="Restore scaling", func=self.restore_user_scaling, icon_name="reply", ) - self._add_action( + self._add_button( name="clear", desc="Clear traces", func=self.clear_glyphs, icon_name="trash", ) - self._add_action( + self._add_button( name="help", desc="Help", func=self.help, @@ -1302,10 +1332,7 @@ def _configure_tool_bar(self): ) if self.notebook: - from IPython import display - from ipywidgets import HBox - self.tool_bar = HBox(tuple(self.actions.values())) - display.display(self.tool_bar) + self.tool_bar = self._renderer._show_tool_bar(self.actions) else: # Qt shortcuts self.actions["movie"].setShortcut("ctrl+shift+s") @@ -1593,6 +1620,7 @@ def plot_time_course(self, hemi, vertex_id, color): if self.mpl_canvas is None: return time = self._data['time'].copy() # avoid circular ref + mni = None if hemi == 'vol': hemi_str = 'V' xfm = read_talxfm( @@ -1605,15 +1633,20 @@ def plot_time_course(self, hemi, vertex_id, color): mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk) else: hemi_str = 'L' if hemi == 'lh' else 'R' - mni = vertex_to_mni( - vertices=vertex_id, - hemis=0 if hemi == 'lh' else 1, - subject=self._subject_id, - subjects_dir=self._subjects_dir - ) - label = "{}:{} MNI: {}".format( - hemi_str, str(vertex_id).ljust(6), - ', '.join('%5.1f' % m for m in mni)) + try: + mni = vertex_to_mni( + vertices=vertex_id, + hemis=0 if hemi == 'lh' else 1, + subject=self._subject_id, + subjects_dir=self._subjects_dir + ) + except Exception: + mni = None + if mni is not None: + mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni) + else: + mni = '' + label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni) act_data, smooth = self.act_data_smooth[hemi] if smooth is not None: act_data = smooth[vertex_id].dot(act_data)[0] @@ -2594,32 +2627,23 @@ def screenshot(self, mode='rgb', time_viewer=False): not self.separate_canvas: canvas = self.mpl_canvas.fig.canvas canvas.draw_idle() - # In theory, one of these should work: - # - # trace_img = np.frombuffer( - # canvas.tostring_rgb(), dtype=np.uint8) - # trace_img.shape = 
canvas.get_width_height()[::-1] + (3,) - # - # or - # - # trace_img = np.frombuffer( - # canvas.tostring_rgb(), dtype=np.uint8) - # size = time_viewer.mpl_canvas.getSize() - # trace_img.shape = (size.height(), size.width(), 3) - # - # But in practice, sometimes the sizes does not match the - # renderer tostring_rgb() size. So let's directly use what - # matplotlib does in lib/matplotlib/backends/backend_agg.py - # before calling tobytes(): - trace_img = np.asarray( - canvas.renderer._renderer).take([0, 1, 2], axis=2) - # need to slice into trace_img because generally it's a bit - # smaller - delta = trace_img.shape[1] - img.shape[1] - if delta > 0: - start = delta // 2 - trace_img = trace_img[:, start:start + img.shape[1]] - img = np.concatenate([img, trace_img], axis=0) + fig = self.mpl_canvas.fig + with BytesIO() as output: + # Need to pass dpi here so it uses the physical (HiDPI) DPI + # rather than logical DPI when saving in most cases. + # But when matplotlib uses HiDPI and VTK doesn't + # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work, + # so let's just calculate the DPI we need to get + # the correct size output based on the widths being equal + dpi = img.shape[1] / fig.get_size_inches()[0] + fig.savefig(output, dpi=dpi, format='raw', + facecolor=self._bg_color, edgecolor='none') + output.seek(0) + trace_img = np.reshape( + np.frombuffer(output.getvalue(), dtype=np.uint8), + newshape=(-1, img.shape[1], 4))[:, :, :3] + img = concatenate_images( + [img, trace_img], bgcolor=self._brain_color[:3]) return img @fill_doc diff --git a/mne/viz/_brain/tests/test.ipynb b/mne/viz/_brain/tests/test.ipynb index 80a8bec809e..ec7bfc13e60 100644 --- a/mne/viz/_brain/tests/test.ipynb +++ b/mne/viz/_brain/tests/test.ipynb @@ -27,9 +27,12 @@ "metadata": {}, "outputs": [], "source": [ + "from contextlib import contextmanager\n", "import os\n", - "import mne\n", + "from numpy.testing import assert_allclose\n", + "from ipywidgets import Button\n", "import matplotlib.pyplot as plt\n", + "import mne\n", "from mne.datasets import testing\n", "data_path = testing.data_path()\n", "sample_dir = os.path.join(data_path, 'MEG', 'sample')\n", @@ -39,16 +42,40 @@ "initial_time = 0.13\n", "mne.viz.set_3d_backend('notebook')\n", "brain_class = mne.viz.get_brain_class()\n", - "for interactive_state in (False, True):\n", - " plt.interactive(interactive_state)\n", + "\n", + "\n", + "@contextmanager\n", + "def interactive(on):\n", + " old = plt.isinteractive()\n", + " plt.interactive(on)\n", + " try:\n", + " yield\n", + " finally:\n", + " plt.interactive(old)\n", + "\n", + "with interactive(False):\n", " brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,\n", " clim=dict(kind='value', pos_lims=[3, 6, 9]),\n", " time_viewer=True,\n", - " hemi='split')\n", + " show_traces=True,\n", + " hemi='lh', size=300)\n", " assert isinstance(brain, brain_class)\n", " assert brain.notebook\n", " assert brain._renderer.figure.display is not None\n", " brain._update()\n", + " total_number_of_buttons = len([k for k in brain.actions.keys() if '_field' not in k])\n", + " number_of_buttons = 0\n", + " for action in brain.actions.values():\n", + " if isinstance(action, Button):\n", + " action.click()\n", + " number_of_buttons += 1\n", + " assert number_of_buttons == total_number_of_buttons\n", + " img_nv = brain.screenshot()\n", + " assert img_nv.shape == (300, 300, 3), img_nv.shape\n", + " img_v = brain.screenshot(time_viewer=True)\n", + " assert img_v.shape[1:] == (300, 3), img_v.shape\n", + " # XXX This rtol is 
not very good, ideally would be zero\n", + " assert_allclose(img_v.shape[0], img_nv.shape[0] * 1.25, err_msg=img_nv.shape, rtol=0.1)\n", " brain.close()" ] }, @@ -66,6 +93,13 @@ "mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, 0, 0])\n", "assert fig.display is None\n", "rend.show()\n", + "total_number_of_buttons = len([k for k in rend.actions.keys() if '_field' not in k])\n", + "number_of_buttons = 0\n", + "for action in rend.actions.values():\n", + " if isinstance(action, Button):\n", + " action.click()\n", + " number_of_buttons += 1\n", + "assert number_of_buttons == total_number_of_buttons\n", "assert fig.display is not None" ] } @@ -86,4 +120,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 6a0897bf12b..e206983d211 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -17,7 +17,7 @@ from mne import (read_source_estimate, read_evokeds, read_cov, read_forward_solution, pick_types_forward, - SourceEstimate, MixedSourceEstimate, + SourceEstimate, MixedSourceEstimate, write_surface, VolSourceEstimate) from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.source_space import (read_source_spaces, vertex_to_mni, @@ -369,6 +369,57 @@ def test_brain_save_movie(tmpdir, renderer, brain_gc): brain.close() +_TINY_SIZE = (300, 250) + + +def tiny(tmpdir): + """Create a tiny fake brain.""" + # This is a minimal version of what we need for our viz-with-timeviewer + # support currently + subject = 'test' + subject_dir = tmpdir.mkdir(subject) + surf_dir = subject_dir.mkdir('surf') + rng = np.random.RandomState(0) + rr = rng.randn(4, 3) + tris = np.array([[0, 1, 2], [2, 1, 3]]) + curv = rng.randn(len(rr)) + with open(surf_dir.join('lh.curv'), 'wb') as fid: + fid.write(np.array([255, 255, 255], dtype=np.uint8)) + fid.write(np.array([len(rr), 0, 1], dtype='>i4')) + fid.write(curv.astype('>f4')) + write_surface(surf_dir.join('lh.white'), rr, tris) + write_surface(surf_dir.join('rh.white'), rr, tris) # needed for vertex tc + vertices = [np.arange(len(rr)), []] + data = rng.randn(len(rr), 10) + stc = SourceEstimate(data, vertices, 0, 1, subject) + brain = stc.plot(subjects_dir=tmpdir, hemi='lh', surface='white', + size=_TINY_SIZE) + # in principle this should be sufficient: + # + # ratio = brain.mpl_canvas.canvas.window().devicePixelRatio() + # + # but in practice VTK can mess up sizes, so let's just calculate it. 
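+    # Illustration: if the plotter reports a logical (Qt) size of 300x250
+    # while the renderer's GetSize() reports 600x500 physical pixels, the
+    # median elementwise ratio is 2.0, i.e. the device pixel ratio used to
+    # scale the expected screenshot dimensions in the test below.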
+ sz = brain.plotter.size() + sz = (sz.width(), sz.height()) + sz_ren = brain.plotter.renderer.GetSize() + ratio = np.median(np.array(sz_ren) / np.array(sz)) + return brain, ratio + + +def test_brain_screenshot(renderer_interactive, tmpdir, brain_gc): + """Test time viewer screenshot.""" + if renderer_interactive._get_3d_backend() != 'pyvista': + pytest.skip('TimeViewer tests only supported on PyVista') + tiny_brain, ratio = tiny(tmpdir) + img_nv = tiny_brain.screenshot(time_viewer=False) + want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3) + assert img_nv.shape == want + img_v = tiny_brain.screenshot(time_viewer=True) + assert img_v.shape[1:] == want[1:] + assert_allclose(img_v.shape[0], want[0] * 4 / 3, atol=3) # some slop + tiny_brain.close() + + @testing.requires_testing_data @pytest.mark.slowtest def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): diff --git a/mne/viz/_brain/tests/test_notebook.py b/mne/viz/_brain/tests/test_notebook.py index 48c65c2d066..7c159326b74 100644 --- a/mne/viz/_brain/tests/test_notebook.py +++ b/mne/viz/_brain/tests/test_notebook.py @@ -1,5 +1,4 @@ import os -import pytest from mne.datasets import testing from mne.utils import requires_version @@ -7,7 +6,6 @@ PATH = os.path.dirname(os.path.realpath(__file__)) -@pytest.mark.slowtest @testing.requires_testing_data @requires_version('nbformat') @requires_version('nbclient') diff --git a/mne/viz/backends/_notebook.py b/mne/viz/backends/_notebook.py index 761f0b8a60f..e8bda5436d0 100644 --- a/mne/viz/backends/_notebook.py +++ b/mne/viz/backends/_notebook.py @@ -10,11 +10,53 @@ class _Renderer(_PyVistaRenderer): def __init__(self, *args, **kwargs): + self.tool_bar_state = True + self.tool_bar = None + self.actions = dict() kwargs["notebook"] = True super().__init__(*args, **kwargs) + def _screenshot(self): + fname = self.actions.get("screenshot_field").value + fname = self._get_screenshot_filename() if len(fname) == 0 else fname + self.screenshot(filename=fname) + + def _set_tool_bar(self, state): + self.tool_bar_state = state + + def _add_button(self, desc, func, icon_name): + from ipywidgets import Button + button = Button(tooltip=desc, icon=icon_name) + button.on_click(lambda x: func()) + return button + + def _add_text_field(self, value, placeholder): + from ipywidgets import Text + return Text(value=value, placeholder=placeholder) + + def _show_tool_bar(self, actions): + from IPython import display + from ipywidgets import HBox + tool_bar = HBox(tuple(actions.values())) + display.display(tool_bar) + return tool_bar + + def _configure_tool_bar(self): + self.actions["screenshot"] = self._add_button( + desc="Take a screenshot", + func=self._screenshot, + icon_name="camera", + ) + self.actions["screenshot_field"] = self._add_text_field( + value=None, + placeholder="Type a file name", + ) + self.tool_bar = self._show_tool_bar(self.actions) + def show(self): from IPython.display import display + if self.tool_bar_state: + self._configure_tool_bar() self.figure.display = self.plotter.show(use_ipyvtk=True, return_viewer=True) self.figure.display.layout.width = None # unlock the fixed layout diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 1b340d2a974..51e2492777d 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -12,6 +12,7 @@ # License: Simplified BSD from contextlib import contextmanager +from datetime import datetime from distutils.version import LooseVersion import os import sys @@ -212,6 +213,11 @@ def __init__(self, fig=None, 
size=(600, 600), bgcolor='black',
         self.update_lighting()
 
+    def _get_screenshot_filename(self):
+        now = datetime.now()
+        dt_string = now.strftime("_%Y-%m-%d_%H-%M-%S")
+        return "MNE" + dt_string + ".png"
+
     @contextmanager
     def ensure_minimum_sizes(self):
         sz = self.figure.store['window_size']
@@ -227,17 +233,17 @@ def ensure_minimum_sizes(self):
         # 1. Process events
         _process_events(self.plotter)
         _process_events(self.plotter)
-        # 2. Get the window size that accommodates the size
-        sz = self.plotter.app_window.size()
-        # 3. Call app_window.setBaseSize and resize (in pyvistaqt)
-        self.plotter.window_size = (sz.width(), sz.height())
-        # 4. Undo the min size setting and process events
+        # 2. Get the window and interactor sizes that work
+        win_sz = self.plotter.app_window.size()
+        ren_sz = self.plotter.interactor.size()
+        # 3. Undo the min size setting and process events
         self.plotter.interactor.setMinimumSize(0, 0)
         _process_events(self.plotter)
         _process_events(self.plotter)
-        # 5. Resize the window (again!) to the correct size
+        # 4. Resize the window and interactor to the correct size
         # (not sure why, but this is required on macOS at least)
-        self.plotter.window_size = (sz.width(), sz.height())
+        self.plotter.window_size = (win_sz.width(), win_sz.height())
+        self.plotter.interactor.resize(ren_sz.width(), ren_sz.height())
         _process_events(self.plotter)
         _process_events(self.plotter)
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index 515311b365d..5a09e7dfefb 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -2287,3 +2287,47 @@ def _ndarray_to_fig(img):
     fig = _figure_agg(dpi=dpi, figsize=figsize, frameon=False)
     fig.figimage(img, resize=True)
     return fig
+
+
+def concatenate_images(images, axis=0, bgcolor='black', centered=True):
+    """Concatenate a list of images.
+
+    Parameters
+    ----------
+    images : list of ndarray
+        The list of images to concatenate.
+    axis : 0 or 1
+        The images are concatenated vertically if 0 and horizontally
+        otherwise. The default orientation is therefore vertical.
+    bgcolor : str | list
+        The color of the background. The name of the color is accepted
+        (e.g., 'red') or a list of RGB values between 0 and 1. Defaults to
+        'black'.
+    centered : bool
+        If True, the images are centered. Defaults to True.
+
+    Returns
+    -------
+    img : ndarray
+        The concatenated image.
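+
+    Examples
+    --------
+    Two images stacked vertically (the default, ``axis=0``); heights
+    add up and the output width is that of the widest input:
+
+    >>> import numpy as np
+    >>> a = np.zeros((100, 80, 3), np.uint8)
+    >>> b = np.zeros((50, 60, 3), np.uint8)
+    >>> concatenate_images([a, b]).shape
+    (150, 80, 3)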
+ """ + from matplotlib.colors import colorConverter + if isinstance(bgcolor, str): + bgcolor = colorConverter.to_rgb(bgcolor) + bgcolor = np.asarray(bgcolor) * 255 + funcs = [np.sum, np.max] + ret_shape = np.asarray([ + funcs[axis]([image.shape[0] for image in images]), + funcs[1 - axis]([image.shape[1] for image in images]), + ]) + ret = np.zeros((ret_shape[0], ret_shape[1], 3), dtype=np.uint8) + ret[:, :, :] = bgcolor + ptr = np.array([0, 0]) + sec = np.array([0 == axis, 1 == axis]).astype(int) + for image in images: + shape = image.shape[:-1] + dec = ptr + dec += ((ret_shape - shape) // 2) * (1 - sec) if centered else 0 + ret[dec[0]:dec[0] + shape[0], dec[1]:dec[1] + shape[1], :] = image + ptr += shape * sec + return ret From 9a2031897735de9b7ffc0a9658b7c4f89b36d88c Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Fri, 22 Jan 2021 10:17:59 +0100 Subject: [PATCH 075/387] Use renderer (#8769) --- mne/viz/_brain/_brain.py | 85 +++----- mne/viz/backends/_pyvista.py | 372 +++++++++++++++++------------------ mne/viz/backends/_utils.py | 15 ++ 3 files changed, 226 insertions(+), 246 deletions(-) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index a3bff001192..b9e1aff7596 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -170,8 +170,7 @@ def remove_overlay(self, names): def _update(self): if self._cache is None: return - from ..backends._pyvista import _set_mesh_scalars - _set_mesh_scalars( + self._renderer._set_mesh_scalars( mesh=self._polydata, scalars=self._cache, name=self._default_scalars_name, @@ -547,7 +546,6 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): self._label_mode_widget = None # Direct access parameters: - self._iren = self._renderer.plotter.iren self.tool_bar = None if self.notebook: self.main_menu = None @@ -644,7 +642,6 @@ def _clean(self): @contextlib.contextmanager def ensure_minimum_sizes(self): """Ensure that widgets respect the windows size.""" - from ..backends._pyvista import _process_events sz = self._size adjust_mpl = self.show_traces and not self.separate_canvas if not adjust_mpl: @@ -658,26 +655,26 @@ def ensure_minimum_sizes(self): finally: self.splitter.setSizes([sz[1], mpl_h]) # 1. Process events - _process_events(self.plotter) - _process_events(self.plotter) + self._renderer._process_events() + self._renderer._process_events() # 2. Get the window size that accommodates the size sz = self.plotter.app_window.size() # 3. Call app_window.setBaseSize and resize (in pyvistaqt) self.plotter.window_size = (sz.width(), sz.height()) # 4. Undo the min size setting and process events self.plotter.interactor.setMinimumSize(0, 0) - _process_events(self.plotter) - _process_events(self.plotter) + self._renderer._process_events() + self._renderer._process_events() # 5. Resize the window (again!) to the correct size # (not sure why, but this is required on macOS at least) self.plotter.window_size = (sz.width(), sz.height()) - _process_events(self.plotter) - _process_events(self.plotter) + self._renderer._process_events() + self._renderer._process_events() # sizes could change, update views for hemi in ('lh', 'rh'): for ri, ci, v in self._iter_views(hemi): self.show_view(view=v, row=ri, col=ci) - _process_events(self.plotter) + self._renderer._process_events() def toggle_interface(self, value=None): """Toggle the interface. 
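
The same mechanical pattern repeats throughout this commit: free helper
functions that were imported from the PyVista backend become methods on the
renderer abstraction. A minimal before/after sketch, with the names taken
from the ``_update`` hunk above::

    # before: _brain.py reaches into a specific backend for the helper
    from ..backends._pyvista import _set_mesh_scalars
    _set_mesh_scalars(mesh=self._polydata, scalars=self._cache,
                      name=self._default_scalars_name)

    # after: the call is dispatched through the renderer, so _brain.py
    # stays agnostic about which 3D backend is in use
    self._renderer._set_mesh_scalars(mesh=self._polydata,
                                     scalars=self._cache,
                                     name=self._default_scalars_name)
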
@@ -1099,8 +1096,6 @@ def _configure_vertex_time_course(self): self._add_vertex_glyph(hemi, mesh, vertex_id) def _configure_picking(self): - from ..backends._pyvista import _update_picking_callback - # get data for each hemi for idx, hemi in enumerate(['vol', 'lh', 'rh']): hemi_data = self._data.get(hemi) @@ -1117,8 +1112,7 @@ def _configure_picking(self): (vertices, np.arange(len(vertices))))) self.act_data_smooth[hemi] = (act_data, smooth_mat) - _update_picking_callback( - self.plotter, + self._renderer._update_picking_callback( self._on_mouse_move, self._on_button_press, self._on_button_release, @@ -1211,8 +1205,8 @@ def _set_label_mode(mode): def _load_icons(self): from PyQt5.QtGui import QIcon - from ..backends._pyvista import _init_resources - _init_resources() + from ..backends._utils import _init_qt_resources + _init_qt_resources() self.icons["help"] = QIcon(":/help.svg") self.icons["play"] = QIcon(":/play.svg") self.icons["pause"] = QIcon(":/pause.svg") @@ -1501,7 +1495,6 @@ def _add_vertex_glyph(self, hemi, mesh, vertex_id): # skip if the wrong hemi is selected if self.act_data_smooth[hemi][0] is None: return - from ..backends._pyvista import _sphere color = next(self.color_cycle) line = self.plot_time_course(hemi, vertex_id, color) if hemi == 'vol': @@ -1535,8 +1528,7 @@ def _add_vertex_glyph(self, hemi, mesh, vertex_id): # mitigated with synchronization/delay?) # 2) the glyph filter is used in renderer.sphere() but only one # sphere is required in this function. - actor, sphere = _sphere( - plotter=self.plotter, + actor, sphere = self._renderer._sphere( center=np.array(center), color=color, radius=4.0, @@ -1700,7 +1692,6 @@ def help(self): ) def _clear_callbacks(self): - from ..backends._pyvista import _remove_picking_callback if not hasattr(self, 'callbacks'): return for callback in self.callbacks.values(): @@ -1712,8 +1703,6 @@ def _clear_callbacks(self): if hasattr(callback, "slider_rep"): callback.slider_rep = None self.callbacks.clear() - if self.show_traces: - _remove_picking_callback(self._iren, self.plotter.picker) @property def interaction(self): @@ -2029,7 +2018,6 @@ def remove_annotations(self): self._update() def _add_volume_data(self, hemi, src, volume_options): - from ..backends._pyvista import _volume _validate_type(src, SourceSpaces, 'src') _check_option('src.kind', src.kind, ('volume',)) _validate_type( @@ -2099,8 +2087,9 @@ def _add_volume_data(self, hemi, src, volume_options): scalars = np.zeros(np.prod(dimensions)) scalars[vertices] = 1. 
# for the outer mesh grid, grid_mesh, volume_pos, volume_neg = \ - _volume(dimensions, origin, spacing, scalars, surface_alpha, - resolution, blending, center) + self._renderer._volume(dimensions, origin, spacing, scalars, + surface_alpha, resolution, blending, + center) self._data[hemi]['alpha'] = alpha # incorrectly set earlier self._data[hemi]['grid'] = grid self._data[hemi]['grid_mesh'] = grid_mesh @@ -2118,7 +2107,6 @@ def _add_volume_data(self, hemi, src, volume_options): actor_neg = None grid_mesh = self._data[hemi]['grid_mesh'] if grid_mesh is not None: - import vtk _, prop = self._renderer.plotter.add_actor( grid_mesh, reset_camera=False, name=None, culling=False, pickable=False) @@ -2127,20 +2115,12 @@ def _add_volume_data(self, hemi, src, volume_options): if silhouette_alpha > 0 and silhouette_linewidth > 0: for ri, ci, v in self._iter_views('vol'): self._renderer.subplot(ri, ci) - grid_silhouette = vtk.vtkPolyDataSilhouette() - grid_silhouette.SetInputData(grid_mesh.GetInput()) - grid_silhouette.SetCamera( - self._renderer.plotter.renderer.GetActiveCamera()) - grid_silhouette.SetEnableFeatureAngle(0) - grid_silhouette_mapper = vtk.vtkPolyDataMapper() - grid_silhouette_mapper.SetInputConnection( - grid_silhouette.GetOutputPort()) - _, prop = self._renderer.plotter.add_actor( - grid_silhouette_mapper, reset_camera=False, name=None, - culling=False, pickable=False) - prop.SetColor(*self._brain_color[:3]) - prop.SetOpacity(silhouette_alpha) - prop.SetLineWidth(silhouette_linewidth) + self._renderer._silhouette( + mesh=grid_mesh.GetInput(), + color=self._brain_color[:3], + line_width=silhouette_linewidth, + alpha=silhouette_alpha, + ) return actor_pos, actor_neg @@ -2534,9 +2514,8 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, ) self._annots[hemi].append(annot) if not self.time_viewer or self.traces_mode == 'vertex': - from ..backends._pyvista import _set_colormap_range - _set_colormap_range(mesh._actor, cmap.astype(np.uint8), - None) + self._renderer._set_colormap_range( + mesh._actor, cmap.astype(np.uint8), None) self._update() @@ -2654,7 +2633,6 @@ def update_lut(self, fmin=None, fmid=None, fmax=None): ---------- %(fmin_fmid_fmax)s """ - from ..backends._pyvista import _set_colormap_range, _set_volume_range center = self._data['center'] colormap = self._data['colormap'] transparent = self._data['transparent'] @@ -2684,15 +2662,16 @@ def update_lut(self, fmin=None, fmid=None, fmax=None): mesh = self._layered_meshes[hemi] mesh.update_overlay(name='data', colormap=self._data['ctable']) - _set_colormap_range(mesh._actor, ctable, scalar_bar, rng, - self._brain_color) + self._renderer._set_colormap_range( + mesh._actor, ctable, scalar_bar, rng, + self._brain_color) scalar_bar = None grid_volume_pos = hemi_data.get('grid_volume_pos') grid_volume_neg = hemi_data.get('grid_volume_neg') for grid_volume in (grid_volume_pos, grid_volume_neg): if grid_volume is not None: - _set_volume_range( + self._renderer._set_volume_range( grid_volume, ctable, hemi_data['alpha'], scalar_bar, rng) scalar_bar = None @@ -2700,7 +2679,7 @@ def update_lut(self, fmin=None, fmid=None, fmax=None): glyph_actor = hemi_data.get('glyph_actor') if glyph_actor is not None: for glyph_actor_ in glyph_actor: - _set_colormap_range( + self._renderer._set_colormap_range( glyph_actor_, ctable, scalar_bar, rng) scalar_bar = None @@ -2862,7 +2841,6 @@ def set_time(self, time): f'available times ({min(self._times)}-{max(self._times)} s).') def _update_glyphs(self, hemi, vectors): - from ..backends._pyvista 
import _set_colormap_range, _create_actor hemi_data = self._data.get(hemi) assert hemi_data is not None vertices = hemi_data['vertices'] @@ -2897,7 +2875,7 @@ def _update_glyphs(self, hemi, vectors): glyph_dataset.point_arrays['vec'] = vectors glyph_mapper = hemi_data['glyph_mapper'] if add: - glyph_actor = _create_actor(glyph_mapper) + glyph_actor = self._renderer._actor(glyph_mapper) prop = glyph_actor.GetProperty() prop.SetLineWidth(2.) prop.SetOpacity(vector_alpha) @@ -2906,7 +2884,7 @@ def _update_glyphs(self, hemi, vectors): else: glyph_actor = hemi_data['glyph_actor'][count] count += 1 - _set_colormap_range( + self._renderer._set_colormap_range( actor=glyph_actor, ctable=self._data['ctable'], scalar_bar=None, @@ -2985,8 +2963,7 @@ def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): import imageio - from ..backends._pyvista import _disabled_interaction - with _disabled_interaction(self._renderer): + with self._renderer._disabled_interaction(): images = self._make_movie_frames( time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer) diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 51e2492777d..8397b9ac904 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -23,10 +23,9 @@ from .base_renderer import _BaseRenderer from ._utils import (_get_colormap_from_array, _alpha_blend_background, - ALLOWED_QUIVER_MODES) + ALLOWED_QUIVER_MODES, _init_qt_resources) from ...fixes import _get_args from ...utils import copy_base_doc_to_subclass_doc, _check_option -from ...externals.decorator import decorator with warnings.catch_warnings(): @@ -98,7 +97,7 @@ def build(self): self.plotter = plotter if self.plotter_class is BackgroundPlotter and \ hasattr(BackgroundPlotter, 'set_icon'): - _init_resources() + _init_qt_resources() _process_events(plotter) plotter.set_icon(":/mne-icon.png") _process_events(self.plotter) @@ -666,13 +665,185 @@ def remove_mesh(self, mesh_data): actor, _ = mesh_data self.plotter.remove_actor(actor) + @contextmanager + def _disabled_interaction(self): + if not self.plotter.renderer.GetInteractive(): + yield + else: + self.plotter.disable() + try: + yield + finally: + self.plotter.enable() + + def _actor(self, mapper=None): + actor = vtk.vtkActor() + if mapper is not None: + actor.SetMapper(mapper) + return actor -def _create_actor(mapper=None): - """Create a vtkActor.""" - actor = vtk.vtkActor() - if mapper is not None: - actor.SetMapper(mapper) - return actor + def _process_events(self): + _process_events(self.plotter) + + def _update_picking_callback(self, + on_mouse_move, + on_button_press, + on_button_release, + on_pick): + self.plotter.iren.AddObserver( + vtk.vtkCommand.RenderEvent, + on_mouse_move + ) + self.plotter.iren.AddObserver( + vtk.vtkCommand.LeftButtonPressEvent, + on_button_press + ) + self.plotter.iren.AddObserver( + vtk.vtkCommand.EndInteractionEvent, + on_button_release + ) + self.plotter.picker = vtk.vtkCellPicker() + self.plotter.picker.AddObserver( + vtk.vtkCommand.EndPickEvent, + on_pick + ) + self.plotter.picker.SetVolumeOpacityIsovalue(0.) + + def _set_mesh_scalars(self, mesh, scalars, name): + # Catch: FutureWarning: Conversion of the second argument of + # issubdtype from `complex` to `np.complexfloating` is deprecated. 
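+        # (catching the warning locally keeps the suppression scoped to
+        # this single point-array assignment)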
+ with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + mesh.point_arrays[name] = scalars + + def _set_colormap_range(self, actor, ctable, scalar_bar, rng=None, + background_color=None): + from vtk.util.numpy_support import numpy_to_vtk + if rng is not None: + mapper = actor.GetMapper() + mapper.SetScalarRange(*rng) + lut = mapper.GetLookupTable() + lut.SetTable(numpy_to_vtk(ctable)) + if scalar_bar is not None: + lut = scalar_bar.GetLookupTable() + if background_color is not None: + background_color = np.array(background_color) * 255 + ctable = _alpha_blend_background(ctable, background_color) + lut.SetTable(numpy_to_vtk(ctable, + array_type=vtk.VTK_UNSIGNED_CHAR)) + lut.SetRange(*rng) + + def _set_volume_range(self, volume, ctable, alpha, scalar_bar, rng): + import vtk + from vtk.util.numpy_support import numpy_to_vtk + color_tf = vtk.vtkColorTransferFunction() + opacity_tf = vtk.vtkPiecewiseFunction() + for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable): + color_tf.AddRGBPoint(loc, *(color[:-1] / 255.)) + opacity_tf.AddPoint(loc, color[-1] * alpha / 255.) + color_tf.ClampingOn() + opacity_tf.ClampingOn() + volume.GetProperty().SetColor(color_tf) + volume.GetProperty().SetScalarOpacity(opacity_tf) + if scalar_bar is not None: + lut = vtk.vtkLookupTable() + lut.SetRange(*rng) + lut.SetTable(numpy_to_vtk(ctable)) + scalar_bar.SetLookupTable(lut) + + def _sphere(self, center, color, radius): + sphere = vtk.vtkSphereSource() + sphere.SetThetaResolution(8) + sphere.SetPhiResolution(8) + sphere.SetRadius(radius) + sphere.SetCenter(center) + sphere.Update() + mesh = pyvista.wrap(sphere.GetOutput()) + actor = _add_mesh( + self.plotter, + mesh=mesh, + color=color + ) + return actor, mesh + + def _volume(self, dimensions, origin, spacing, scalars, + surface_alpha, resolution, blending, center): + # Now we can actually construct the visualization + grid = pyvista.UniformGrid() + grid.dimensions = dimensions + 1 # inject data on the cells + grid.origin = origin + grid.spacing = spacing + grid.cell_arrays['values'] = scalars + + # Add contour of enclosed volume (use GetOutput instead of + # GetOutputPort below to avoid updating) + grid_alg = vtk.vtkCellDataToPointData() + grid_alg.SetInputDataObject(grid) + grid_alg.SetPassCellData(False) + grid_alg.Update() + + if surface_alpha > 0: + grid_surface = vtk.vtkMarchingContourFilter() + grid_surface.ComputeNormalsOn() + grid_surface.ComputeScalarsOff() + grid_surface.SetInputData(grid_alg.GetOutput()) + grid_surface.SetValue(0, 0.1) + grid_surface.Update() + grid_mesh = vtk.vtkPolyDataMapper() + grid_mesh.SetInputData(grid_surface.GetOutput()) + else: + grid_mesh = None + + mapper = vtk.vtkSmartVolumeMapper() + if resolution is None: # native + mapper.SetScalarModeToUseCellData() + mapper.SetInputDataObject(grid) + else: + upsampler = vtk.vtkImageReslice() + upsampler.SetInterpolationModeToLinear() # default anyway + upsampler.SetOutputSpacing(*([resolution] * 3)) + upsampler.SetInputConnection(grid_alg.GetOutputPort()) + mapper.SetInputConnection(upsampler.GetOutputPort()) + # Additive, AverageIntensity, and Composite might also be reasonable + remap = dict(composite='Composite', mip='MaximumIntensity') + getattr(mapper, f'SetBlendModeTo{remap[blending]}')() + volume_pos = vtk.vtkVolume() + volume_pos.SetMapper(mapper) + dist = grid.length / (np.mean(grid.dimensions) - 1) + volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist) + if center is not None and blending == 'mip': + # We need to 
create a minimum intensity projection for the neg half + mapper_neg = vtk.vtkSmartVolumeMapper() + if resolution is None: # native + mapper_neg.SetScalarModeToUseCellData() + mapper_neg.SetInputDataObject(grid) + else: + mapper_neg.SetInputConnection(upsampler.GetOutputPort()) + mapper_neg.SetBlendModeToMinimumIntensity() + volume_neg = vtk.vtkVolume() + volume_neg.SetMapper(mapper_neg) + volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist) + else: + volume_neg = None + return grid, grid_mesh, volume_pos, volume_neg + + def _silhouette(self, mesh, color=None, line_width=None, alpha=None): + silhouette_filter = vtk.vtkPolyDataSilhouette() + silhouette_filter.SetInputData(mesh) + silhouette_filter.SetCamera(self.plotter.renderer.GetActiveCamera()) + silhouette_filter.SetEnableFeatureAngle(0) + silhouette_mapper = vtk.vtkPolyDataMapper() + silhouette_mapper.SetInputConnection( + silhouette_filter.GetOutputPort()) + _, prop = self.plotter.add_actor( + silhouette_mapper, reset_camera=False, name=None, + culling=False, pickable=False) + if color is not None: + prop.SetColor(*color) + if alpha is not None: + prop.SetOpacity(alpha) + if line_width is not None: + prop.SetLineWidth(line_width) def _compute_normals(mesh): @@ -881,50 +1052,6 @@ def _process_events(plotter): plotter.app.processEvents() -def _set_colormap_range(actor, ctable, scalar_bar, rng=None, - background_color=None): - from vtk.util.numpy_support import numpy_to_vtk - if rng is not None: - mapper = actor.GetMapper() - mapper.SetScalarRange(*rng) - lut = mapper.GetLookupTable() - lut.SetTable(numpy_to_vtk(ctable)) - if scalar_bar is not None: - lut = scalar_bar.GetLookupTable() - if background_color is not None: - background_color = np.array(background_color) * 255 - ctable = _alpha_blend_background(ctable, background_color) - lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR)) - lut.SetRange(*rng) - - -def _set_volume_range(volume, ctable, alpha, scalar_bar, rng): - import vtk - from vtk.util.numpy_support import numpy_to_vtk - color_tf = vtk.vtkColorTransferFunction() - opacity_tf = vtk.vtkPiecewiseFunction() - for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable): - color_tf.AddRGBPoint(loc, *(color[:-1] / 255.)) - opacity_tf.AddPoint(loc, color[-1] * alpha / 255.) - color_tf.ClampingOn() - opacity_tf.ClampingOn() - volume.GetProperty().SetColor(color_tf) - volume.GetProperty().SetScalarOpacity(opacity_tf) - if scalar_bar is not None: - lut = vtk.vtkLookupTable() - lut.SetRange(*rng) - lut.SetTable(numpy_to_vtk(ctable)) - scalar_bar.SetLookupTable(lut) - - -def _set_mesh_scalars(mesh, scalars, name): - # Catch: FutureWarning: Conversion of the second argument of - # issubdtype from `complex` to `np.complexfloating` is deprecated. 
- with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=FutureWarning) - mesh.point_arrays[name] = scalars - - def _update_slider_callback(slider, callback, event_type): _check_option('event_type', event_type, ['start', 'end', 'always']) @@ -950,40 +1077,6 @@ def _add_camera_callback(camera, callback): camera.AddObserver(vtk.vtkCommand.ModifiedEvent, callback) -def _update_picking_callback(plotter, - on_mouse_move, - on_button_press, - on_button_release, - on_pick): - interactor = plotter.iren - interactor.AddObserver( - vtk.vtkCommand.RenderEvent, - on_mouse_move - ) - interactor.AddObserver( - vtk.vtkCommand.LeftButtonPressEvent, - on_button_press - ) - interactor.AddObserver( - vtk.vtkCommand.EndInteractionEvent, - on_button_release - ) - picker = vtk.vtkCellPicker() - picker.AddObserver( - vtk.vtkCommand.EndPickEvent, - on_pick - ) - picker.SetVolumeOpacityIsovalue(0.) - plotter.picker = picker - - -def _remove_picking_callback(interactor, picker): - interactor.RemoveObservers(vtk.vtkCommand.RenderEvent) - interactor.RemoveObservers(vtk.vtkCommand.LeftButtonPressEvent) - interactor.RemoveObservers(vtk.vtkCommand.EndInteractionEvent) - picker.RemoveObservers(vtk.vtkCommand.EndPickEvent) - - def _arrow_glyph(grid, factor): glyph = vtk.vtkGlyphSource2D() glyph.SetGlyphTypeToArrow() @@ -1040,84 +1133,6 @@ def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0, return alg -def _sphere(plotter, center, color, radius): - sphere = vtk.vtkSphereSource() - sphere.SetThetaResolution(8) - sphere.SetPhiResolution(8) - sphere.SetRadius(radius) - sphere.SetCenter(center) - sphere.Update() - mesh = pyvista.wrap(sphere.GetOutput()) - actor = _add_mesh( - plotter, - mesh=mesh, - color=color - ) - return actor, mesh - - -def _volume(dimensions, origin, spacing, scalars, - surface_alpha, resolution, blending, center): - # Now we can actually construct the visualization - grid = pyvista.UniformGrid() - grid.dimensions = dimensions + 1 # inject data on the cells - grid.origin = origin - grid.spacing = spacing - grid.cell_arrays['values'] = scalars - - # Add contour of enclosed volume (use GetOutput instead of - # GetOutputPort below to avoid updating) - grid_alg = vtk.vtkCellDataToPointData() - grid_alg.SetInputDataObject(grid) - grid_alg.SetPassCellData(False) - grid_alg.Update() - - if surface_alpha > 0: - grid_surface = vtk.vtkMarchingContourFilter() - grid_surface.ComputeNormalsOn() - grid_surface.ComputeScalarsOff() - grid_surface.SetInputData(grid_alg.GetOutput()) - grid_surface.SetValue(0, 0.1) - grid_surface.Update() - grid_mesh = vtk.vtkPolyDataMapper() - grid_mesh.SetInputData(grid_surface.GetOutput()) - else: - grid_mesh = None - - mapper = vtk.vtkSmartVolumeMapper() - if resolution is None: # native - mapper.SetScalarModeToUseCellData() - mapper.SetInputDataObject(grid) - else: - upsampler = vtk.vtkImageReslice() - upsampler.SetInterpolationModeToLinear() # default anyway - upsampler.SetOutputSpacing(*([resolution] * 3)) - upsampler.SetInputConnection(grid_alg.GetOutputPort()) - mapper.SetInputConnection(upsampler.GetOutputPort()) - # Additive, AverageIntensity, and Composite might also be reasonable - remap = dict(composite='Composite', mip='MaximumIntensity') - getattr(mapper, f'SetBlendModeTo{remap[blending]}')() - volume_pos = vtk.vtkVolume() - volume_pos.SetMapper(mapper) - dist = grid.length / (np.mean(grid.dimensions) - 1) - volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist) - if center is not None and blending == 'mip': - # We need 
to create a minimum intensity projection for the neg half - mapper_neg = vtk.vtkSmartVolumeMapper() - if resolution is None: # native - mapper_neg.SetScalarModeToUseCellData() - mapper_neg.SetInputDataObject(grid) - else: - mapper_neg.SetInputConnection(upsampler.GetOutputPort()) - mapper_neg.SetBlendModeToMinimumIntensity() - volume_neg = vtk.vtkVolume() - volume_neg.SetMapper(mapper_neg) - volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist) - else: - volume_neg = None - return grid, grid_mesh, volume_pos, volume_neg - - def _require_minimum_version(version_required): from distutils.version import LooseVersion version = LooseVersion(pyvista.__version__) @@ -1157,30 +1172,3 @@ def _disabled_depth_peeling(): yield finally: rcParams["depth_peeling"]["enabled"] = depth_peeling_enabled - - -@contextmanager -def _disabled_interaction(renderer): - plotter = renderer.plotter - if not plotter.renderer.GetInteractive(): - yield - else: - plotter.disable() - try: - yield - finally: - plotter.enable() - - -@decorator -def run_once(fun, *args, **kwargs): - """Run the function only once.""" - if not hasattr(fun, "_has_run"): - fun._has_run = True - return fun(*args, **kwargs) - - -@run_once -def _init_resources(): - from ...icons import resources - resources.qInitResources() diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index f9c36a097da..626f161017a 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -9,6 +9,7 @@ import numpy as np import collections.abc +from ...externals.decorator import decorator VALID_3D_BACKENDS = ( 'pyvista', # default 3d backend @@ -63,3 +64,17 @@ def _alpha_blend_background(ctable, background_color): use_table = ctable.copy() use_table[:, -1] = 255. return (use_table * alphas) + background_color * (1 - alphas) + + +@decorator +def run_once(fun, *args, **kwargs): + """Run the function only once.""" + if not hasattr(fun, "_has_run"): + fun._has_run = True + return fun(*args, **kwargs) + + +@run_once +def _init_qt_resources(): + from ...icons import resources + resources.qInitResources() From 51fd3cd93f2017c7c30ac6f47c43ae38c4d7603b Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sat, 23 Jan 2021 12:36:31 +0100 Subject: [PATCH 076/387] Add raw.describe() method (#8760) * Add raw.describe() method * Print nicely formatted table * Add function to automatically convert numbers to use best SI prefixes * Add myself to authors list * Use correct human-readable units * Rename function to si_prefix * Use more explicit names in list comprehensions * Optionally return data frame instead of printing to stdout * Show commonly used units * Scale all units defined in defaults.py * Scale only when necessary * Remove unused import * Add Returns docstring section * Use default units and scalings * Strip whitespace at the end of rows * Remove empty line * Add tests * Use pandas in docstring * Fix for pandas < 0.24 * Fix PEP8 * Use Q1/Q3 instead of q1/q3 * Improved docstring * Add changelog entry * Use human-readable method link * Don't use backticks in docstring --- doc/changes/latest.inc | 2 + mne/io/base.py | 89 +++++++++++++++++++++++++++++++++++++++- mne/io/tests/test_raw.py | 41 +++++++++++++++++- mne/utils/numerics.py | 1 + 4 files changed, 130 insertions(+), 3 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index b0186e754e9..a5bc085d314 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -34,6 +34,8 @@ Enhancements - Add :func:`mne.io.read_raw_nedf` for reading StarStim / enobio NEDF files 
(:gh:`8734` by `Tristan Stenner`_)
 
+- Add :meth:`raw.describe() <mne.io.Raw.describe>` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_)
+
 Bugs
 ~~~~
 
diff --git a/mne/io/base.py b/mne/io/base.py
index 5084398b507..cd8050f064e 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -6,6 +6,7 @@
 # Teon Brooks 
 # Marijn van Vliet 
 # Stefan Appelhoff 
+# Clemens Brunner 
 #
 # License: BSD (3-clause)
 
@@ -14,16 +15,19 @@
 import os
 import os.path as op
 import shutil
+from collections import defaultdict
 
 import numpy as np
 
 from .constants import FIFF
 from .utils import _construct_bids_filename, _check_orig_units
-from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx)
+from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx,
+                   channel_type)
 from .meas_info import write_meas_info
 from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
 from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
-                                 SetChannelsMixin, InterpolationMixin)
+                                 SetChannelsMixin, InterpolationMixin,
+                                 _unit2human)
 from .compensator import set_current_comp, make_compensator
 from .write import (start_file, end_file, start_block, end_block,
                     write_dau_pack16, write_float, write_double,
@@ -44,6 +48,7 @@
                      _check_preload, _get_argvalues, _check_option,
                      _build_data_frame, _convert_times, _scale_dataframe_data,
                      _check_time_format)
+from ..defaults import _handle_default
 from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo, _RAW_CLIP_DEF
 from ..event import find_events, concatenate_events
 from ..annotations import Annotations, _combine_annotations, _sync_onset
@@ -1765,6 +1770,86 @@ def to_data_frame(self, picks=None, index=None,
                                  default_index=['time'])
         return df
 
+    def describe(self, data_frame=False):
+        """Describe channels (name, type, descriptive statistics).
+
+        Parameters
+        ----------
+        data_frame : bool
+            If True, return results in a pandas.DataFrame. If False, only
+            print results. Columns 'ch', 'type', and 'unit' indicate channel
+            index, channel type, and the unit in which the remaining five
+            columns are expressed. These columns are 'min' (minimum), 'Q1'
+            (first quartile or 25th percentile), 'median', 'Q3' (third
+            quartile or 75th percentile), and 'max' (maximum).
+
+        Returns
+        -------
+        result : None | pandas.DataFrame
+            If data_frame=False, returns None. If data_frame=True, returns
+            results in a pandas.DataFrame (requires pandas).
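+
+        Examples
+        --------
+        Print the table to stdout, or work with it as a DataFrame::
+
+            raw.describe()
+            df = raw.describe(data_frame=True)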
+ """ + from scipy.stats import scoreatpercentile as q + nchan = self.info["nchan"] + + # describe each channel + cols = defaultdict(list) + cols["name"] = self.ch_names + for i in range(nchan): + ch = self.info["chs"][i] + data = self[i][0] + cols["type"].append(channel_type(self.info, i)) + cols["unit"].append(_unit2human[ch["unit"]]) + cols["min"].append(np.min(data)) + cols["Q1"].append(q(data, 25)) + cols["median"].append(np.median(data)) + cols["Q3"].append(q(data, 75)) + cols["max"].append(np.max(data)) + + if data_frame: # return data frame + import pandas as pd + df = pd.DataFrame(cols) + df.index.name = "ch" + return df + + # convert into commonly used units + scalings = _handle_default("scalings") + units = _handle_default("units") + for i in range(nchan): + unit = units.get(cols['type'][i]) + scaling = scalings.get(cols['type'][i], 1) + if scaling != 1: + cols['unit'][i] = unit + for col in ["min", "Q1", "median", "Q3", "max"]: + cols[col][i] *= scaling + + lens = {"ch": max(2, len(str(nchan))), + "name": max(4, max([len(n) for n in cols["name"]])), + "type": max(4, max([len(t) for t in cols["type"]])), + "unit": max(4, max([len(u) for u in cols["unit"]]))} + + # print description, start with header + print(self) + print(f"{'ch':>{lens['ch']}} " + f"{'name':<{lens['name']}} " + f"{'type':<{lens['type']}} " + f"{'unit':<{lens['unit']}} " + f"{'min':>8} " + f"{'Q1':>8} " + f"{'median':>8} " + f"{'Q3':>8} " + f"{'max':>8}") + # print description for each channel + for i in range(nchan): + msg = (f"{i:>{lens['ch']}} " + f"{cols['name'][i]:<{lens['name']}} " + f"{cols['type'][i].upper():<{lens['type']}} " + f"{cols['unit'][i]:<{lens['unit']}} ") + for col in ["min", "Q1", "median", "Q3"]: + msg += f"{cols[col][i]:>8.2f} " + msg += f"{cols['max'][i]:>8.2f}" + print(msg) + def _allocate_data(preload, shape, dtype): """Allocate data in memory or in memmap for preloading.""" diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py index d6e6ed055f5..05536fc65d2 100644 --- a/mne/io/tests/test_raw.py +++ b/mne/io/tests/test_raw.py @@ -6,8 +6,11 @@ # License: BSD (3-clause) from os import path as op +from pathlib import Path import math import re +from contextlib import redirect_stdout +from io import StringIO import pytest import numpy as np @@ -19,7 +22,7 @@ from mne.externals.h5io import read_hdf5, write_hdf5 from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5 from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt, - object_diff, check_version) + object_diff, check_version, requires_pandas) from mne.io.meas_info import _get_valid_units from mne.io._digitization import DigPoint from mne.io.proj import Projection @@ -537,3 +540,39 @@ def _read_raw_arange(preload=False, verbose=None): def test_test_raw_reader(): """Test _test_raw_reader.""" _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank='less') + + +def test_describe_print(): + """Test print output of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + # test print output + f = StringIO() + with redirect_stdout(f): + raw.describe() + s = f.getvalue().strip().split("\n") + assert len(s) == 378 + assert s[0] == "" # noqa + assert s[1] == " ch name type unit min Q1 median Q3 max" # noqa + assert s[2] == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa + assert s[-1] == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa + + +@requires_pandas +def test_describe_df(): + """Test returned data 
frame of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + df = raw.describe(data_frame=True) + assert df.shape == (376, 8) + assert (df.columns.tolist() == ["name", "type", "unit", "min", "Q1", + "median", "Q3", "max"]) + assert df.index.name == "ch" + assert_allclose(df.iloc[0, 3:].astype(float), + np.array([-2.218017605790535e-11, + -3.857421923113974e-12, + -9.643554807784935e-13, + 1.928710961556987e-12, + 4.146728567347522e-11])) diff --git a/mne/utils/numerics.py b/mne/utils/numerics.py index ad077b08549..8481d8c6ae8 100644 --- a/mne/utils/numerics.py +++ b/mne/utils/numerics.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- """Some utility functions.""" # Authors: Alexandre Gramfort +# Clemens Brunner # # License: BSD (3-clause) From ae8a466051a43ac929eaf23e02b40c27a6d62e1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Sat, 23 Jan 2021 12:43:13 +0100 Subject: [PATCH 077/387] WIP, BUG: Make plot_evoked() not plot GFP for single channel (#8774) * Make plot_evoked() not plot GFP for single channel Fixes #8772 * Better * Comments * Docstring * Docstring * Fix docs [skip azp][skip github] --- doc/changes/latest.inc | 2 ++ mne/viz/evoked.py | 14 +++++++++++--- mne/viz/tests/test_evoked.py | 7 +++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index a5bc085d314..ee78086b204 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -61,6 +61,8 @@ Bugs - `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) +- `mne.viz.plot_evoked` and `mne.Evoked.plot` won't plot global field power (GFP) anymore if there's only a single channel in the data, raising a warning (``gfp=True``) or an exception (``gfp='only'``) instead (:gh:`8774` by `Richard Höchenberger`_) + API changes ~~~~~~~~~~~ diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 932c0ae5c27..c2f9df6c68a 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -473,7 +473,14 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, linewidth=0.5)[0]) line_list[-1].set_pickradius(3.) - if gfp: # 'only' or boolean True + if gfp == 'only' and D.shape[0] == 1: + # Only a single channel + only GFP -> abort mission! + raise ValueError('Cannot calculate GFP with only one data ' + 'channel') + elif gfp and D.shape[0] == 1: + warn('Cannot calculate GFP with only one data channel, ' + 'not plotting GFP') + elif gfp: # gfp 'only' or boolean True gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1., 0.) this_gfp = np.sqrt((D * D).mean(axis=0)) @@ -674,8 +681,9 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True, the same length as the number of channel types. If instance of Axes, there must be only one channel type plotted. gfp : bool | 'only' - Plot GFP in green if True or "only". If "only", then the individual - channel traces will not be shown. + Plot the GFP and the traces for all channels if ``True``; or only the + GFP if ``'only'``, hiding the channel traces. The color of the GFP + trace will be green if ``spatial_colors=False``, and black otherwise. window_title : str | None The title to put at the top of the figure. 
spatial_colors : bool diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index 27445c0d157..c4a9cb4df08 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -126,6 +126,13 @@ def test_plot_evoked(): evoked.plot(gfp='only', time_unit='s') pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s') + # test GFP plotting with only a single data channel + with pytest.warns(RuntimeWarning, match='Cannot calculate GFP'): + evoked.copy().pick(evoked.ch_names[0]).plot(gfp=True) + with pytest.raises(ValueError, match='Cannot calculate GFP'): + evoked.copy().pick(evoked.ch_names[0]).plot(gfp='only') + plt.close('all') + # plot with bad channels excluded, spatial_colors, zorder & pos. layout evoked.rename_channels({'MEG 0133': 'MEG 0000'}) evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True, From 6ae3b22033c745cce5cd5de9b92da54c13c36484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Sat, 23 Jan 2021 13:02:03 +0100 Subject: [PATCH 078/387] Revert "WIP, BUG: Make plot_evoked() not plot GFP for single channel (#8774)" (#8778) This reverts commit ae8a466051a43ac929eaf23e02b40c27a6d62e1a. --- doc/changes/latest.inc | 2 -- mne/viz/evoked.py | 14 +++----------- mne/viz/tests/test_evoked.py | 7 ------- 3 files changed, 3 insertions(+), 20 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index ee78086b204..a5bc085d314 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -61,8 +61,6 @@ Bugs - `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) -- `mne.viz.plot_evoked` and `mne.Evoked.plot` won't plot global field power (GFP) anymore if there's only a single channel in the data, raising a warning (``gfp=True``) or an exception (``gfp='only'``) instead (:gh:`8774` by `Richard Höchenberger`_) - API changes ~~~~~~~~~~~ diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index c2f9df6c68a..932c0ae5c27 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -473,14 +473,7 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, linewidth=0.5)[0]) line_list[-1].set_pickradius(3.) - if gfp == 'only' and D.shape[0] == 1: - # Only a single channel + only GFP -> abort mission! - raise ValueError('Cannot calculate GFP with only one data ' - 'channel') - elif gfp and D.shape[0] == 1: - warn('Cannot calculate GFP with only one data channel, ' - 'not plotting GFP') - elif gfp: # gfp 'only' or boolean True + if gfp: # 'only' or boolean True gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1., 0.) this_gfp = np.sqrt((D * D).mean(axis=0)) @@ -681,9 +674,8 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True, the same length as the number of channel types. If instance of Axes, there must be only one channel type plotted. gfp : bool | 'only' - Plot the GFP and the traces for all channels if ``True``; or only the - GFP if ``'only'``, hiding the channel traces. The color of the GFP - trace will be green if ``spatial_colors=False``, and black otherwise. + Plot GFP in green if True or "only". If "only", then the individual + channel traces will not be shown. window_title : str | None The title to put at the top of the figure. 
spatial_colors : bool diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index c4a9cb4df08..27445c0d157 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -126,13 +126,6 @@ def test_plot_evoked(): evoked.plot(gfp='only', time_unit='s') pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s') - # test GFP plotting with only a single data channel - with pytest.warns(RuntimeWarning, match='Cannot calculate GFP'): - evoked.copy().pick(evoked.ch_names[0]).plot(gfp=True) - with pytest.raises(ValueError, match='Cannot calculate GFP'): - evoked.copy().pick(evoked.ch_names[0]).plot(gfp='only') - plt.close('all') - # plot with bad channels excluded, spatial_colors, zorder & pos. layout evoked.rename_channels({'MEG 0133': 'MEG 0000'}) evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True, From 33f76fae73f37dac8b402fcf6dd4899e8eafe1c8 Mon Sep 17 00:00:00 2001 From: Alexandre Gramfort Date: Sun, 24 Jan 2021 08:24:13 +0100 Subject: [PATCH 079/387] enforce abs path (#8777) * enforce abs path * expanduser --- mne/bem.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mne/bem.py b/mne/bem.py index 75296b46091..e3c40d1374e 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -1632,6 +1632,8 @@ def _prepare_env(subject, subjects_dir): _validate_type(subject, "str") subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subjects_dir = op.abspath(subjects_dir) # force use of an absolute path + subjects_dir = op.expanduser(subjects_dir) if not op.isdir(subjects_dir): raise RuntimeError('Could not find the MRI data directory "%s"' % subjects_dir) From baaf9e33f2b54f6a40454a8aa146577bde1d6f58 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 25 Jan 2021 14:04:35 +0100 Subject: [PATCH 080/387] Replace master -> main (#8758) --- .circleci/config.yml | 14 ++++++------- CONTRIBUTING.rst | 2 +- README.rst | 18 ++++++++--------- azure-pipelines.yml | 2 +- doc/_includes/forward.rst | 2 +- doc/_templates/layout.html | 2 +- doc/conf.py | 2 +- doc/glossary.rst | 2 +- doc/install/advanced.rst | 12 +++++------ doc/install/contributing.rst | 34 ++++++++++++++++---------------- doc/install/mne_python.rst | 8 ++++---- doc/links.inc | 2 +- doc/overview/faq.rst | 2 +- mne/tests/test_morph.py | 4 ++-- mne/utils/docs.py | 2 +- mne/utils/tests/test_fetching.py | 2 +- tools/circleci_download.sh | 6 +++--- 17 files changed, 58 insertions(+), 58 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5a0e0861c2f..4c27af9a991 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -391,16 +391,16 @@ jobs: name: Deploy docs command: | set -e; - if [ "${CIRCLE_BRANCH}" == "master" ] || [ "${CIRCLE_BRANCH}" == "maint/0.22" ]; then + if [ "${CIRCLE_BRANCH}" == "main" ] || [ "${CIRCLE_BRANCH}" == "maint/0.22" ]; then git config --global user.email "circle@mne.com"; git config --global user.name "Circle CI"; cd ~/mne-tools.github.io; - git checkout master + git checkout main git remote -v git fetch origin - git reset --hard origin/master + git reset --hard origin/main git clean -xdf - if [ "${CIRCLE_BRANCH}" == "master" ]; then + if [ "${CIRCLE_BRANCH}" == "main" ]; then echo "Deploying dev docs for ${CIRCLE_BRANCH}."; rm -Rf dev; cp -a /tmp/build/html dev; @@ -413,7 +413,7 @@ jobs: git add -A; git commit -m "CircleCI update of stable docs (${CIRCLE_BUILD_NUM})."; fi; - git push origin master; + git push origin main; else echo "No deployment (build: ${CIRCLE_BRANCH})."; fi @@ -469,7 +469,7 @@ workflows: filters: branches: 
only: - - master + - main - maint/0.22 # interactive_test @@ -483,4 +483,4 @@ workflows: filters: branches: only: - - master + - main diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index fa5a057ae52..a104cd32b5a 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -17,5 +17,5 @@ MNE-Python. .. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose .. _`MNE Forum`: https://mne.discourse.group -.. _`code of conduct`: https://github.com/mne-tools/.github/blob/master/CODE_OF_CONDUCT.md +.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md .. _`contributing guide`: https://mne-tools.github.io/dev/install/contributing.html diff --git a/README.rst b/README.rst index 6c13b796487..405eee64fa5 100644 --- a/README.rst +++ b/README.rst @@ -4,19 +4,19 @@ |MNE|_ -.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=master -.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push +.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=main +.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:main+event:push -.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=master -.. _GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push +.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=main +.. _GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:main+event:push -.. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=master -.. _Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master +.. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=main +.. _Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=main .. |Circle| image:: https://circleci.com/gh/mne-tools/mne-python.svg?style=shield .. _Circle: https://circleci.com/gh/mne-tools/mne-python -.. |Codecov| image:: https://codecov.io/gh/mne-tools/mne-python/branch/master/graph/badge.svg +.. |Codecov| image:: https://codecov.io/gh/mne-tools/mne-python/branch/main/graph/badge.svg .. _Codecov: https://codecov.io/gh/mne-tools/mne-python .. |PyPI| image:: https://img.shields.io/pypi/dm/mne.svg?label=PyPI%20downloads @@ -71,7 +71,7 @@ To install the latest version of the code using pip_ open a terminal and type: .. code-block:: bash - pip install -U https://github.com/mne-tools/mne-python/archive/master.zip + pip install -U https://github.com/mne-tools/mne-python/archive/main.zip To get the latest code using `git `__, open a terminal and type: @@ -80,7 +80,7 @@ To get the latest code using `git `__, open a terminal and git clone git://github.com/mne-tools/mne-python.git Alternatively, you can also download a -`zip file of the latest development version `__. +`zip file of the latest development version `__. 
Dependencies diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a2f9d1a6af4..3a8b9e0c309 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -3,7 +3,7 @@ trigger: batch: False branches: include: - - 'master' + - 'main' - 'maint/*' pr: branches: diff --git a/doc/_includes/forward.rst b/doc/_includes/forward.rst index 7070b426dcd..800fb09e8bc 100644 --- a/doc/_includes/forward.rst +++ b/doc/_includes/forward.rst @@ -30,7 +30,7 @@ MEG/EEG and MRI coordinate systems :class:`~mne.SourceSpaces`, etc), information about the coordinate frame is encoded as a constant integer value. The meaning of those integers is determined `in the source code - `__. + `__. The coordinate systems used in MNE software (and FreeSurfer) and their relationships are depicted in :ref:`coordinate_system_figure`. Except for the diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 8ba519efa2e..76e2babb489 100755 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -45,7 +45,7 @@ {% if build_dev_html|tobool %}
This is documentation for the unstable development version of MNE-Python, -available here. +available here. Or, switch to documentation for the current stable version.
diff --git a/doc/conf.py b/doc/conf.py index 204dc860f2b..5b433cc5241 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -99,7 +99,7 @@ # The encoding of source files. # source_encoding = 'utf-8-sig' -# The master toctree document. +# The main toctree document. master_doc = 'index' # General information about the project. diff --git a/doc/glossary.rst b/doc/glossary.rst index f7ce9ad1d63..526c5891c09 100644 --- a/doc/glossary.rst +++ b/doc/glossary.rst @@ -322,4 +322,4 @@ general neuroimaging concepts. If you think a term is missing, please consider .. _`creating a new issue`: https://github.com/mne-tools/mne-python/issues/new?template=glossary.md .. _`opening a pull request`: - https://github.com/mne-tools/mne-python/pull/new/master + https://github.com/mne-tools/mne-python/pull/new/main diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index 18e7be96b93..e0528348a25 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -81,10 +81,10 @@ The notebook 3d backend requires PyVista to be installed along with other packag please follow :doc:`mne_python` -.. _installing_master: +.. _installing_main: -Using the development version of MNE-Python (latest master) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Using the development version of MNE-Python (latest main) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you want access to the latest features and bugfixes, you can easily switch from the stable version of MNE-Python to the current development version. @@ -92,14 +92,14 @@ from the stable version of MNE-Python to the current development version. .. warning:: In between releases, function and class APIs can change without warning. -For a one-time update to latest master, make sure you're in the conda +For a one-time update to latest main, make sure you're in the conda environment where MNE-Python is installed (if you followed the default install instructions, this will be ``base``), and use ``pip`` to upgrade: .. code-block:: console $ conda activate name_of_my_mne_environment - $ pip install --upgrade --no-deps https://github.com/mne-tools/mne-python/archive/master.zip + $ pip install --upgrade --no-deps https://github.com/mne-tools/mne-python/archive/main.zip If you plan to contribute to MNE-Python, or just prefer to use git rather than pip to make frequent updates, check out the :ref:`contributing guide @@ -124,7 +124,7 @@ the provided `requirements file`_: .. code-block:: console - curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/requirements.txt + curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt pip install --user requirements.txt Other configurations will probably also work, but we may be unable to offer diff --git a/doc/install/contributing.rst b/doc/install/contributing.rst index 8b4f2cf4c44..8b1ff860e05 100644 --- a/doc/install/contributing.rst +++ b/doc/install/contributing.rst @@ -40,7 +40,7 @@ MNE-Python. .. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose .. _`MNE Forum`: https://mne.discourse.group -.. _`code of conduct`: https://github.com/mne-tools/.github/blob/master/CODE_OF_CONDUCT.md +.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md .. 
_`GitHub issues marked "easy"`: https://github.com/mne-tools/mne-python/issues?q=is%3Aissue+is%3Aopen+label%3AEASY Overview of contribution process @@ -208,7 +208,7 @@ version of MNE-Python, you should now repeat that process to create a new, separate environment for MNE-Python development (here we'll give it the name ``mnedev``):: - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env create --file environment.yml --name mnedev $ conda activate mnedev @@ -323,22 +323,22 @@ Other commands that you will undoubtedly need relate to `branches`_. Branches represent multiple copies of the codebase *within a local clone or remote repo*. Branches are typically used to experiment with new features while still keeping a clean, working copy of the original codebase that you can switch back -to at any time. The default branch of any repo is always called ``master``, and -it is recommended that you reserve the ``master`` branch to be that clean copy +to at any time. The default branch of any repo is called ``main``, and +it is recommended that you reserve the ``main`` branch to be that clean copy of the working ``upstream`` codebase. Therefore, if you want to add a new -feature, you should first synchronize your local ``master`` branch with the -``upstream`` repository, then create a new branch based off of ``master`` and +feature, you should first synchronize your local ``main`` branch with the +``upstream`` repository, then create a new branch based off of ``main`` and `check it out`_ so that any changes you make will exist on that new branch -(instead of on ``master``):: +(instead of on ``main``):: - $ git checkout master # switch to local master branch + $ git checkout main # switch to local main branch $ git fetch upstream # get the current state of the remote upstream repo - $ git merge upstream/master # synchronize local master branch with remote upstream master branch + $ git merge upstream/main # synchronize local main branch with remote upstream main branch $ git checkout -b new-feature-x # create local branch "new-feature-x" and check it out .. sidebar:: Alternative - You can save some typing by using ``git pull upstream/master`` to replace + You can save some typing by using ``git pull upstream/main`` to replace the ``fetch`` and ``merge`` lines above. Now that you're on a new branch, you can fix a bug or add a new feature, add a @@ -473,7 +473,7 @@ deprecation section ` and `mne.utils.deprecated` for instructions. Bug fixes (when something isn't doing what it says it will do) do not require a deprecation cycle. -Note that any new API elements should be added to the master reference; +Note that any new API elements should be added to the main reference; classes, functions, methods, and attributes cannot be cross-referenced unless they are included in the :doc:`python_reference` (:file:`doc/python_reference.rst`). @@ -946,13 +946,13 @@ down the road. Here are the guidelines: doing this. Avoid purely cosmetic changes to the code; they make PRs harder to review. 
-- It is usually better to make PRs *from* branches other than your master - branch, so that you can use your master branch to easily get back to a +- It is usually better to make PRs *from* branches other than your main + branch, so that you can use your main branch to easily get back to a working state of the code if needed (e.g., if you're working on multiple changes at once, or need to pull in recent changes from someone else to get your new feature to work properly). -- In most cases you should make PRs *into* the upstream's master branch, unless +- In most cases you should make PRs *into* the upstream's main branch, unless you are specifically asked by a maintainer to PR into another branch (e.g., for backports or maintenance bugfixes to the current stable version). @@ -998,8 +998,8 @@ down the road. Here are the guidelines: `This sample pull request`_ exemplifies many of the conventions listed above: it addresses only one problem; it started with an issue to discuss the problem -and some possible solutions; it is a PR from the user's non-master branch into -the upstream master branch; it separates different kinds of changes into +and some possible solutions; it is a PR from the user's non-main branch into +the upstream main branch; it separates different kinds of changes into separate commits and uses labels like ``DOC``, ``FIX``, and ``STY`` to make it easier for maintainers to review the changeset; etc. If you are new to GitHub it can serve as a useful example of what to expect from the PR review process. @@ -1078,7 +1078,7 @@ it can serve as a useful example of what to expect from the PR review process. .. _continuous integration: https://en.wikipedia.org/wiki/Continuous_integration .. _matplotlib: https://matplotlib.org/ .. _github actions: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions -.. _azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master +.. _azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=main .. _CircleCI: https://circleci.com/gh/mne-tools/mne-python .. optipng diff --git a/doc/install/mne_python.rst b/doc/install/mne_python.rst index 4f9e391a0a1..76184360df0 100644 --- a/doc/install/mne_python.rst +++ b/doc/install/mne_python.rst @@ -148,7 +148,7 @@ your operating system. Download the MNE-Python `environment file`_ (done here with ``curl``) and use it to create a new environment (named ``mne`` by default):: - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env update --file environment.yml .. collapse:: |hand-stop-o| If you get errors building mayavi... @@ -166,7 +166,7 @@ your operating system. default):: $ conda install --name base nb_conda_kernels "spyder>=4.2.1" - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env update --file environment.yml .. collapse:: |windows| Windows @@ -334,8 +334,8 @@ Python development are: .. LINKS -.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml -.. _server environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/server_environment.yml +.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml +.. 
_server environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/server_environment.yml .. _`mayavi`: https://docs.enthought.com/mayavi/mayavi/ .. _`pyvista`: https://docs.pyvista.org/ .. _`X server`: https://en.wikipedia.org/wiki/X_Window_System diff --git a/doc/links.inc b/doc/links.inc index 7cd2918a1ab..6103dc4df1b 100644 --- a/doc/links.inc +++ b/doc/links.inc @@ -206,7 +206,7 @@ .. installation links -.. _requirements file: https://raw.githubusercontent.com/mne-tools/mne-python/master/requirements.txt +.. _requirements file: https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt .. _NVIDIA CUDA GPU processing: https://developer.nvidia.com/cuda-zone .. _NVIDIA proprietary drivers: https://www.geforce.com/drivers diff --git a/doc/overview/faq.rst b/doc/overview/faq.rst index 8644338165e..9ffbbd205d2 100644 --- a/doc/overview/faq.rst +++ b/doc/overview/faq.rst @@ -102,7 +102,7 @@ is a software bug (not bad data or user error): dependencies. - If you're already on the most current version, if possible try using - :ref:`the latest development version `, as the bug may + :ref:`the latest development version `, as the bug may have been fixed already since the latest release. If you can't try the latest development version, search the GitHub issues page to see if the problem has already been reported and/or fixed. diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index c082ad102b6..05abc9ebf52 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -778,7 +778,7 @@ def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig): n_got_real = np.in1d( aseg_img.ravel(), [lut[name] for name in use_label_names]).sum() assert n_got_real == n_real - # - This was 291 on `master` before gh-5590 + # - This was 291 on `main` before gh-5590 # - Refactoring transforms it became 279 with a < 1e-8 change in vox_mri_t # - Dropped to 123 once nearest-voxel was used in gh-7653 # - Jumped back up to 330 with morphing fixes actually correctly @@ -791,7 +791,7 @@ def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig): src[0]['interpolator'] = None img = stc.as_volume(src, mri_resolution=False) n_on = np.array(img.dataobj).astype(bool).sum() - # was 20 on `master` before gh-5590 + # was 20 on `main` before gh-5590 # then 44 before gh-7653, which took it back to 20 assert n_on == n_orig # without the interpolator, this should fail diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 4d377f91341..c5f394a72bd 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -2365,7 +2365,7 @@ def linkcode_resolve(domain, info): linespec = "" if 'dev' in mne.__version__: - kind = 'master' + kind = 'main' else: kind = 'maint/%s' % ('.'.join(mne.__version__.split('.')[:2])) return "http://github.com/mne-tools/mne-python/blob/%s/mne/%s%s" % ( diff --git a/mne/utils/tests/test_fetching.py b/mne/utils/tests/test_fetching.py index 547419380d2..98a4536ca1f 100644 --- a/mne/utils/tests/test_fetching.py +++ b/mne/utils/tests/test_fetching.py @@ -10,7 +10,7 @@ @pytest.mark.timeout(60) @requires_good_network @pytest.mark.parametrize('url', ( - 'https://raw.githubusercontent.com/mne-tools/mne-python/master/README.rst', + 'https://raw.githubusercontent.com/mne-tools/mne-python/main/README.rst', )) def test_fetch_file(url, tmpdir): """Test URL retrieval.""" diff --git a/tools/circleci_download.sh b/tools/circleci_download.sh index e0b5eb643b9..e7a85c220ec 100755 --- a/tools/circleci_download.sh +++ b/tools/circleci_download.sh @@ -1,6 +1,6 @@ #!/bin/bash 
-ef -if [ "$CIRCLE_BRANCH" == "master" ] || [[ $(cat gitlog.txt) == *"[circle full]"* ]]; then +if [ "$CIRCLE_BRANCH" == "main" ] || [[ $(cat gitlog.txt) == *"[circle full]"* ]]; then echo "Doing a full dev build"; echo html_dev-memory > build.txt; python -c "import mne; mne.datasets._download_all_example_data()"; @@ -10,7 +10,7 @@ elif [ "$CIRCLE_BRANCH" == "maint/0.22" ]; then python -c "import mne; mne.datasets._download_all_example_data()"; else echo "Doing a partial build"; - FNAMES=$(git diff --name-only $(git merge-base $CIRCLE_BRANCH upstream/master) $CIRCLE_BRANCH); + FNAMES=$(git diff --name-only $(git merge-base $CIRCLE_BRANCH upstream/main) $CIRCLE_BRANCH); if [[ $(cat gitlog.txt) == *"[circle front]"* ]]; then FNAMES="tutorials/source-modeling/plot_mne_dspm_source_localization.py tutorials/machine-learning/plot_receptive_field.py examples/connectivity/plot_mne_inverse_label_connectivity.py tutorials/machine-learning/plot_sensors_decoding.py tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py tutorials/evoked/plot_20_visualize_evoked.py "${FNAMES}; python -c "import mne; print(mne.datasets.testing.data_path(update_path=True))"; @@ -106,4 +106,4 @@ else echo html_dev-noplot > build.txt; fi; fi; -echo "$PATTERN" > pattern.txt; \ No newline at end of file +echo "$PATTERN" > pattern.txt; From d8d5474fa5ce7b81b3abef0359235e231f39a53b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 25 Jan 2021 13:28:19 -0500 Subject: [PATCH 081/387] MAINT: Fix doc date (#8787) --- doc/_templates/layout.html | 2 +- doc/conf.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 76e2babb489..32921867f82 100755 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -82,7 +82,7 @@
  • Aarhus Universitet
  • Karl-Franzens-Universität Graz
  •
-    © Copyright 2012-2020, MNE Developers. Last updated on 2020-03-27.
+    © Copyright {{ copyright }}
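The ``doc/conf.py`` hunk that follows replaces the naive ``date.today()`` stamp behind this template variable with a timezone-aware UTC timestamp. A minimal standard-library sketch of the two renderings it feeds into the template (exact output depends on the build time):

.. code-block:: python

    from datetime import datetime, timezone

    td = datetime.now(tz=timezone.utc)
    iso = td.isoformat()                      # machine-readable timestamp
    short = td.strftime('%Y-%m-%d %H:%M %Z')  # readable fallback, e.g. '2021-01-25 18:28 UTC'
    print(iso, short)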

diff --git a/doc/conf.py b/doc/conf.py
index 5b433cc5241..79f06b10f5d 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -12,7 +12,7 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-from datetime import date
+from datetime import datetime, timezone
 from distutils.version import LooseVersion
 import gc
 import os
@@ -104,9 +104,13 @@
 # General information about the project.
 project = u'MNE'
 
-td = date.today()
-copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
                                                               td.isoformat())
+td = datetime.now(tz=timezone.utc)
+copyright = (
+    '2012-%(year)s, MNE Developers. Last updated '
+    '<time datetime="%(iso)s" class="localized">%(short)s</time>\n'
+    '<script type="text/javascript">$(function () { $("time.localized").localizeDate(); });</script>'  # noqa: E501
+) % dict(year=td.year, iso=td.isoformat(),
+         short=td.strftime('%Y-%m-%d %H:%M %Z'))
 
 nitpicky = True
 nitpick_ignore = [

From 9db75c108ae2d475050c395e57dd0062f78ee44d Mon Sep 17 00:00:00 2001
From: Robert Luke <748691+rob-luke@users.noreply.github.com>
Date: Tue, 26 Jan 2021 18:28:05 +1100
Subject: [PATCH 082/387] Add describe method for annotations (#8783)

* Add describe method to annotations
* Add tests
* Flake fix
* Pydocstyle
* Switch to_data_frame
* pydocstyle
* Try and fix sphinx error
---
 doc/changes/latest.inc        |  2 ++
 mne/annotations.py            | 30 +++++++++++++++++++++---------
 mne/tests/test_annotations.py | 23 ++++++++++++++++++++++-
 3 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index a5bc085d314..492b2a99d30 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -36,6 +36,8 @@ Enhancements
 
 - Add :meth:`raw.describe() <mne.io.Raw.describe>` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_)
 
+- Add :meth:`annotations.to_data_frame() <mne.Annotations.to_data_frame>` to return annotations as a pandas dataframe (:gh:`8783` by `Robert Luke`_)
+
 Bugs
 ~~~~
 
diff --git a/mne/annotations.py b/mne/annotations.py
index 58db835583d..119b803615a 100644
--- a/mne/annotations.py
+++ b/mne/annotations.py
@@ -1,4 +1,5 @@
 # Authors: Jaakko Leppakangas
+#          Robert Luke
 #
 # License: BSD (3-clause)
 
@@ -324,6 +325,25 @@ def delete(self, idx):
         self.duration = np.delete(self.duration, idx)
         self.description = np.delete(self.description, idx)
 
+    def to_data_frame(self):
+        """Export annotations in tabular structure as a pandas DataFrame.
+
+        Returns
+        -------
+        result : pandas.DataFrame
+            Returns a pandas DataFrame with onset, duration, and
+            description columns.
+        """
+        pd = _check_pandas_installed(strict=True)
+        dt = _handle_meas_date(self.orig_time)
+        if dt is None:
+            dt = _handle_meas_date(0)
+        dt = dt.replace(tzinfo=None)
+        onsets_dt = [dt + timedelta(seconds=o) for o in self.onset]
+        df = pd.DataFrame(dict(onset=onsets_dt, duration=self.duration,
+                               description=self.description))
+        return df
+
     def save(self, fname):
         """Save annotations to FIF, CSV or TXT.
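A minimal usage sketch of the ``to_data_frame`` method added above (requires pandas; the inputs mirror the test added by this patch):

.. code-block:: python

    import mne

    annot = mne.Annotations(onset=[1., 3.], duration=[0.5, 0.25],
                            description=['blink', 'bad_segment'], orig_time=0)
    df = annot.to_data_frame()
    # onsets become datetimes anchored at orig_time; durations stay in
    # seconds and descriptions are carried over unchanged
    print(df.columns.tolist())  # ['onset', 'duration', 'description']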
@@ -572,15 +592,7 @@ def _write_annotations(fid, annotations): def _write_annotations_csv(fname, annot): - pd = _check_pandas_installed(strict=True) - dt = _handle_meas_date(annot.orig_time) - if dt is None: - dt = _handle_meas_date(0) - dt = dt.replace(tzinfo=None) - onsets_dt = [dt + timedelta(seconds=o) for o in annot.onset] - df = pd.DataFrame(dict(onset=onsets_dt, duration=annot.duration, - description=annot.description)) - df.to_csv(fname, index=False) + annot.to_data_frame().to_csv(fname, index=False) def _write_annotations_txt(fname, annot): diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index fe62ae8707b..32b6651e200 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -1,4 +1,5 @@ # Authors: Jaakko Leppakangas +# Robert Luke # # License: BSD 3 clause @@ -21,7 +22,7 @@ events_from_annotations) from mne import Epochs, Annotations from mne.utils import (run_tests_if_main, _TempDir, requires_version, - catch_logging) + catch_logging, requires_pandas) from mne.utils import (assert_and_remove_boundary_annot, _raw_annot, _dt_to_stamp, _stamp_to_dt) from mne.io import read_raw_fif, RawArray, concatenate_raws @@ -1232,4 +1233,24 @@ def test_repr(): assert r == '' +@requires_pandas +def test_annotation_to_data_frame(): + """Test annotation class to data frame conversion.""" + onset = np.arange(1, 10) + durations = np.full_like(onset, [4, 5, 6, 4, 5, 6, 4, 5, 6]) + description = ["yy"] * onset.shape[0] + + a = Annotations(onset=onset, + duration=durations, + description=description, + orig_time=0) + + df = a.to_data_frame() + for col in ['onset', 'duration', 'description']: + assert col in df.columns + assert df.description[0] == 'yy' + assert (df.onset[1] - df.onset[0]).seconds == 1 + assert df.groupby('description').count().onset['yy'] == 9 + + run_tests_if_main() From fa779fc945dba0b77c466ad91aefe213a5be9681 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Wed, 27 Jan 2021 15:37:45 +0100 Subject: [PATCH 083/387] MRG: Brain silhouette (#8771) * Add silhouette parameter * Fix docstring * Update alpha * Fix silhouette check * TST: SSAO bug * Fix ssao * Remove SSAO * Touch examples * Add brain_kwargs * Revert "Touch examples" This reverts commit 5b5e9a8aa9736029df93c449058310a7a42ed960. 
* Add brain_kwargs * Touch examples * Adjust decimation and fix silhouette * Improve coverage * Improve coverage * Improve coverage * Commit suggestions --- doc/changes/latest.inc | 2 ++ .../plot_mixed_source_space_inverse.py | 3 +- mne/source_estimate.py | 18 +++++++----- mne/utils/docs.py | 5 ++++ mne/viz/_3d.py | 17 +++++++---- mne/viz/_brain/_brain.py | 29 ++++++++++++++++++- mne/viz/_brain/tests/test_brain.py | 8 +++-- mne/viz/backends/_pyvista.py | 4 ++- .../source-modeling/plot_beamformer_lcmv.py | 3 +- .../source-modeling/plot_visualize_stc.py | 3 +- 10 files changed, 72 insertions(+), 20 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 492b2a99d30..1cd91426879 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -38,6 +38,8 @@ Enhancements - Add :meth:`annotations.to_data_frame() ` to return annotations as a pandas dataframe (:gh:`8783` by `Robert Luke`_) +- Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) + Bugs ~~~~ diff --git a/examples/inverse/plot_mixed_source_space_inverse.py b/examples/inverse/plot_mixed_source_space_inverse.py index f9d7a6a56ce..268418825ff 100644 --- a/examples/inverse/plot_mixed_source_space_inverse.py +++ b/examples/inverse/plot_mixed_source_space_inverse.py @@ -139,7 +139,8 @@ pick_ori='vector') brain = stc_vec.plot( hemi='both', src=inverse_operator['src'], views='coronal', - initial_time=initial_time, subjects_dir=subjects_dir) + initial_time=initial_time, subjects_dir=subjects_dir, + brain_kwargs=dict(silhouette=True)) ############################################################################### # Plot the surface diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 08f66eee61f..abdae45ca00 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -649,7 +649,7 @@ def plot(self, subject=None, surface='inflated', hemi='lh', foreground=None, initial_time=None, time_unit='s', backend='auto', spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, verbose=None): brain = plot_source_estimates( self, subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -660,7 +660,8 @@ def plot(self, subject=None, surface='inflated', hemi='lh', initial_time=initial_time, time_unit=time_unit, backend=backend, spacing=spacing, title=title, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, - add_data_kwargs=add_data_kwargs, verbose=verbose) + add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs, + verbose=verbose) return brain @property @@ -1918,7 +1919,7 @@ def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto', background='black', foreground=None, initial_time=None, time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', add_data_kwargs=None, - verbose=None): # noqa: D102 + brain_kwargs=None, verbose=None): # noqa: D102 return plot_vector_source_estimates( self, subject=subject, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1931,7 +1932,7 @@ def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto', initial_time=initial_time, time_unit=time_unit, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - 
verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) class _BaseVolSourceEstimate(_BaseSourceEstimate): @@ -1949,7 +1950,7 @@ def plot_3d(self, subject=None, surface='white', hemi='both', foreground=None, initial_time=None, time_unit='s', backend='auto', spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, verbose=None): return super().plot( subject=subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1961,7 +1962,7 @@ def plot_3d(self, subject=None, surface='white', hemi='both', time_unit=time_unit, backend=backend, spacing=spacing, title=title, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) @copy_function_doc_to_method_doc(plot_volume_source_estimates) def plot(self, src, subject=None, subjects_dir=None, mode='stat_map', @@ -2280,7 +2281,8 @@ def plot_3d(self, subject=None, hemi='both', colormap='hot', background='black', foreground=None, initial_time=None, time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): # noqa: D102 + add_data_kwargs=None, brain_kwargs=None, + verbose=None): # noqa: D102 return _BaseVectorSourceEstimate.plot( self, subject=subject, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -2293,7 +2295,7 @@ def plot_3d(self, subject=None, hemi='both', colormap='hot', initial_time=initial_time, time_unit=time_unit, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) @fill_doc diff --git a/mne/utils/docs.py b/mne/utils/docs.py index c5f394a72bd..76b2f4a5110 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1533,6 +1533,11 @@ Additional arguments to brain.add_data (e.g., ``dict(time_label_size=10)``). """ +docdict['brain_kwargs'] = """ +brain_kwargs : dict | None + Additional arguments to the :class:`mne.viz.Brain` constructor (e.g., + ``dict(silhouette=True)``). +""" docdict['views'] = """ views : str | list View to use. Can be any of:: diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 4af33fa3005..25f06850ce3 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -1678,7 +1678,8 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', time_unit='s', backend='auto', spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, + verbose=None): """Plot SourceEstimate. 
Parameters @@ -1772,6 +1773,7 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', %(src_volume_options)s %(view_layout)s %(add_data_kwargs)s + %(brain_kwargs)s %(verbose)s Returns @@ -1828,7 +1830,8 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', stc, overlay_alpha=alpha, brain_alpha=alpha, vector_alpha=alpha, cortex=cortex, foreground=foreground, size=size, scale_factor=None, show_traces=show_traces, src=src, volume_options=volume_options, - view_layout=view_layout, add_data_kwargs=add_data_kwargs, **kwargs) + view_layout=view_layout, add_data_kwargs=add_data_kwargs, + brain_kwargs=brain_kwargs, **kwargs) def _plot_stc(stc, subject, surface, hemi, colormap, time_label, @@ -1836,7 +1839,7 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, time_unit, background, time_viewer, colorbar, transparent, brain_alpha, overlay_alpha, vector_alpha, cortex, foreground, size, scale_factor, show_traces, src, volume_options, - view_layout, add_data_kwargs): + view_layout, add_data_kwargs, brain_kwargs): from .backends.renderer import _get_3d_backend from ..source_estimate import _BaseVolSourceEstimate vec = stc._data_ndim == 3 @@ -1895,6 +1898,8 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, "figure": figure, "subjects_dir": subjects_dir, "views": views, "alpha": brain_alpha, } + if brain_kwargs is not None: + kwargs.update(brain_kwargs) if backend in ['pyvista', 'notebook']: kwargs["show"] = False kwargs["view_layout"] = view_layout @@ -2506,7 +2511,8 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, + verbose=None): """Plot VectorSourceEstimate with PySurfer. A "glass brain" is drawn and all dipoles defined in the source estimate @@ -2582,6 +2588,7 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', %(src_volume_options)s %(view_layout)s %(add_data_kwargs)s + %(brain_kwargs)s %(verbose)s Returns @@ -2609,7 +2616,7 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', vector_alpha=vector_alpha, cortex=cortex, foreground=foreground, size=size, scale_factor=scale_factor, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, - add_data_kwargs=add_data_kwargs) + add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs) @verbose diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index b9e1aff7596..8d8c2d0a2e2 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -266,6 +266,11 @@ class Brain(object): units : str Can be 'm' or 'mm' (default). %(view_layout)s + silhouette : dict | bool + As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity + and ``decimate`` (level of decimation between 0 and 1 or None) of the + brain's silhouette to display. If True, the default values are used + and if False, no silhouette will be displayed. Defaults to False. show : bool Display the window as soon as it is ready. Defaults to True. 
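A minimal sketch of how ``silhouette`` is reached through ``brain_kwargs`` (paths assume the MNE sample dataset; note that the constructor dict uses the ``line_width`` key, matching the defaults set in the next hunk):

.. code-block:: python

    import os
    import mne

    data_path = mne.datasets.sample.data_path()
    stc = mne.read_source_estimate(
        os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-meg'),
        subject='sample')
    # brain_kwargs is forwarded verbatim to the mne.viz.Brain constructor
    brain = stc.plot(hemi='both', initial_time=0.1,
                     subjects_dir=os.path.join(data_path, 'subjects'),
                     brain_kwargs=dict(silhouette=dict(line_width=2,
                                                       decimate=0.95)))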
@@ -344,7 +349,7 @@ def __init__(self, subject_id, hemi, surf, title=None, foreground=None, figure=None, subjects_dir=None, views='auto', offset=True, show_toolbar=False, offscreen=False, interaction='trackball', units='mm', - view_layout='vertical', show=True): + view_layout='vertical', silhouette=False, show=True): from ..backends.renderer import backend, _get_renderer, _get_3d_backend from .._3d import _get_cmap from matplotlib.colors import colorConverter @@ -405,6 +410,19 @@ def __init__(self, subject_id, hemi, surf, title=None, self._labels = {'lh': list(), 'rh': list()} self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = {} + # default values for silhouette + self._silhouette = { + 'color': self._bg_color, + 'line_width': 2, + 'alpha': alpha, + 'decimate': 0.9, + } + _validate_type(silhouette, (dict, bool), 'silhouette') + if isinstance(silhouette, dict): + self._silhouette.update(silhouette) + self.silhouette = True + else: + self.silhouette = silhouette # for now only one color bar can be added # since it is the same for all figures self._colorbar_added = False @@ -467,6 +485,15 @@ def __init__(self, subject_id, hemi, surf, title=None, else: actor = self._layered_meshes[h]._actor self._renderer.plotter.add_actor(actor) + if self.silhouette: + mesh = self._layered_meshes[h] + self._renderer._silhouette( + mesh=mesh._polydata, + color=self._silhouette["color"], + line_width=self._silhouette["line_width"], + alpha=self._silhouette["alpha"], + decimate=self._silhouette["decimate"], + ) self._renderer.set_camera(**views_dicts[h][v]) self.interaction = interaction diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index e206983d211..a21b5482250 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -174,7 +174,8 @@ def __init__(self): renderer.backend._close_all() brain = Brain(hemi=hemi, surf=surf, size=size, title=title, - cortex=cortex, units='m', **kwargs) + cortex=cortex, units='m', + silhouette=dict(decimate=0.95), **kwargs) with pytest.raises(TypeError, match='not supported'): brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None) with pytest.raises(ValueError, match='add_data'): @@ -431,7 +432,10 @@ def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): with pytest.raises(ValueError, match="got unknown keys"): _create_testing_brain(hemi='lh', surf='white', src='volume', volume_options={'foo': 'bar'}) - brain = _create_testing_brain(hemi='both', show_traces=False) + brain = _create_testing_brain( + hemi='both', show_traces=False, + brain_kwargs=dict(silhouette=dict(decimate=0.95)) + ) # test sub routines when show_traces=False brain._on_pick(None, None) brain._configure_vertex_time_course() diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 8397b9ac904..8fb7ff046ac 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -827,7 +827,9 @@ def _volume(self, dimensions, origin, spacing, scalars, volume_neg = None return grid, grid_mesh, volume_pos, volume_neg - def _silhouette(self, mesh, color=None, line_width=None, alpha=None): + def _silhouette(self, mesh, color=None, line_width=None, alpha=None, + decimate=None): + mesh = mesh.decimate(decimate) if decimate is not None else mesh silhouette_filter = vtk.vtkPolyDataSilhouette() silhouette_filter.SetInputData(mesh) silhouette_filter.SetCamera(self.plotter.renderer.GetActiveCamera()) diff --git a/tutorials/source-modeling/plot_beamformer_lcmv.py 
b/tutorials/source-modeling/plot_beamformer_lcmv.py index 81448ad9073..f55248436bd 100644 --- a/tutorials/source-modeling/plot_beamformer_lcmv.py +++ b/tutorials/source-modeling/plot_beamformer_lcmv.py @@ -238,7 +238,8 @@ brain = stc_vec.plot_3d( clim=dict(kind='value', lims=lims), hemi='both', views=['coronal', 'sagittal', 'axial'], size=(800, 300), - view_layout='horizontal', show_traces=0.3, **kwargs) + view_layout='horizontal', show_traces=0.3, + brain_kwargs=dict(silhouette=True), **kwargs) ############################################################################### # Visualize the activity of the maximum voxel with all three components diff --git a/tutorials/source-modeling/plot_visualize_stc.py b/tutorials/source-modeling/plot_visualize_stc.py index d4632a119bb..c70a3142d5d 100644 --- a/tutorials/source-modeling/plot_visualize_stc.py +++ b/tutorials/source-modeling/plot_visualize_stc.py @@ -166,7 +166,8 @@ inv = read_inverse_operator(fname_inv) stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector') brain = stc.plot(subject='sample', subjects_dir=subjects_dir, - initial_time=initial_time) + initial_time=initial_time, brain_kwargs=dict( + silhouette=True)) ############################################################################### # Dipole fits From 21b3c10445675b9578f2efd11b76719a74402587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Wed, 27 Jan 2021 15:56:05 +0100 Subject: [PATCH 084/387] MRG, BUG, DOC: derive GFP from average reference for EEG (#8775) * GFP was not derived from average-referenced signal as it must be The reason for #8772 is simply that we always calculated the GFP incorrectly, i.e. without re-referencing sensor signals to average first. When doing that, I now correctly get a flat GFP (0 at all time points) for single-channel recordings. 
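A quick NumPy check of that argument on synthetic data (a standalone sketch, not part of the patch):

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.standard_normal((1, 1000))  # (n_channels=1, n_times)

    # GFP = std across channels = RMS of the average-referenced signal;
    # with a single channel the average reference removes everything
    gfp = data.std(axis=0, ddof=0)
    rms = np.sqrt((data ** 2).mean(axis=0))

    assert np.allclose(gfp, 0.)      # flat, as described above
    assert not np.allclose(rms, 0.)  # RMS of the unreferenced signal is not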
Closes #8772, #8774 * Add GFP to EEG tutorial [skip azp][skip github] * Fix doc build [skip azp][skip github] * style [skip azp][skip github] * phrasing [skip github][skip azp] * Small fixes [skip azp][skip github] * Apply suggestions from code review [skip azp][skip github] Co-authored-by: Daniel McCloy * Fix [skip azp][skip github] * Small fixes + adjust color [skip azp][skip github] * Add gfp='power', gfp='power-only' * Better docstring rendering * use np.linalg.norm Co-authored-by: Alexandre Gramfort * power -> rms * GFP for EEG, RMS for MEG * Touch Evoked tutorial * Update changelog * Fix tests * FIX: Doc and test * Frobenius norm -> RMS Co-authored-by: Daniel McCloy Co-authored-by: Alexandre Gramfort Co-authored-by: Eric Larson --- doc/changes/latest.inc | 3 +- doc/references.bib | 45 +++++++++++++++ mne/viz/evoked.py | 38 ++++++++++--- mne/viz/tests/test_evoked.py | 58 ++++++++++++++------ tutorials/evoked/plot_20_visualize_evoked.py | 7 ++- tutorials/evoked/plot_eeg_erp.py | 47 ++++++++++++++++ 6 files changed, 171 insertions(+), 27 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 1cd91426879..74127de77bd 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -40,7 +40,6 @@ Enhancements - Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) - Bugs ~~~~ - Fix zen mode and scalebar toggling for :meth:`raw.plot() ` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_) @@ -65,6 +64,8 @@ Bugs - `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) +- `mne.viz.plot_evoked` and `mne.Evoked.plot` now correctly plot global field power (GFP) for EEG data when ``gfp=True`` or ``gfp='only'`` is passed (used to plot RMS). For MEG data, we continue to plot the RMS, but now label it correctly as such (:gh:`8775` by `Richard Höchenberger`_) + API changes ~~~~~~~~~~~ diff --git a/doc/references.bib b/doc/references.bib index 02184fb478a..07aa3e07526 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -1,3 +1,4 @@ +% Encoding: UTF-8 @article{AblinEtAl2018, author = {Ablin, Pierre and Cardoso, Jean-Francois and Gramfort, Alexandre}, @@ -1927,3 +1928,47 @@ @article{RichardsEtAl2016 keywords = {Average MRI templates, Brain development, Lifespan MRI, Neurodevelopmental MRI Database}, pages = {1254--1259} } + +@Article{Lehmann1980, + author = {Dietrich Lehmann and Wolfgang Skrandies}, + journal = {Electroencephalography and Clinical Neurophysiology}, + title = {Reference-free identification of components of checkerboard-evoked multichannel potential fields}, + year = {1980}, + issn = {0013-4694}, + month = {jun}, + number = {6}, + pages = {609--621}, + volume = {48}, + doi = {10.1016/0013-4694(80)90419-8}, + publisher = {Elsevier {BV}}, +} + +@Article{Lehmann1984, + author = {Dietrich Lehmann and Wolfgang Skrandies}, + journal = {Progress in Neurobiology}, + title = {Spatial analysis of evoked potentials in man—a review}, + year = {1984}, + issn = {0301-0082}, + month = {jan}, + number = {3}, + pages = {227--250}, + volume = {23}, + doi = {10.1016/0301-0082(84)90003-0}, + publisher = {Elsevier {BV}}, +} + +@Article{Murray2008, + author = {Micah M. Murray and Denis Brunet and Christoph M. 
Michel}, + journal = {Brain Topography}, + title = {Topographic {ERP} Analyses: {A} Step-by-Step Tutorial Review}, + year = {2008}, + issn = {0896-0267}, + month = {mar}, + number = {4}, + pages = {249--264}, + volume = {20}, + doi = {10.1007/s10548-008-0054-5}, + publisher = {Springer Science and Business Media {LLC}}, +} + +@Comment{jabref-meta: databaseType:bibtex;} diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 932c0ae5c27..f62747f5072 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -266,8 +266,8 @@ def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline, if axes is not None and proj == 'interactive': raise RuntimeError('Currently only single axis figures are supported' ' for interactive SSP selection.') - if isinstance(gfp, str) and gfp != 'only': - raise ValueError('gfp must be boolean or "only". Got %s' % gfp) + + _check_option('gfp', gfp, [True, False, 'only']) scalings = _handle_default('scalings', scalings) titles = _handle_default('titles', titles) @@ -428,7 +428,7 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, # Set amplitude scaling D = this_scaling * data[idx, :] _check_if_nan(D) - gfp_only = (isinstance(gfp, str) and gfp == 'only') + gfp_only = gfp == 'only' if not gfp_only: chs = [info['chs'][i] for i in idx] locs3d = np.array([ch['loc'][:3] for ch in chs]) @@ -473,10 +473,17 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, linewidth=0.5)[0]) line_list[-1].set_pickradius(3.) - if gfp: # 'only' or boolean True + if gfp: + if gfp in [True, 'only']: + if this_type == 'eeg': + this_gfp = D.std(axis=0, ddof=0) + label = 'GFP' + else: + this_gfp = np.sqrt((D ** 2).mean(axis=0)) + label = 'RMS' + gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1., 0.) - this_gfp = np.sqrt((D * D).mean(axis=0)) this_ylim = ax.get_ylim() if (ylim is None or this_type not in ylim.keys()) else ylim[this_type] if gfp_only: @@ -490,7 +497,7 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, zorder=3, alpha=line_alpha)[0]) ax.text(times[0] + 0.01 * (times[-1] - times[0]), this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0], - 'GFP', zorder=4, color=gfp_color, + label, zorder=4, color=gfp_color, path_effects=gfp_path_effects) for ii, line in zip(idx, line_list): if ii in bad_ch_idx: @@ -674,8 +681,23 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True, the same length as the number of channel types. If instance of Axes, there must be only one channel type plotted. gfp : bool | 'only' - Plot GFP in green if True or "only". If "only", then the individual - channel traces will not be shown. + Plot the global field power (GFP) or the root mean square (RMS) of the + data. For MEG data, this will plot the RMS. For EEG, it plots GFP, + i.e. the standard deviation of the signal across channels. The GFP is + equivalent to the RMS of an average-referenced signal. + + - ``True`` + Plot GFP or RMS (for EEG and MEG, respectively) and traces for all + channels. + - ``'only'`` + Plot GFP or RMS (for EEG and MEG, respectively), and omit the + traces for individual channels. + + The color of the GFP/RMS trace will be green if + ``spatial_colors=False``, and black otherwise. + + .. versionchanged:: 0.23 + Plot GFP for EEG instead of RMS. Label RMS traces correctly as such. window_title : str | None The title to put at the top of the figure. 
spatial_colors : bool diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index 27445c0d157..45d06dc50a3 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -39,7 +39,8 @@ # Use a subset of channels for plotting speed # make sure we have a magnetometer and a pair of grad pairs for topomap. -default_picks = (0, 1, 2, 3, 4, 6, 7, 61, 122, 183, 244, 305) +default_picks = (0, 1, 2, 3, 4, 6, 7, 61, 122, 183, 244, 305, + 315, 316, 317, 318) # EEG channels sel = (0, 7) @@ -50,7 +51,10 @@ def _get_epochs(picks=default_picks): events = read_events(event_name) epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks, decim=10, verbose='error') - epochs.info['bads'] = [epochs.ch_names[-1]] + epochs.info['bads'] = [ + epochs.ch_names[-5], # MEG + epochs.ch_names[-1] # EEG + ] epochs.info.normalize_proj() return epochs @@ -72,7 +76,8 @@ def test_plot_evoked_cov(): evoked = _get_epochs().average() cov = read_cov(cov_fname) cov['projs'] = [] # avoid warnings - evoked.plot(noise_cov=cov, time_unit='s') + with pytest.warns(RuntimeWarning, match='No average EEG reference'): + evoked.plot(noise_cov=cov, time_unit='s') with pytest.raises(TypeError, match='Covariance'): evoked.plot(noise_cov=1., time_unit='s') with pytest.raises(IOError, match='No such file'): @@ -96,7 +101,7 @@ def test_plot_evoked(): fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo', time_unit='s') amplitudes = _get_amplitudes(fig) - assert len(amplitudes) == 12 + assert len(amplitudes) == len(default_picks) assert evoked.proj is False # Test a click ax = fig.get_axes()[0] @@ -122,9 +127,26 @@ def test_plot_evoked(): proj='interactive', axes='foo', time_unit='s') plt.close('all') - # test GFP only - evoked.plot(gfp='only', time_unit='s') - pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s') + # test `gfp='only'`: GFP (EEG) and RMS (MEG) + fig, ax = plt.subplots(3) + evoked.plot(gfp='only', time_unit='s', axes=ax) + + assert len(ax[0].lines) == len(ax[1].lines) == len(ax[2].lines) == 1 + + assert ax[0].get_title() == 'EEG (3 channels)' + assert ax[0].texts[0].get_text() == 'GFP' + + assert ax[1].get_title() == 'Gradiometers (9 channels)' + assert ax[1].texts[0].get_text() == 'RMS' + + assert ax[2].get_title() == 'Magnetometers (2 channels)' + assert ax[1].texts[0].get_text() == 'RMS' + + plt.close('all') + + # Test invalid `gfp` + with pytest.raises(ValueError): + evoked.plot(gfp='foo', time_unit='s') # plot with bad channels excluded, spatial_colors, zorder & pos. layout evoked.rename_channels({'MEG 0133': 'MEG 0000'}) @@ -165,9 +187,9 @@ def _get_amplitudes(fig): @pytest.mark.parametrize('picks, rlims, avg_proj', [ - (default_picks, (0.59, 0.61), False), # MEG - (np.arange(340, 360), (0.49, 0.51), True), # EEG - (np.arange(340, 360), (0.78, 0.80), False), # EEG + (default_picks[:-4], (0.59, 0.61), False), # MEG + (np.arange(340, 360), (0.56, 0.57), True), # EEG + (np.arange(340, 360), (0.79, 0.81), False), # EEG ]) def test_plot_evoked_reconstruct(picks, rlims, avg_proj): """Test proj="reconstruct".""" @@ -266,23 +288,25 @@ def test_plot_white(): cov['method'] = 'empirical' cov['projs'] = [] # avoid warnings evoked = _get_epochs().average() + evoked.set_eeg_reference('average') # Avoid warnings + # test rank param. 
- evoked.plot_white(cov, rank={'mag': 101, 'grad': 201}, time_unit='s') + evoked.plot_white(cov, rank={'mag': 101, 'grad': 201, 'eeg': 10}, + time_unit='s') fig = evoked.plot_white(cov, rank={'mag': 101}, time_unit='s') # test rank evoked.plot_white(cov, rank={'grad': 201}, time_unit='s', axes=fig.axes) - with pytest.raises(ValueError, match=r'must have shape \(3,\), got \(2,'): + with pytest.raises(ValueError, match=r'must have shape \(4,\), got \(2,'): evoked.plot_white(cov, axes=fig.axes[:2]) with pytest.raises(ValueError, match='When not using SSS'): evoked.plot_white(cov, rank={'meg': 306}) evoked.plot_white([cov, cov], time_unit='s') plt.close('all') - assert 'eeg' not in evoked fig = plot_evoked_white(evoked, [cov, cov]) - assert len(fig.axes) == 2 * 2 - axes = np.array(fig.axes).reshape(2, 2) + assert len(fig.axes) == 3 * 2 + axes = np.array(fig.axes).reshape(3, 2) plot_evoked_white(evoked, [cov, cov], axes=axes) - with pytest.raises(ValueError, match=r'have shape \(2, 2\), got'): + with pytest.raises(ValueError, match=r'have shape \(3, 2\), got'): plot_evoked_white(evoked, [cov, cov], axes=axes[:, :1]) # Hack to test plotting of maxfiltered data @@ -301,7 +325,7 @@ def test_plot_compare_evokeds(): evoked = _get_epochs().average() # test defaults figs = plot_compare_evokeds(evoked) - assert len(figs) == 2 + assert len(figs) == 3 # test picks, combine, and vlines (1-channel pick also shows sensor inset) picks = ['MEG 0113', 'mag'] + 2 * [['MEG 0113', 'MEG 0112']] + [[0, 1]] vlines = [[0.1, 0.2], []] + 3 * ['auto'] diff --git a/tutorials/evoked/plot_20_visualize_evoked.py b/tutorials/evoked/plot_20_visualize_evoked.py index 322c5022918..a464591ce48 100644 --- a/tutorials/evoked/plot_20_visualize_evoked.py +++ b/tutorials/evoked/plot_20_visualize_evoked.py @@ -75,7 +75,12 @@ # select channels to plot by name, index, or type. In the next plot we'll show # only magnetometer channels, and also color-code the channel traces by their # location by passing ``spatial_colors=True``. Finally, we'll superimpose a -# trace of the :term:`global field power ` across channels: +# trace of the root mean square (RMS) of the signal across channels by +# passing ``gfp=True``. This parameter is called ``gfp`` for historical +# reasons and behaves correctly for all supported channel types: for MEG data, +# it will plot the RMS; while for EEG, it would plot the +# :term:`global field power ` (an average-referenced RMS), hence its +# name: evks['aud/left'].plot(picks='mag', spatial_colors=True, gfp=True) diff --git a/tutorials/evoked/plot_eeg_erp.py b/tutorials/evoked/plot_eeg_erp.py index df5c062fd27..e04bdfb03e9 100644 --- a/tutorials/evoked/plot_eeg_erp.py +++ b/tutorials/evoked/plot_eeg_erp.py @@ -10,6 +10,8 @@ """ +import matplotlib.pyplot as plt + import mne from mne.datasets import sample from mne.channels import combine_channels @@ -108,6 +110,45 @@ evoked_custom.plot(titles=dict(eeg=title), time_unit='s') evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s') +############################################################################### +# Global field power (GFP) +# ------------------------ +# +# Global field power :footcite:`Lehmann1980,Lehmann1984,Murray2008` is, +# generally speaking, a measure of agreement of the signals picked up by all +# sensors across the entire scalp: if all sensors have the same value at a +# given time point, the GFP will be zero at that time point; if the signals +# differ, the GFP will be non-zero at that time point. 
GFP
+# peaks may reflect "interesting" brain activity, warranting further
+# investigation. Mathematically, the GFP is the population standard
+# deviation across all sensors, calculated separately for every time point.
+#
+# You can plot the GFP using `evoked.plot(gfp=True) <mne.Evoked.plot>`. The GFP
+# trace will be black if ``spatial_colors=True`` and green otherwise. The EEG
+# reference will not affect the GFP:
+
+for evk in (evoked_car, evoked_no_ref):
+    evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-10, 10]))
+
+###############################################################################
+# To plot the GFP by itself you can pass ``gfp='only'`` (this makes it easier
+# to read off the GFP data values, because the scale is aligned):
+
+evoked_car.plot(gfp='only')
+
+###############################################################################
+# As stated above, the GFP is the population standard deviation of the signal
+# across channels. To compute it manually, we can leverage
+# the fact that `evoked.data <mne.Evoked.data>` is a NumPy array:
+
+gfp = evoked_car.data.std(axis=0, ddof=0)
+
+# Reproducing the plot style from above:
+fig, ax = plt.subplots()
+ax.plot(evoked_car.times, gfp * 1e6, color='lime')
+ax.fill_between(evoked_car.times, gfp * 1e6, color='lime', alpha=0.2)
+ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG')
+
 ###############################################################################
 # Evoked response averaged across channels by ROI
 # -----------------------------------------------
@@ -200,3 +241,9 @@
 # Besides for explicit access, this can be used for example to set titles.
 for cond in all_evokeds:
     all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
+
+
+##############################################################################
+# References
+# ----------
+# .. footbibliography::

From 1a990c8c0404824b6aea0daa338e5d6113d4975e Mon Sep 17 00:00:00 2001
From: Guillaume Favelier
Date: Wed, 27 Jan 2021 15:58:05 +0100
Subject: [PATCH 085/387] MRG: support plot_stc in report (#8730)

* Add get_brain_fig
* Add stc to report tuto
* Fix style
* Add minimal description
* Demo for get_brain_fig
* Remove all close
* Restore plt close
* Move get_brain_fig
* Close the dangling plot
* Update docstring
* Add to doc
* Update testing
* Fix style
* Remove get_brain_fig from plot_report.py
* Fix test
* Update doc
* Reduce to minimal
* Make it work with plot_alignment
* Comment the section
* Add auto_close to Report
* Revert "Add auto_close to Report"

This reverts commit 15f465b0ec07d23f4305338fa3e63fe8d538bbd0.

* Add auto_close only to add_figs_to_section() and add_slider_to_section()
* TST: Merge cells
* Try again
* Revert "Try again"

This reverts commit 828cf21ed08a6ac09849e3d53f34ba2175a6ab26.

* Revert "TST: Merge cells"

This reverts commit db1daccef1e50099f5f4d2a6939e743b3ef08a12.
* Remove commented code
* Do not instantiate
* Commit suggestions
* Fix style

Co-authored-by: Eric Larson
---
 doc/changes/latest.inc             |  2 ++
 mne/report.py                      | 25 ++++++++++++++++++-------
 mne/viz/_brain/tests/test_brain.py | 14 +++++++++++++-
 tutorials/misc/plot_report.py      | 28 ++++++++++++++++++++++++++++
 4 files changed, 61 insertions(+), 8 deletions(-)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 492b2a99d30..1cd91426879 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -40,6 +40,8 @@ Enhancements
 
 - Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_)
 
+- Add ``auto_close`` to `mne.Report.add_figs_to_section` and `mne.Report.add_slider_to_section` to manage closing figures (:gh:`8730` by `Guillaume Favelier`_)
+
 Bugs
 ~~~~
 - Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_)

diff --git a/mne/report.py b/mne/report.py
index c97e3699131..0787e386b66 100644
--- a/mne/report.py
+++ b/mne/report.py
@@ -72,7 +72,8 @@
 ###############################################################################
 # PLOTTING FUNCTIONS
 
-def _fig_to_img(fig, image_format='png', scale=None, **kwargs):
+def _fig_to_img(fig, image_format='png', scale=None, auto_close=True,
+                **kwargs):
     """Plot figure and create a binary image."""
     # fig can be ndarray, mpl Figure, Mayavi Figure, or callable that produces
     # a mpl Figure
     if isinstance(fig, np.ndarray):
         fig = _ndarray_to_fig(fig)
     elif callable(fig):
-        plt.close('all')
+        if auto_close:
+            plt.close('all')
         fig = fig(**kwargs)
     elif not isinstance(fig, Figure):
         from .viz.backends.renderer import backend, MNE_3D_BACKEND_TESTING
@@ -91,7 +93,8 @@ def _fig_to_img(fig, image_format='png', scale=None, **kwargs):
     else:  # Testing mode
         img = np.zeros((2, 2, 3))
-        backend._close_3d_figure(figure=fig)
+        if auto_close:
+            backend._close_3d_figure(figure=fig)
         fig = _ndarray_to_fig(img)
 
     output = BytesIO()
@@ -1128,7 +1131,7 @@ def _add_or_replace(self, fname, sectionlabel, html, replace=False):
 
     def add_figs_to_section(self, figs, captions, section='custom',
                             scale=None, image_format=None, comments=None,
-                            replace=False):
+                            replace=False, auto_close=True):
         """Append custom user-defined figures.
 
         Parameters
@@ -1158,6 +1161,9 @@ class construction.
replace : bool If ``True``, figures already present that have the same caption will be replaced. Defaults to ``False``. + auto_close : bool + If True, the plots are closed during the generation of the report. + Defaults to True. + + .. versionadded:: 0.23 Notes ----- @@ -1396,7 +1407,7 @@ class construction. raise TypeError('Captions must be None or an iterable of ' 'float, int, str, Got %s' % type(captions)) for ii, (fig, caption) in enumerate(zip(figs, captions)): - img = _fig_to_img(fig, image_format, scale) + img = _fig_to_img(fig, image_format, scale, auto_close) slice_id = '%s-%s-%s' % (name, global_id, sl[ii]) first = True if ii == 0 else False slices.append(_build_html_image(img, slice_id, div_klass, diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index a21b5482250..f5959601c60 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -23,7 +23,7 @@ from mne.source_space import (read_source_spaces, vertex_to_mni, setup_volume_source_space) from mne.datasets import testing -from mne.utils import check_version +from mne.utils import check_version, requires_pysurfer from mne.label import read_label from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh from mne.viz._brain.colormap import calculate_lut @@ -144,6 +144,18 @@ def test_brain_gc(renderer, brain_gc): brain.close() +@requires_pysurfer +@testing.requires_testing_data +def test_brain_routines(renderer, brain_gc): + """Test backend agnostic Brain routines.""" + brain_klass = renderer.get_brain_class() + if renderer.get_3d_backend() == "mayavi": + from surfer import Brain + else: # PyVista + from mne.viz._brain import Brain + assert brain_klass == Brain + + @testing.requires_testing_data def test_brain_init(renderer, tmpdir, pixel_ratio, brain_gc): """Test initialization of the Brain instance.""" diff --git a/tutorials/misc/plot_report.py b/tutorials/misc/plot_report.py index 2b30b410c5f..eb3a66fcc84 100644 --- a/tutorials/misc/plot_report.py +++ b/tutorials/misc/plot_report.py @@ -180,6 +180,8 @@ # the :meth:`~mne.Report.add_figs_to_section` method, and sliders via the # :meth:`~mne.Report.add_slider_to_section`: +report = mne.Report(verbose=True) + # generate a custom plot: fname_evoked = os.path.join(path, 'MEG', 'sample', 'sample_audvis-ave.fif') evoked = mne.read_evokeds(fname_evoked, @@ -203,6 +205,32 @@ report.save('report_custom.html', overwrite=True) +############################################################################### +# Adding a stc plot to a report +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# Now we see how :class:`~mne.Report` handles :class:`~mne.SourceEstimate` +# data. The following will produce a stc plot with vertex time courses. In +# this scenario, we also demonstrate how to use the +# :meth:`mne.viz.Brain.screenshot` method to save the figs in a slider. 
+ +report = mne.Report(verbose=True) +fname_stc = os.path.join(path, 'MEG', 'sample', 'sample_audvis-meg') +stc = mne.read_source_estimate(fname_stc, subject='sample') +figs = list() +kwargs = dict(subjects_dir=subjects_dir, initial_time=0.13, + clim=dict(kind='value', lims=[3, 6, 9])) +for hemi in ('lh', 'rh'): + brain = stc.plot(hemi=hemi, **kwargs) + brain.toggle_interface(False) + figs.append(brain.screenshot(time_viewer=True)) + brain.close() + +# add the stc plot to the report: +report.add_slider_to_section(figs) + +report.save('report_stc.html', overwrite=True) + ############################################################################### # Managing report sections # ^^^^^^^^^^^^^^^^^^^^^^^^ From f70f508a1bd5f73bdb4797e657340c11cc138d64 Mon Sep 17 00:00:00 2001 From: Alexandre Gramfort Date: Wed, 27 Jan 2021 17:29:28 +0100 Subject: [PATCH 086/387] MRG, BUG: fix IO or read_ica_eeglab (#8780) * WIP fix IO or read_ica_eeglab * FIX: icawinv in conflict * FIX: Flake * MAINT: Add test * update what's new * FIX: appendmat Co-authored-by: Eric Larson --- doc/changes/latest.inc | 2 ++ mne/io/eeglab/eeglab.py | 2 +- mne/preprocessing/ica.py | 32 +++++++++++++++++++++-------- mne/preprocessing/tests/test_ica.py | 29 ++++++++++++++++++++++++++ mne/utils/__init__.py | 3 ++- 5 files changed, 58 insertions(+), 10 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 95ddb0a7578..1cb3ea109b5 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -54,6 +54,8 @@ Bugs - Fix bug with :func:`mne.io.read_raw_nicolet` where header type values such as num_sample and duration_in_sec where not parsed properly (:gh:`8712` by `Alex Gramfort`_) +- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) + - Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_) - Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 555c271fbb8..570b9fba6f7 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -323,7 +323,7 @@ def __init__(self, input_fname, eog=(), ' the .set file contains epochs.' % eeg.trials) last_samps = [eeg.pnts - 1] - info, eeg_montage, update_ch_names = _get_info(eeg, eog=eog) + info, eeg_montage, _ = _get_info(eeg, eog=eog) # read the data if isinstance(eeg.data, str): diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index fa6b1795348..b19c2c64a8a 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -53,8 +53,8 @@ compute_corr, _get_inst_data, _ensure_int, copy_function_doc_to_method_doc, _pl, warn, Bunch, _check_preload, _check_compensation_grade, fill_doc, - _check_option, _PCA, int_like) -from ..utils.check import _check_all_same_channel_names + _check_option, _PCA, int_like, + _check_all_same_channel_names) from ..fixes import _get_args, _safe_svd from ..filter import filter_data @@ -2678,13 +2678,15 @@ def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg", return None -def read_ica_eeglab(fname): +@verbose +def read_ica_eeglab(fname, *, verbose=None): """Load ICA information saved in an EEGLAB .set file. Parameters ---------- fname : str Complete path to a .set EEGLAB file that contains an ICA object. 
+ %(verbose)s Returns ------- @@ -2692,9 +2694,11 @@ def read_ica_eeglab(fname): An ICA object based on the information contained in the input file. """ eeg = _check_load_mat(fname, None) - info = _get_info(eeg)[0] + info, eeg_montage, _ = _get_info(eeg) + info.set_montage(eeg_montage) pick_info(info, np.round(eeg['icachansind']).astype(int) - 1, copy=False) + rank = eeg.icasphere.shape[0] n_components = eeg.icaweights.shape[0] ica = ICA(method='imported_eeglab', n_components=n_components) @@ -2704,11 +2708,15 @@ def read_ica_eeglab(fname): ica.n_pca_components = None ica.n_components_ = n_components - ica.pre_whitener_ = np.ones((len(eeg.icachansind), 1)) - ica.pca_mean_ = np.zeros(len(eeg.icachansind)) - n_ch = len(ica.ch_names) - assert eeg.icaweights.shape == (n_components, n_ch) + assert len(eeg.icachansind) == n_ch + + ica.pre_whitener_ = np.ones((n_ch, 1)) + ica.pca_mean_ = np.zeros(n_ch) + + assert eeg.icasphere.shape[1] == n_ch + assert eeg.icaweights.shape == (n_components, rank) + # When PCA reduction is used in EEGLAB, runica returns # weights= weights*sphere*eigenvectors(:,1:ncomps)'; # sphere = eye(urchans). When PCA reduction is not used, we have: @@ -2718,9 +2726,17 @@ def read_ica_eeglab(fname): # So in either case, we can use SVD to get our square whitened # weights matrix (u * s) and our PCA vectors (v) back: use = eeg.icaweights @ eeg.icasphere + use_check = linalg.pinv(eeg.icawinv) + if not np.allclose(use, use_check, rtol=1e-6): + warn('Mismatch between icawinv and icaweights @ icasphere from EEGLAB ' + 'possibly due to ICA component removal, assuming icawinv is ' + 'correct') + use = use_check u, s, v = _safe_svd(use, full_matrices=False) ica.unmixing_matrix_ = u * s ica.pca_components_ = v ica.pca_explained_variance_ = s * s + ica.info = info ica._update_mixing_matrix() + ica._update_ica_names() return ica diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index 96bd5f72fe8..3907c5b48ba 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -6,6 +6,7 @@ from itertools import product import os import os.path as op +import shutil from unittest import SkipTest import pytest @@ -13,6 +14,7 @@ from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal) from scipy import stats, linalg +from scipy.io import loadmat, savemat import matplotlib.pyplot as plt from mne import (Epochs, read_events, pick_types, create_info, EpochsArray, @@ -1311,6 +1313,33 @@ def test_read_ica_eeglab(): rtol=1e-05, atol=1e-08) +@testing.requires_testing_data +def test_read_ica_eeglab_mismatch(tmpdir): + """Test read_ica_eeglab function when there is a mismatch.""" + fname_orig = op.join(test_base_dir, "EEGLAB", "test_raw.set") + base = op.basename(fname_orig)[:-3] + shutil.copyfile(fname_orig[:-3] + 'fdt', tmpdir.join(base + 'fdt')) + fname = tmpdir.join(base) + data = loadmat(fname_orig) + w = data['EEG']['icaweights'][0][0] + w[:] = np.random.RandomState(0).randn(*w.shape) + savemat(str(fname), data, appendmat=False) + assert op.isfile(fname) + with pytest.warns(RuntimeWarning, match='Mismatch.*removal.*icawinv.*'): + ica = read_ica_eeglab(fname) + _assert_ica_attributes(ica) + ica_correct = read_ica_eeglab(fname_orig) + attrs = [attr for attr in dir(ica_correct) + if attr.endswith('_') and not attr.startswith('_')] + assert 'mixing_matrix_' in attrs + assert 'unmixing_matrix_' in attrs + assert ica.labels_ == ica_correct.labels_ == {} + attrs.pop(attrs.index('labels_')) + for 
attr in attrs: + a, b = getattr(ica, attr), getattr(ica_correct, attr) + assert_allclose(a, b, rtol=1e-12, atol=1e-12, err_msg=attr) + + def _assert_ica_attributes(ica, data=None, limits=(1.0, 70)): """Assert some attributes of ICA objects.""" __tracebackhide__ = True diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index b79679586e7..b420e1732f2 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -17,7 +17,8 @@ _check_path_like, _check_src_normal, _check_stc_units, _check_pyqt5_version, _check_sphere, _check_time_format, _check_freesurfer_home, _suggest, _require_version, - _on_missing, int_like, _safe_input) + _on_missing, int_like, _safe_input, + _check_all_same_channel_names) from .config import (set_config, get_config, get_config_path, set_cache_dir, set_memmap_min_size, get_subjects_dir, _get_stim_channel, sys_info, _get_extra_data_path, _get_root_dir, From 9fc8623eebd62d31039e90927744e376f5ee611c Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 28 Jan 2021 03:02:11 -0500 Subject: [PATCH 087/387] MRG, ENH: Add inner surface option to coreg (#8793) * ENH: Add inner surface option to coreg * MAINT: DRY * DOC: latest --- doc/changes/0.9.inc | 2 +- doc/changes/latest.inc | 4 ++ mne/commands/mne_surf2bem.py | 2 +- mne/gui/__init__.py | 14 ++++++- mne/gui/_coreg_gui.py | 36 +++++++++++++----- mne/gui/_fiducials_gui.py | 74 ++++++++++-------------------------- mne/gui/_viewer.py | 5 ++- mne/utils/config.py | 1 + 8 files changed, 70 insertions(+), 68 deletions(-) diff --git a/doc/changes/0.9.inc b/doc/changes/0.9.inc index dae8ef881d8..a7318719051 100644 --- a/doc/changes/0.9.inc +++ b/doc/changes/0.9.inc @@ -180,7 +180,7 @@ API - Deprecated ``fmin, fmid, fmax`` in stc.plot and added ``clim`` by `Mark Wronkiewicz`_ -- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consquence, ``Raw.plot_raw_psds`` has been deprecated. +- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consequence, ``Raw.plot_raw_psds`` has been deprecated. - ``Raw`` instances returned by ``mne.forward.apply_forward_raw`` now always have times starting from zero to be consistent with all other ``Raw`` instances. 
To get the former ``start`` and ``stop`` times, diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 1cb3ea109b5..58a6205e49a 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -32,6 +32,8 @@ Enhancements - `mne.Report` has gained a new method `~mne.Report.add_custom_css` for adding user-defined styles (:gh:`8762` by `Richard Höchenberger`_) +- Add option to control appearance of opaque inside surface of the head to :ref:`mne coreg` (:gh:`8793` by `Eric Larson`_) + - Add :func:`mne.io.read_raw_nedf` for reading StarStim / enobio NEDF files (:gh:`8734` by `Tristan Stenner`_) - Add :meth:`raw.describe() ` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_) @@ -58,6 +60,8 @@ Bugs - Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_) +- Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_) + - Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) - Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py index 6512630a2e0..a7adaab594f 100755 --- a/mne/commands/mne_surf2bem.py +++ b/mne/commands/mne_surf2bem.py @@ -30,7 +30,7 @@ def run(): parser.add_option("-f", "--fif", dest="fif", help="FIF file produced", metavar="FILE") parser.add_option("-i", "--id", dest="id", default=4, - help=("Surface Id (e.g. 4 sur head surface)")) + help=("Surface Id (e.g. 4 for head surface)")) options, args = parser.parse_args() diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py index 9b69247d96f..a52d9a69de4 100644 --- a/mne/gui/__init__.py +++ b/mne/gui/__init__.py @@ -26,7 +26,7 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, trans=None, scrollable=True, project_eeg=None, orient_to_surface=None, scale_by_distance=None, mark_inside=None, interaction=None, scale=None, - advanced_rendering=None, verbose=None): + advanced_rendering=None, head_inside=True, verbose=None): """Coregister an MRI with a subject's head shape. The recommended way to use the GUI is through bash with: @@ -108,6 +108,11 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, bugs. .. versionadded:: 0.18 + head_inside : bool + If True (default), add opaque inner scalp head surface to help occlude + points behind the head. + + .. versionadded:: 0.23 %(verbose)s Returns @@ -138,6 +143,9 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, config.get('MNE_COREG_ADVANCED_RENDERING', 'true') == 'true' if head_opacity is None: head_opacity = config.get('MNE_COREG_HEAD_OPACITY', 1.) 
+ if head_inside is None: + head_inside = \ + config.get('MNE_COREG_HEAD_INSIDE', 'true').lower() == 'true' if width is None: width = config.get('MNE_COREG_WINDOW_WIDTH', 800) if height is None: @@ -162,6 +170,7 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, if scale is None: scale = config.get('MNE_COREG_SCENE_SCALE', 0.16) head_opacity = float(head_opacity) + head_inside = bool(head_inside) width = int(width) height = int(height) scale = float(scale) @@ -176,7 +185,8 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, orient_to_surface=orient_to_surface, scale_by_distance=scale_by_distance, mark_inside=mark_inside, interaction=interaction, - scale=scale, advanced_rendering=advanced_rendering) + scale=scale, advanced_rendering=advanced_rendering, + head_inside=head_inside) return _initialize_gui(frame, view) diff --git a/mne/gui/_coreg_gui.py b/mne/gui/_coreg_gui.py index 3f1cdeee8ff..cb5ce0f29c3 100644 --- a/mne/gui/_coreg_gui.py +++ b/mne/gui/_coreg_gui.py @@ -1656,6 +1656,7 @@ class ViewOptionsPanel(HasTraits): bgcolor = RGBColor() coord_frame = Enum('mri', 'head', label='Display coordinate frame') head_high_res = Bool(True, label='Show high-resolution head') + head_inside = Bool(True, label='Add opaque inner head surface') advanced_rendering = Bool(True, label='Use advanced OpenGL', desc='Enable advanced OpenGL methods that do ' 'not work with all renderers (e.g., depth ' @@ -1673,7 +1674,8 @@ class ViewOptionsPanel(HasTraits): format_func=_pass)), Item('head_high_res'), Spring(), Item('advanced_rendering'), - Spring(), Spring(), columns=3, show_labels=True), + Item('head_inside'), Spring(), Spring(), + columns=3, show_labels=True), Item('hsp_cf_obj', style='custom', label='Head axes'), Item('mri_cf_obj', style='custom', label='MRI axes'), HGroup(Item('bgcolor', label='Background'), Spring()), @@ -1756,6 +1758,7 @@ class CoregFrame(HasTraits): scene = Instance(MlabSceneModel, ()) head_high_res = Bool(True) advanced_rendering = Bool(True) + head_inside = Bool(True) data_panel = Instance(DataPanel) coreg_panel = Instance(CoregPanel) # right panel @@ -1818,19 +1821,21 @@ def __init__(self, raw=None, subject=None, subjects_dir=None, project_eeg=False, orient_to_surface=False, scale_by_distance=False, mark_inside=False, interaction='trackball', scale=0.16, - advanced_rendering=True): # noqa: D102 + advanced_rendering=True, head_inside=True): # noqa: D102 self._config = config or {} super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject, head_high_res=head_high_res, - advanced_rendering=advanced_rendering) + advanced_rendering=advanced_rendering, + head_inside=head_inside) self._initial_kwargs = dict(project_eeg=project_eeg, orient_to_surface=orient_to_surface, scale_by_distance=scale_by_distance, mark_inside=mark_inside, head_opacity=head_opacity, interaction=interaction, - scale=scale) + scale=scale, head_inside=head_inside) self._locked_opacity = self._initial_kwargs['head_opacity'] + self._locked_head_inside = self._initial_kwargs['head_inside'] if not 0 <= head_opacity <= 1: raise ValueError( "head_opacity needs to be a floating point number between 0 " @@ -1891,6 +1896,7 @@ def _init_plot(self): # [[0, 0, 0]] -- why?? 
) self.mri_obj.opacity = self._initial_kwargs['head_opacity'] + self.mri_obj.rear_opacity = float(self.head_inside) self.data_panel.fid_panel.hsp_obj = self.mri_obj self._update_mri_obj() self.mri_obj.plot() @@ -2000,7 +2006,7 @@ def _init_plot(self): mri_obj=self.mri_obj, hsp_obj=self.hsp_obj, eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj, hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj, - head_high_res=self.head_high_res, + head_high_res=self.head_high_res, head_inside=self.head_inside, bgcolor=self.bgcolor, advanced_rendering=self.advanced_rendering) self.data_panel.headview.scale = self._initial_kwargs['scale'] self.data_panel.headview.interaction = \ @@ -2008,10 +2014,9 @@ def _init_plot(self): self.data_panel.headview.left = True self.data_panel.view_options_panel.sync_trait( 'coord_frame', self.model) - self.data_panel.view_options_panel.sync_trait('head_high_res', self) - self.data_panel.view_options_panel.sync_trait('advanced_rendering', - self) - self.data_panel.view_options_panel.sync_trait('bgcolor', self) + for key in ('head_high_res', 'advanced_rendering', 'bgcolor', + 'head_inside'): + self.data_panel.view_options_panel.sync_trait(key, self) @on_trait_change('advanced_rendering') def _on_advanced_rendering_change(self): @@ -2040,9 +2045,17 @@ def _on_lock_change(self): else: self._locked_opacity = self.mri_obj.opacity self.mri_obj.opacity = 1. + self._locked_head_inside = self.head_inside + self.head_inside = False else: if self.mri_obj is not None: self.mri_obj.opacity = self._locked_opacity + self.head_inside = self._locked_head_inside + + @on_trait_change('head_inside') + def _on_head_inside_change(self): + if self.mri_obj is not None: + self.mri_obj.rear_opacity = float(self.head_inside) # 0 or 1 @cached_property def _get_hsp_visible(self): @@ -2108,12 +2121,15 @@ def s_c(key, value, lower=True): set_env=False) s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject) - s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res) s_c('MNE_COREG_ADVANCED_RENDERING', self.advanced_rendering) + s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res) if self.lock_fiducials: opacity = self.mri_obj.opacity + head_inside = self.head_inside else: opacity = self._locked_opacity + head_inside = self._locked_head_inside + s_c('MNE_COREG_HEAD_INSIDE', head_inside) s_c('MNE_COREG_HEAD_OPACITY', opacity) if size is not None: s_c('MNE_COREG_WINDOW_WIDTH', size[0]) diff --git a/mne/gui/_fiducials_gui.py b/mne/gui/_fiducials_gui.py index 02245e89251..f6b495e34d6 100644 --- a/mne/gui/_fiducials_gui.py +++ b/mne/gui/_fiducials_gui.py @@ -312,20 +312,10 @@ def _update_pos(self): if not np.allclose(getattr(self, attr), self.current_pos_mm * 1e-3): setattr(self, attr, self.current_pos_mm * 1e-3) - @on_trait_change('model:lpa') - def _update_lpa(self, name): - if self.set == 'LPA': - self.current_pos_mm = self.lpa * 1000 - - @on_trait_change('model:nasion') - def _update_nasion(self, name): - if self.set.lower() == 'Nasion': - self.current_pos_mm = self.nasion * 1000 - - @on_trait_change('model:rpa') - def _update_rpa(self, name): - if self.set.lower() == 'RPA': - self.current_pos_mm = self.rpa * 1000 + @on_trait_change('model:lpa,model:nasion,model:rpa') + def _update_fiducial(self, value): + attr = self.set.lower() + self.current_pos_mm = getattr(self, attr) * 1000 def _reset_fid_fired(self): self.model.reset = True @@ -378,6 +368,7 @@ def _on_pick(self, picker): pt = [picker.picked_positions[idx]] else: logger.debug("GUI: picked object other than MRI") + return def round_(x): return round(x, 3) 
@@ -400,27 +391,18 @@ def round_(x): msg.append(line) logger.debug('\n'.join(msg)) - if self.set == 'Nasion': - self.nasion = pt - elif self.set == 'LPA': - self.lpa = pt - elif self.set == 'RPA': - self.rpa = pt - else: - raise ValueError("set = %r" % self.set) + set_ = self.set.lower() + assert set_ in _VIEW_DICT, set_ + setattr(self, set_, pt) @on_trait_change('set') def _on_set_change(self, obj, name, old, new): - if new == 'Nasion': - self.current_pos_mm = self.nasion * 1000 - self.headview.front = True - elif new == 'LPA': - self.current_pos_mm = self.lpa * 1000 - self.headview.left = True - elif new == 'RPA': - self.current_pos_mm = self.rpa * 1000 - self.headview.right = True + new = new.lower() + self._update_fiducial(None) + setattr(self.headview, _VIEW_DICT[new], True) + +_VIEW_DICT = dict(lpa='left', nasion='front', rpa='right') # FiducialsPanel view that allows manipulating all coordinates numerically view2 = View(VGroup(Item('fid_file', label='Fiducials File'), @@ -500,10 +482,6 @@ def __init__(self, subject=None, subjects_dir=None, def _init_plot(self): _toggle_mlab_render(self, False) - lpa_color = defaults['lpa_color'] - nasion_color = defaults['nasion_color'] - rpa_color = defaults['rpa_color'] - # bem color = defaults['mri_color'] self.mri_obj = SurfaceObject(points=self.model.points, color=color, @@ -512,24 +490,14 @@ def _init_plot(self): self.panel.hsp_obj = self.mri_obj # fiducials - self.lpa_obj = PointObject(scene=self.scene, color=lpa_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False) - self.sync_trait('point_scale', self.lpa_obj, mutual=False) - - self.nasion_obj = PointObject(scene=self.scene, color=nasion_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('nasion', self.nasion_obj, 'points', - mutual=False) - self.sync_trait('point_scale', self.nasion_obj, mutual=False) - - self.rpa_obj = PointObject(scene=self.scene, color=rpa_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False) - self.sync_trait('point_scale', self.rpa_obj, mutual=False) + for key in ('lpa', 'nasion', 'rpa'): + attr = f'{key}_obj' + setattr(self, attr, PointObject( + scene=self.scene, color=defaults[f'{key}_color'], + has_norm=True, point_scale=self.point_scale)) + obj = getattr(self, attr) + self.panel.sync_trait(key, obj, 'points', mutual=False) + self.sync_trait('point_scale', obj, mutual=False) self.headview.left = True _toggle_mlab_render(self, True) diff --git a/mne/gui/_viewer.py b/mne/gui/_viewer.py index 4fe6e9b1002..8b1762795af 100644 --- a/mne/gui/_viewer.py +++ b/mne/gui/_viewer.py @@ -488,6 +488,7 @@ class SurfaceObject(Object): surf = Instance(Surface) surf_rear = Instance(Surface) + rear_opacity = Float(1.) view = View(HGroup(Item('visible', show_label=False), Item('color', show_label=False), @@ -532,7 +533,9 @@ def plot(self): self.sync_trait('color', self.surf_rear.actor.property, mutual=False) self.sync_trait('visible', self.surf_rear, 'visible') - self.surf_rear.actor.property.opacity = 1. 
+        self.surf_rear.actor.property.opacity = self.rear_opacity
+        self.sync_trait(
+            'rear_opacity', self.surf_rear.actor.property, 'opacity')
         surf = pipeline.surface(
             normals, figure=fig, color=self.color, representation=rep,
             line_width=1)
diff --git a/mne/utils/config.py b/mne/utils/config.py
index 0dbd92f79a8..ef2e2b1e1b5 100644
--- a/mne/utils/config.py
+++ b/mne/utils/config.py
@@ -73,6 +73,7 @@ def set_memmap_min_size(memmap_min_size):
     'MNE_COREG_GUESS_MRI_SUBJECT',
     'MNE_COREG_HEAD_HIGH_RES',
     'MNE_COREG_HEAD_OPACITY',
+    'MNE_COREG_HEAD_INSIDE',
     'MNE_COREG_INTERACTION',
     'MNE_COREG_MARK_INSIDE',
     'MNE_COREG_PREPARE_BEM',

From 982b0e093865ec4cc8116c47a1fc7349fbdf682e Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 28 Jan 2021 03:52:36 -0500
Subject: [PATCH 088/387] MRG, FIX: Fix Brain offset default (#8794)

* FIX: Fix offset default

* ENH: Fix offset for tilted MRIs

* ENH: Fix alignment [circle front]

Co-authored-by: Guillaume Favelier
---
 doc/changes/latest.inc | 4 ++
 mne/label.py | 10 +++++
 mne/viz/_brain/_brain.py | 56 ++++++++++++++++++++++------
 mne/viz/_brain/surface.py | 30 +++++++--------
 mne/viz/backends/_pysurfer_mayavi.py | 3 +-
 mne/viz/backends/_pyvista.py | 32 ++++++++++------
 6 files changed, 97 insertions(+), 38 deletions(-)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 58a6205e49a..5e9559060c5 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -42,6 +42,8 @@ Enhancements

 - Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_)

+- Add parameter ``align=True`` to `mne.viz.Brain.show_view` to make views relative to the closest canonical (MNI) axes rather than the native MRI surface RAS coordinates (:gh:`8794` by `Eric Larson`_)
+
 - Add ``auto_close`` to `mne.Report.add_figs_to_section` and `mne.Report.add_slider_to_section` to manage closing figures (:gh:`8730` by `Guillaume Favelier`_)

 Bugs
@@ -58,6 +60,8 @@ Bugs

 - Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_)

+- Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_)
+
 - Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_)

 - Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_)

 - Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_)
diff --git a/mne/label.py b/mne/label.py
index e596b68c7b3..6731a9e1a74 100644
--- a/mne/label.py
+++ b/mne/label.py
@@ -921,6 +921,7 @@ def read_label(filename, subject=None, color=None):
     See Also
     --------
     read_labels_from_annot
+    write_labels_to_annot
    """
    if subject is not None and not isinstance(subject, str):
        raise TypeError('subject must be a string')
@@ -2042,6 +2043,11 @@ def read_labels_from_annot(subject, parc='aparc', hemi='both',
     -------
     labels : list of Label
         The labels, sorted by label name (ascending).
+
+    See Also
+    --------
+    write_labels_to_annot
+    morph_labels
     """
     logger.info('Reading labels from parcellation...')

@@ -2362,6 +2368,10 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,

         ..
versionadded:: 0.21.0
     %(verbose)s

+    See Also
+    --------
+    read_labels_from_annot
+
     Notes
     -----
     Vertices that are not covered by any of the labels are assigned to a label
diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py
index 8d8c2d0a2e2..469c757782b 100644
--- a/mne/viz/_brain/_brain.py
+++ b/mne/viz/_brain/_brain.py
@@ -22,7 +22,7 @@
 from collections import OrderedDict

 from .colormap import calculate_lut
-from .surface import Surface
+from .surface import _Surface
 from .view import views_dicts, _lh_views_dict
 from .mplcanvas import MplCanvas
 from .callback import (ShowView, IntSlider, TimeSlider, SmartSlider,
@@ -35,9 +35,10 @@
 from ...defaults import _handle_default
 from ...surface import mesh_edges
 from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm
-from ...transforms import apply_trans
+from ...transforms import apply_trans, invert_transform
 from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
-                      use_log_level, Bunch, _ReuseCycle, warn)
+                      use_log_level, Bunch, _ReuseCycle, warn,
+                      get_subjects_dir)


 @decorator
@@ -251,9 +252,15 @@ class Brain(object):
         variable.
     views : list | str
         The views to use.
-    offset : bool
-        If True, aligs origin with medial wall. Useful for viewing inflated
-        surface where hemispheres typically overlap (Default: True).
+    offset : bool | str
+        If True, shifts the right- or left-most x coordinate of the left and
+        right surfaces, respectively, to be at zero. This is useful for
+        viewing inflated surfaces, where the hemispheres typically overlap.
+        Can be "auto" (default), which uses True with inflated surfaces and
+        False otherwise. Only used when ``hemi='both'``.
+
+        .. versionchanged:: 0.23
+           Default changed to "auto".
     show_toolbar : bool
         If True, toolbars will be shown for each view.
offscreen : bool @@ -347,7 +354,7 @@ class Brain(object): def __init__(self, subject_id, hemi, surf, title=None, cortex="classic", alpha=1.0, size=800, background="black", foreground=None, figure=None, subjects_dir=None, - views='auto', offset=True, show_toolbar=False, + views='auto', offset='auto', show_toolbar=False, offscreen=False, interaction='trackball', units='mm', view_layout='vertical', silhouette=False, show=True): from ..backends.renderer import backend, _get_renderer, _get_3d_backend @@ -395,6 +402,7 @@ def __init__(self, subject_id, hemi, surf, title=None, raise ValueError('"size" parameter must be an int or length-2 ' 'sequence of ints.') self._size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple + subjects_dir = get_subjects_dir(subjects_dir) self.time_viewer = False self.notebook = (_get_3d_backend() == "notebook") @@ -440,6 +448,10 @@ def __init__(self, subject_id, hemi, surf, title=None, self._brain_color = _get_cmap(geo_kwargs['colormap'])(val) # load geometry for one or both hemispheres as necessary + _validate_type(offset, (str, bool), 'offset') + if isinstance(offset, str): + _check_option('offset', offset, ('auto',), extra='when str') + offset = (surf == 'inflated') offset = None if (not offset or hemi != 'both') else 0.0 self._renderer = _get_renderer(name=self._title, size=self._size, @@ -454,10 +466,11 @@ def __init__(self, subject_id, hemi, surf, title=None, self.window = self.plotter.app_window self.window.signal_close.connect(self._clean) + self._setup_canonical_rotation() for h in self._hemis: # Initialize a Surface object as the geometry - geo = Surface(subject_id, h, surf, subjects_dir, offset, - units=self._units) + geo = _Surface(subject_id, h, surf, subjects_dir, offset, + units=self._units, x_dir=self._rigid[0, :3]) # Load in the geometry and curvature geo.load_geometry() geo.load_curvature() @@ -511,6 +524,21 @@ def __init__(self, subject_id, hemi, surf, title=None, if hemi == 'rh' and hasattr(self._renderer, "_orient_lights"): self._renderer._orient_lights() + def _setup_canonical_rotation(self): + from ...coreg import fit_matched_points, _trans_from_params + self._rigid = np.eye(4) + try: + xfm = read_talxfm(self._subject_id, self._subjects_dir) + except Exception: + return + # XYZ+origin + halfway + pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) + pts_subj = apply_trans(invert_transform(xfm), pts_tal) + # we fit with scaling enabled, but then discard it (we just need + # the rigid-body components) + params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params') + self._rigid[:] = _trans_from_params((True, True, False), params[:6]) + def setup_time_viewer(self, time_viewer=True, show_traces=True): """Configure the time viewer parameters. @@ -2556,7 +2584,7 @@ def show(self): self._renderer.show() def show_view(self, view=None, roll=None, distance=None, row=0, col=0, - hemi=None): + hemi=None, align=True): """Orient camera to display view. Parameters @@ -2573,6 +2601,11 @@ def show_view(self, view=None, roll=None, distance=None, row=0, col=0, The column to set. hemi : str Which hemi to use for string lookup (when in "both" mode). + align : bool + If True, consider view arguments relative to canonical MRI + directions (closest to MNI for the subject) rather than native MRI + space. This helps when MRIs are not in standard orientation (e.g., + have large rotations). 
""" hemi = self._hemi if hemi is None else hemi if hemi == 'split': @@ -2589,7 +2622,8 @@ def show_view(self, view=None, roll=None, distance=None, row=0, col=0, if distance is not None: view.update(distance=distance) self._renderer.subplot(row, col) - self._renderer.set_camera(**view, reset_camera=False) + xfm = self._rigid if align else None + self._renderer.set_camera(**view, reset_camera=False, rigid=xfm) self._update() def reset_view(self): diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py index 5520207afdb..305961d9cc5 100644 --- a/mne/viz/_brain/surface.py +++ b/mne/viz/_brain/surface.py @@ -9,12 +9,13 @@ from os import path as path import numpy as np -from ...utils import _check_option, get_subjects_dir, _check_fname +from ...utils import (_check_option, get_subjects_dir, _check_fname, + _validate_type) from ...surface import (complete_surface_info, read_surface, read_curvature, _read_patch) -class Surface(object): +class _Surface(object): """Container for a brain surface. It is used for storing vertices, faces and morphometric data @@ -37,6 +38,8 @@ class Surface(object): be applied. If != 0.0, an additional offset will be used. units : str Can be 'm' or 'mm' (default). + x_dir : ndarray | None + The x direction to use for offset alignment. Attributes ---------- @@ -68,18 +71,13 @@ class Surface(object): """ def __init__(self, subject_id, hemi, surf, subjects_dir=None, offset=None, - units='mm'): + units='mm', x_dir=None): - hemis = ('lh', 'rh') - - if hemi not in hemis: - raise ValueError('hemi should be either "lh" or "rh",' + - 'given value {0}'.format(hemi)) - - if offset is not None and ((not isinstance(offset, float)) and - (not isinstance(offset, int))): - raise ValueError('offset should either float or int, given ' + - 'type {0}'.format(type(offset).__name__)) + x_dir = np.array([1., 0, 0]) if x_dir is None else x_dir + assert isinstance(x_dir, np.ndarray) + assert np.isclose(np.linalg.norm(x_dir), 1., atol=1e-6) + assert hemi in ('lh', 'rh') + _validate_type(offset, (None, 'numeric'), 'offset') self.units = _check_option('units', units, ('mm', 'm')) self.subject_id = subject_id @@ -93,6 +91,7 @@ def __init__(self, subject_id, hemi, surf, subjects_dir=None, offset=None, self.grey_curv = None self.nn = None self.labels = dict() + self.x_dir = x_dir subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) self.data_path = path.join(subjects_dir, subject_id) @@ -122,10 +121,11 @@ def load_geometry(self): if self.units == 'm': coords /= 1000. 
if self.offset is not None: + x_ = coords @ self.x_dir if self.hemi == 'lh': - coords[:, 0] -= (np.max(coords[:, 0]) + self.offset) + coords -= (np.max(x_) + self.offset) * self.x_dir else: - coords[:, 0] -= (np.min(coords[:, 0]) + self.offset) + coords -= (np.min(x_) + self.offset) * self.x_dir surf = dict(rr=coords, tris=faces) complete_surface_info(surf, copy=False, verbose=False) nn = surf['nn'] diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index 4bc33cbcd73..2bccd8d6c89 100644 --- a/mne/viz/backends/_pysurfer_mayavi.py +++ b/mne/viz/backends/_pysurfer_mayavi.py @@ -314,7 +314,8 @@ def close(self): _close_3d_figure(figure=self.fig) def set_camera(self, azimuth=None, elevation=None, distance=None, - focalpoint=None, roll=None, reset_camera=None): + focalpoint=None, roll=None, reset_camera=None, + rigid=None): _set_3d_view(figure=self.fig, azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint, roll=roll) diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 8fb7ff046ac..5e8103e16f3 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -25,6 +25,7 @@ from ._utils import (_get_colormap_from_array, _alpha_blend_background, ALLOWED_QUIVER_MODES, _init_qt_resources) from ...fixes import _get_args +from ...transforms import apply_trans from ...utils import copy_base_doc_to_subclass_doc, _check_option @@ -636,10 +637,11 @@ def close(self): _close_3d_figure(figure=self.figure) def set_camera(self, azimuth=None, elevation=None, distance=None, - focalpoint=None, roll=None, reset_camera=True): + focalpoint=None, roll=None, reset_camera=True, + rigid=None): _set_3d_view(self.figure, azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint, roll=roll, - reset_camera=reset_camera) + reset_camera=reset_camera, rigid=rigid) def reset_camera(self): self.plotter.reset_camera() @@ -959,17 +961,21 @@ def _get_camera_direction(focalpoint, position): r = np.sqrt(x * x + y * y + z * z) theta = np.arccos(z / r) phi = np.arctan2(y, x) - return r, theta, phi, focalpoint + return r, theta, phi def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None, - reset_camera=True): + reset_camera=True, rigid=None): + rigid = np.eye(4) if rigid is None else rigid position = np.array(figure.plotter.camera_position[0]) if reset_camera: figure.plotter.reset_camera() if focalpoint is None: focalpoint = np.array(figure.plotter.camera_position[1]) - r, theta, phi, fp = _get_camera_direction(focalpoint, position) + # work in the transformed space + position = apply_trans(rigid, position) + focalpoint = apply_trans(rigid, focalpoint) + _, theta, phi = _get_camera_direction(focalpoint, position) if azimuth is not None: phi = _deg2rad(azimuth) @@ -993,21 +999,25 @@ def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None, if elevation is None or 5. 
<= abs(elevation) <= 175.: view_up = [0, 0, 1] else: - view_up = [np.sin(phi), np.cos(phi), 0] + view_up = [0, 1, 0] position = [ distance * np.cos(phi) * np.sin(theta), distance * np.sin(phi) * np.sin(theta), distance * np.cos(theta)] + + # restore to the original frame + rigid = np.linalg.inv(rigid) + position = apply_trans(rigid, position) + focalpoint = apply_trans(rigid, focalpoint) + view_up = apply_trans(rigid, view_up, move=False) figure.plotter.camera_position = [ position, focalpoint, view_up] + # We need to add the requested roll to the roll dictated by the + # transformed view_up if roll is not None: - figure.plotter.camera.SetRoll(roll) + figure.plotter.camera.SetRoll(figure.plotter.camera.GetRoll() + roll) - figure.plotter.renderer._azimuth = azimuth - figure.plotter.renderer._elevation = elevation - figure.plotter.renderer._distance = distance - figure.plotter.renderer._roll = roll figure.plotter.update() _process_events(figure.plotter) From cff4c3709aaa9f2a50e10fca29c199cd2f5bb526 Mon Sep 17 00:00:00 2001 From: Yu-Han Luo Date: Thu, 28 Jan 2021 21:44:13 +0800 Subject: [PATCH 089/387] FIX: fix overwrite in make_scalp_surfaces (#8800) * FIX: fix overwrite in make_scalp_surfaces * update what's new --- doc/changes/latest.inc | 2 ++ mne/commands/mne_make_scalp_surfaces.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 5e9559060c5..caf52450efd 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -78,6 +78,8 @@ Bugs - `mne.viz.plot_evoked` and `mne.Evoked.plot` now correctly plot global field power (GFP) for EEG data when ``gfp=True`` or ``gfp='only'`` is passed (used to plot RMS). For MEG data, we continue to plot the RMS, but now label it correctly as such (:gh:`8775` by `Richard Höchenberger`_) +- Fix bug with :ref:`mne make_scalp_surfaces` where ``--overwrite`` was not functional (:gh:`8800` by `Yu-Han Luo`_) + API changes ~~~~~~~~~~~ diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py index 649aa621dda..b6528dff893 100755 --- a/mne/commands/mne_make_scalp_surfaces.py +++ b/mne/commands/mne_make_scalp_surfaces.py @@ -112,7 +112,7 @@ def check_seghead(surf_path=op.join(subj_path, 'surf')): surf = mne.bem._surfaces_to_bem( [surf], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], incomplete=incomplete, extra=msg)[0] - mne.write_bem_surfaces(dense_fname, surf) + mne.write_bem_surfaces(dense_fname, surf, overwrite=overwrite) levels = 'medium', 'sparse' tris = [] if no_decimate else [30000, 2500] if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true': @@ -131,7 +131,7 @@ def check_seghead(surf_path=op.join(subj_path, 'surf')): [dict(rr=points, tris=tris)], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, incomplete=incomplete, extra=msg) - mne.write_bem_surfaces(dec_fname, dec_surf) + mne.write_bem_surfaces(dec_fname, dec_surf, overwrite=overwrite) mne.utils.run_command_if_main() From f31643185741aa6e46ef4869673e5178740c4980 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 28 Jan 2021 13:19:24 -0500 Subject: [PATCH 090/387] MRG, BUG: Allow depth > 1 (#8804) * BUG: Allow depth > 1 * FIX: Err * FIX: Missed * Update mne/minimum_norm/inverse.py Co-authored-by: Alexandre Gramfort Co-authored-by: Alexandre Gramfort --- doc/changes/latest.inc | 2 ++ doc/references.bib | 17 +++++++++++++++++ mne/minimum_norm/inverse.py | 17 +++++++++++++---- mne/minimum_norm/tests/test_inverse.py | 4 +++- mne/utils/docs.py | 5 ++--- 5 files 
changed, 37 insertions(+), 8 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index caf52450efd..112616b3960 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -60,6 +60,8 @@ Bugs - Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) +- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where ``depth`` was errantly restricted to be less than or equal to 1. (:gh:`8804` by `Eric Larson`_) + - Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_) - Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_) diff --git a/doc/references.bib b/doc/references.bib index 07aa3e07526..075a899ec90 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -961,6 +961,23 @@ @article{LinEtAl2006 year = {2006} } + +@article{LinEtAl2006a, + title = {Assessing and improving the spatial accuracy in {MEG} source localization by depth-weighted minimum-norm estimates}, + volume = {31}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2005.11.054}, + language = {en}, + number = {1}, + urldate = {2021-01-28}, + journal = {NeuroImage}, + author = {Lin, Fa-Hsuan and Witzel, Thomas and Ahlfors, Seppo P. and Stufflebeam, Steven M. and Belliveau, John W. and Hämäläinen, Matti S.}, + month = may, + year = {2006}, + keywords = {Brain, Depth, Inverse problem, MEG, Minimum-norm}, + pages = {160--171} +} + @article{LiuEtAl1998, author = {Liu, Arthur K. and Belliveau, John W. and Dale, Anders M.}, doi = {10.1073/pnas.95.15.8945}, diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index 5d5ccf4ad24..82f498d7525 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -1372,9 +1372,9 @@ def _prepare_forward(forward, info, noise_cov, fixed, loose, rank, pca, # Deal with "depth" if exp is not None: exp = float(exp) - if not (0 <= exp <= 1): - raise ValueError('depth exponent should be a scalar between ' - '0 and 1, got %s' % (exp,)) + if exp < 0: + raise ValueError('depth exponent should be greater than or ' + f'equal to 0, got {exp}') exp = exp or None # alias 0. -> None # put the forward solution in correct orientation @@ -1527,6 +1527,13 @@ def make_inverse_operator(info, forward, noise_cov, loose='auto', depth=0.8, has patch statistics computed, these are used to improve the depth weighting. Thus slightly different results are to be expected with and without this information. + + For depth weighting, 0.8 is generally good for MEG, and between 2 and 5 + is good for EEG, see :footcite:`LinEtAl2006a`. + + References + ---------- + .. footbibliography:: """ # noqa: E501 # For now we always have pca='white'. 
It does not seem to affect
    # calculations and is also backward-compatible with MNE-C
@@ -1553,7 +1560,9 @@ def make_inverse_operator(info, forward, noise_cov, loose='auto', depth=0.8,
     eigen_fields, sing, eigen_leads = _safe_svd(gain, full_matrices=False)
     del gain
     logger.info('    largest singular value = %g' % np.max(sing))
-    logger.info('    scaling factor to adjust the trace = %g' % trace_GRGT)
+    logger.info(f'    scaling factor to adjust the trace = {trace_GRGT:g} '
+                f'(nchan = {eigen_fields.shape[0]} '
+                f'nzero = {(noise_cov["eig"] <= 0).sum()})')

     # MNE-ify everything for output
     eigen_fields = dict(data=eigen_fields.T, col_names=gain_info['ch_names'],
diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py
index e00ea0b958c..2aa9b0a9960 100644
--- a/mne/minimum_norm/tests/test_inverse.py
+++ b/mne/minimum_norm/tests/test_inverse.py
@@ -208,6 +208,8 @@ def test_warn_inverse_operator(evoked, noise_cov):
     bad_info['projs'] = list()
     fwd_op = convert_forward_solution(read_forward_solution(fname_fwd),
                                       surf_ori=True, copy=False)
+    with pytest.raises(ValueError, match='greater than or'):
+        make_inverse_operator(bad_info, fwd_op, noise_cov, depth=-0.1)
     noise_cov['projs'].pop(-1)  # get rid of avg EEG ref proj
     with pytest.warns(RuntimeWarning, match='reference'):
         make_inverse_operator(bad_info, fwd_op, noise_cov)
@@ -254,7 +256,7 @@ def test_inverse_operator_channel_ordering(evoked, noise_cov):
     fwd_orig = make_forward_solution(evoked.info, fname_trans, src_fname,
                                      fname_bem, eeg=True, mindist=5.0)
     fwd_orig = convert_forward_solution(fwd_orig, surf_ori=True)
-    depth = dict(exp=0.8, limit_depth_chs=False)
+    depth = dict(exp=2.8, limit_depth_chs=False)  # test depth > 1 as well
     with catch_logging() as log:
         inv_orig = make_inverse_operator(evoked.info, fwd_orig, noise_cov,
                                          loose=0.2, depth=depth, verbose=True)
diff --git a/mne/utils/docs.py b/mne/utils/docs.py
index 76b2f4a5110..71f591ceeb3 100644
--- a/mne/utils/docs.py
+++ b/mne/utils/docs.py
@@ -946,9 +946,8 @@
 depth : None | float | dict
     How to weight (or normalize) the forward using a depth prior.
     If float (default 0.8), it acts as the depth weighting exponent (``exp``)
-    to use, which must be between 0 and 1. None is equivalent to 0, meaning
-    no depth weighting is performed. It can also be a :class:`dict`
-    containing keyword arguments to pass to
+    to use. None is equivalent to 0, meaning no depth weighting is performed.
+    It can also be a :class:`dict` containing keyword arguments to pass to
     :func:`mne.forward.compute_depth_prior` (see docstring for details and
     defaults). This is effectively ignored when ``method='eLORETA'``.
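
For reference, the ``depth`` behavior documented above can be exercised as
follows once this patch is applied (a minimal sketch, not part of the patch
itself; the file names are placeholders for your own measurement, forward,
and noise covariance files):

.. code-block:: python

    import mne
    from mne.minimum_norm import make_inverse_operator

    # Placeholder inputs -- substitute your own files here
    info = mne.io.read_info('meas-ave.fif')
    fwd = mne.read_forward_solution('meas-fwd.fif')
    cov = mne.read_cov('meas-cov.fif')

    # Exponents above 1 are now accepted; per Lin et al. (2006), values
    # between 2 and 5 can be appropriate for EEG-dominated data
    inv = make_inverse_operator(info, fwd, cov, loose=0.2, depth=3.)

    # The dict form forwards keyword arguments to compute_depth_prior,
    # as done in the test above
    inv = make_inverse_operator(
        info, fwd, cov, depth=dict(exp=3., limit_depth_chs=False))
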
From c5c6d2306194393cbbbc4d7235d9dcc91d3851bc Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 28 Jan 2021 14:26:14 -0500 Subject: [PATCH 091/387] MRG, ENH: Add support for other formats to browse_raw (#8807) * ENH: Add support for other formats to browse_raw * FIX: Test --- doc/changes/latest.inc | 4 ++++ mne/commands/mne_browse_raw.py | 9 +++++++-- mne/io/_read_raw.py | 37 ++++++++++++++++++---------------- mne/io/tests/test_read_raw.py | 16 +++++++++++---- mne/viz/tests/test_raw.py | 8 +++++--- mne/viz/utils.py | 4 +++- 6 files changed, 51 insertions(+), 27 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 112616b3960..87bbb129ce6 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -34,6 +34,8 @@ Enhancements - Add option to control appearance of opaque inside surface of the head to :ref:`mne coreg` (:gh:`8793` by `Eric Larson`_) +- Add support for non-FIF files in :ref:`mne browse_raw` using :func:`mne.io.read_raw` (:gh:`8806` by `Eric Larson`_) + - Add :func:`mne.io.read_raw_nedf` for reading StarStim / enobio NEDF files (:gh:`8734` by `Tristan Stenner`_) - Add :meth:`raw.describe() ` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_) @@ -56,6 +58,8 @@ Bugs - Fix bug with :meth:`raw.plot() ` where annotations didn't immediately appear when changing window duration (:gh:`8689` by `Daniel McCloy`_) +- Fix bug with :meth:`raw.plot() ` where ``scalings='auto'`` did not compute scalings using the full range of data (:gh:`8806` by `Eric Larson`_) + - Fix bug with :func:`mne.io.read_raw_nicolet` where header type values such as num_sample and duration_in_sec where not parsed properly (:gh:`8712` by `Alex Gramfort`_) - Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py index 020f02b3578..1425708984a 100755 --- a/mne/commands/mne_browse_raw.py +++ b/mne/commands/mne_browse_raw.py @@ -1,6 +1,9 @@ #!/usr/bin/env python r"""Browse raw data. +This uses :func:`mne.io.read_raw` so it supports the same formats +(without keyword arguments). + Examples -------- .. 
code-block:: console @@ -105,8 +108,10 @@ def run(): parser.print_help() sys.exit(1) - raw = mne.io.read_raw_fif(raw_in, preload=preload, - allow_maxshield=maxshield) + kwargs = dict(preload=preload) + if maxshield: + kwargs.update(allow_maxshield='yes') + raw = mne.io.read_raw(raw_in, **kwargs) if len(proj_in) > 0: projs = mne.read_proj(proj_in) raw.info['projs'] = projs diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py index d4ef31c2b7a..588e06c3613 100644 --- a/mne/io/_read_raw.py +++ b/mne/io/_read_raw.py @@ -27,23 +27,26 @@ def _read_unsupported(fname, **kwargs): # supported read file formats -supported = {".edf": read_raw_edf, - ".bdf": read_raw_bdf, - ".gdf": read_raw_gdf, - ".vhdr": read_raw_brainvision, - ".fif": read_raw_fif, - ".fif.gz": read_raw_fif, - ".set": read_raw_eeglab, - ".cnt": read_raw_cnt, - ".mff": read_raw_egi, - ".nxe": read_raw_eximia, - ".hdr": read_raw_nirx, - ".mat": read_raw_fieldtrip, - ".bin": read_raw_artemis123, - ".data": read_raw_nicolet, - ".sqd": read_raw_kit, - ".ds": read_raw_ctf, - ".txt": read_raw_boxy} +supported = { + ".edf": read_raw_edf, + ".bdf": read_raw_bdf, + ".gdf": read_raw_gdf, + ".vhdr": read_raw_brainvision, + ".fif": read_raw_fif, + ".fif.gz": read_raw_fif, + ".set": read_raw_eeglab, + ".cnt": read_raw_cnt, + ".mff": read_raw_egi, + ".nxe": read_raw_eximia, + ".hdr": read_raw_nirx, + ".mat": read_raw_fieldtrip, + ".bin": read_raw_artemis123, + ".data": read_raw_nicolet, + ".sqd": read_raw_kit, + ".con": read_raw_kit, + ".ds": read_raw_ctf, + ".txt": read_raw_boxy, +} # known but unsupported file formats suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"), diff --git a/mne/io/tests/test_read_raw.py b/mne/io/tests/test_read_raw.py index 7d670aac35e..10007c5520c 100644 --- a/mne/io/tests/test_read_raw.py +++ b/mne/io/tests/test_read_raw.py @@ -5,11 +5,15 @@ # License: BSD (3-clause) from pathlib import Path + import pytest + from mne.io import read_raw +from mne.datasets import testing base = Path(__file__).parent.parent +test_base = Path(testing.data_path(download=False)) @pytest.mark.parametrize('fname', ['x.xxx', 'x']) @@ -26,10 +30,14 @@ def test_read_raw_suggested(fname): read_raw(fname) -@pytest.mark.parametrize('fname', [base / 'edf/tests/data/test.edf', - base / 'edf/tests/data/test.bdf', - base / 'brainvision/tests/data/test.vhdr', - base / 'kit/tests/data/test.sqd']) +@pytest.mark.parametrize('fname', [ + base / 'edf/tests/data/test.edf', + base / 'edf/tests/data/test.bdf', + base / 'brainvision/tests/data/test.vhdr', + base / 'kit/tests/data/test.sqd', + pytest.param(test_base / 'KIT/data_berlin.con', + marks=testing._pytest_mark()), +]) def test_read_raw_supported(fname): """Test supported file types.""" read_raw(fname) diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py index cc0623a08fe..a4f67e3eb21 100644 --- a/mne/viz/tests/test_raw.py +++ b/mne/viz/tests/test_raw.py @@ -14,8 +14,7 @@ from mne import read_events, pick_types, Annotations, create_info from mne.datasets import testing from mne.io import read_raw_fif, read_raw_ctf, RawArray -from mne.utils import (run_tests_if_main, _dt_to_stamp, _click_ch_name, - _close_event) +from mne.utils import _dt_to_stamp, _click_ch_name, _close_event from mne.viz.utils import _fake_click from mne.annotations import _sync_onset from mne.viz import plot_raw, plot_sensors @@ -706,4 +705,7 @@ def test_plot_sensors(raw): raw.plot_sensors() -run_tests_if_main() +def test_scalings_int(): + """Test that auto scalings access samples using integers.""" + raw 
= RawArray(np.zeros((1, 500)), create_info(1, 1000., 'eeg')) + raw.plot(scalings='auto') diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 5a09e7dfefb..4083a694006 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -1125,7 +1125,9 @@ def _compute_scalings(scalings, inst, remove_dc=False, duration=10): time_middle = np.mean(inst.times) tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None) tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max()) - data = inst._read_segment(tmin, tmax) + smin, smax = [ + int(round(x * inst.info['sfreq'])) for x in (tmin, tmax)] + data = inst._read_segment(smin, smax) elif isinstance(inst, BaseEpochs): # Load a random subset of epochs up to 100mb in size n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8) From 32402f9000aba9ca7ad62fc0439ba8a8c9f8020f Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Jan 2021 16:09:24 +0100 Subject: [PATCH 092/387] Better error message if configured download folder doesn't exist (#8809) * Better error message if download folder specified in config does not exist * TST: Add test Co-authored-by: Eric Larson --- mne/datasets/tests/test_datasets.py | 22 +++++++++++++--------- mne/datasets/utils.py | 5 +++++ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index 85d7a0a6049..1b2cccb4e35 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -13,7 +13,7 @@ from mne.datasets._fsaverage.base import _set_montage_coreg_path from mne.datasets.utils import _manifest_check_download -from mne.utils import (requires_good_network, modified_env, +from mne.utils import (requires_good_network, get_subjects_dir, ArgvSetter, _pl, use_log_level, catch_logging, hashfunc) @@ -21,7 +21,7 @@ subjects_dir = op.join(testing.data_path(download=False), 'subjects') -def test_datasets_basic(tmpdir): +def test_datasets_basic(tmpdir, monkeypatch): """Test simple dataset functions.""" # XXX 'hf_sef' and 'misc' do not conform to these standards for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', @@ -45,13 +45,17 @@ def test_datasets_basic(tmpdir): tempdir = str(tmpdir) # don't let it read from the config file to get the directory, # force it to look for the default - with modified_env(**{'_MNE_FAKE_HOME_DIR': tempdir, 'SUBJECTS_DIR': None}): - assert (datasets.utils._get_path(None, 'foo', 'bar') == - op.join(tempdir, 'mne_data')) - assert get_subjects_dir(None) is None - _set_montage_coreg_path() - sd = get_subjects_dir() - assert sd.endswith('MNE-fsaverage-data') + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', tempdir) + monkeypatch.delenv('SUBJECTS_DIR', raising=False) + assert (datasets.utils._get_path(None, 'foo', 'bar') == + op.join(tempdir, 'mne_data')) + assert get_subjects_dir(None) is None + _set_montage_coreg_path() + sd = get_subjects_dir() + assert sd.endswith('MNE-fsaverage-data') + monkeypatch.setenv('MNE_DATA', str(tmpdir.join('foo'))) + with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): + testing.data_path(download=False) @requires_good_network diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 2320a5ac39c..f11420a3389 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -178,6 +178,11 @@ def _get_path(path, key, name): # 3. 
get_config('MNE_DATA') path = get_config(key, get_config('MNE_DATA')) if path is not None: + if not op.exists(path): + msg = (f"Download location {path} as specified by MNE_DATA does " + f"not exist. Either create this directory manually and try " + f"again, or set MNE_DATA to an existing directory.") + raise FileNotFoundError(msg) return path # 4. ~/mne_data (but use a fake home during testing so we don't # unnecessarily create ~/mne_data) From 5a6688b94f97d365a0abf256d86109d6a092cc6a Mon Sep 17 00:00:00 2001 From: "Christopher J. Bailey" Date: Fri, 29 Jan 2021 16:53:53 +0100 Subject: [PATCH 093/387] FIX missing Axes3D import in viz._3d._plot_mpl_stc (#8811) * FIX missing Axes3D import in viz._3d._plot_mpl_stc * DOC: Latest Co-authored-by: Eric Larson --- doc/changes/latest.inc | 2 ++ mne/viz/_3d.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 87bbb129ce6..638e530baa5 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -72,6 +72,8 @@ Bugs - Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_) +- Fix bug with matplotlib-based 3D plotting where ``Axes3D`` were not properly initialized in :func:`mne.viz.plot_source_estimates` (:gh:`8811` by `Chris Bailey`_) + - Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) - Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 25f06850ce3..90c5375013b 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -245,7 +245,7 @@ def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z', else: # mode == 'field': from matplotlib.colors import Normalize from mpl_toolkits.mplot3d.art3d import Line3DCollection - from mpl_toolkits.mplot3d import axes3d # noqa: F401, analysis:ignore + from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d')) # First plot the trajectory as a colormap: @@ -1488,6 +1488,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', transparent=True): """Plot source estimate using mpl.""" import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore from matplotlib import cm from matplotlib.widgets import Slider import nibabel as nib From 74a7f32d561e84f918a4f42bd6471d204cafe2f9 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Fri, 29 Jan 2021 18:36:57 +0100 Subject: [PATCH 094/387] MRG: Improve Brain UX (#8792) * Show either time label or sliders * Move backward/forward in time * Add _rotate_azimuth and _rotate_elevation * Add help * Improve coverage * Update docstring * Use np.clip * ENH: mne_analyze steps Co-authored-by: Eric Larson --- mne/viz/_brain/_brain.py | 63 ++++++++++++++++++++++++++++-- mne/viz/_brain/tests/test_brain.py | 4 ++ mne/viz/backends/_pyvista.py | 5 +++ 3 files changed, 68 insertions(+), 4 deletions(-) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 469c757782b..63f8ecefc58 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -41,6 +41,9 @@ get_subjects_dir) +_ARROW_MOVE = 10 # degrees per press + + @decorator def safe_event(fun, *args, **kwargs): """Protect against PyQt5 exiting on event-handling errors.""" @@ -418,6 +421,7 @@ def __init__(self, subject_id, hemi, surf, title=None, self._labels = 
{'lh': list(), 'rh': list()} self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = {} + self._elevation_rng = [15, 165] # range of motion of camera on theta # default values for silhouette self._silhouette = { 'color': self._bg_color, @@ -549,6 +553,23 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): show_traces : bool If True, enable visualization of time traces. Defaults to True. + + Notes + ----- + The keyboard shortcuts are the following: + + '?': Display help window + 'i': Toggle interface + 's': Apply auto-scaling + 'r': Restore original clim + 'c': Clear all traces + 'n': Shift the time forward by the playback speed + 'b': Shift the time backward by the playback speed + 'Space': Start/Pause playback + 'Up': Decrease camera elevation angle + 'Down': Increase camera elevation angle + 'Left': Decrease camera azimuth angle + 'Right': Increase camera azimuth angle """ if self.time_viewer: return @@ -765,11 +786,8 @@ def toggle_interface(self, value=None): # manage time label time_label = self._data['time_label'] - # if we actually have time points, we will show the slider so - # hide the time actor - have_ts = self._times is not None and len(self._times) > 1 if self.time_actor is not None: - if self.visibility and time_label is not None and not have_ts: + if not self.visibility and time_label is not None: self.time_actor.SetInput(time_label(self._current_time)) self.time_actor.VisibilityOn() else: @@ -1388,11 +1406,42 @@ def _configure_tool_bar(self): self.actions["play"].setShortcut(" ") self.actions["help"].setShortcut("?") + def _shift_time(self, op): + self.callbacks["time"]( + value=(op(self._current_time, self.playback_speed)), + time_as_index=False, + update_widget=True, + ) + + def _rotate_azimuth(self, value): + azimuth = (self._renderer.figure._azimuth + value) % 360 + self._renderer.set_camera(azimuth=azimuth, reset_camera=False) + + def _rotate_elevation(self, value): + elevation = np.clip( + self._renderer.figure._elevation + value, + self._elevation_rng[0], + self._elevation_rng[1], + ) + self._renderer.set_camera(elevation=elevation, reset_camera=False) + def _configure_shortcuts(self): + # First, we remove the default bindings: + self.plotter._key_press_event_callbacks.clear() + # Then, we add our own: self.plotter.add_key_event("i", self.toggle_interface) self.plotter.add_key_event("s", self.apply_auto_scaling) self.plotter.add_key_event("r", self.restore_user_scaling) self.plotter.add_key_event("c", self.clear_glyphs) + self.plotter.add_key_event("n", partial(self._shift_time, + op=lambda x, y: x + y)) + self.plotter.add_key_event("b", partial(self._shift_time, + op=lambda x, y: x - y)) + for key, func, sign in (("Left", self._rotate_azimuth, 1), + ("Right", self._rotate_azimuth, -1), + ("Up", self._rotate_elevation, 1), + ("Down", self._rotate_elevation, -1)): + self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE)) def _configure_menu(self): # remove default picking menu @@ -1734,7 +1783,13 @@ def help(self): ('s', 'Apply auto-scaling'), ('r', 'Restore original clim'), ('c', 'Clear all traces'), + ('n', 'Shift the time forward by the playback speed'), + ('b', 'Shift the time backward by the playback speed'), ('Space', 'Start/Pause playback'), + ('Up', 'Decrease camera elevation angle'), + ('Down', 'Increase camera elevation angle'), + ('Left', 'Decrease camera azimuth angle'), + ('Right', 'Increase camera azimuth angle'), ] text1, text2 = zip(*pairs) text1 = '\n'.join(text1) diff --git a/mne/viz/_brain/tests/test_brain.py 
b/mne/viz/_brain/tests/test_brain.py index f5959601c60..c198ff716e3 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -474,6 +474,10 @@ def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): brain.callbacks["fscale"](value=1.1) brain.callbacks["fmin"](value=12.0) brain.callbacks["fmid"](value=4.0) + brain._shift_time(op=lambda x, y: x + y) + brain._shift_time(op=lambda x, y: x - y) + brain._rotate_azimuth(15) + brain._rotate_elevation(15) brain.toggle_interface() brain.toggle_interface(value=False) brain.callbacks["playback_speed"](value=0.1) diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 5e8103e16f3..72669c353bf 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -75,6 +75,8 @@ def __init__(self, plotter=None, # multi_samples > 1 is broken on macOS + Intel Iris + volume rendering self.store['multi_samples'] = 1 if sys.platform == 'darwin' else 4 + self._azimuth = self._elevation = None + def build(self): if self.plotter_class is None: self.plotter_class = BackgroundPlotter @@ -1006,6 +1008,9 @@ def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None, distance * np.sin(phi) * np.sin(theta), distance * np.cos(theta)] + figure._azimuth = _rad2deg(phi) + figure._elevation = _rad2deg(theta) + # restore to the original frame rigid = np.linalg.inv(rigid) position = apply_trans(rigid, position) From a36c74f8b88b93e41806c14465ff1308bf585c4a Mon Sep 17 00:00:00 2001 From: Christian Brodbeck Date: Fri, 29 Jan 2021 13:15:33 -0500 Subject: [PATCH 095/387] [MRG] ENH EGI MFF reader: populate info['dig'] (#8789) * ENH EGI MFF reader: populate info['dig'] * ENH MFF reader: match info conventions for reference electrode * Keep digitizer order consistent with channels * STY * DOC: latest.inc Co-authored-by: Eric Larson --- doc/changes/latest.inc | 2 ++ mne/io/egi/egimff.py | 57 +++++++++++++++++++++++++++++------- mne/io/egi/tests/test_egi.py | 7 +++++ 3 files changed, 56 insertions(+), 10 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 638e530baa5..953b5ebb1a4 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -26,6 +26,8 @@ Enhancements - Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) +- Add digitizer information to :func:`mne.io.read_raw_mff` (:gh:`8789` by `Christian Brodbeck`_) + - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index dff9a894069..ccf1fb5a77d 100644 --- a/mne/io/egi/egimff.py +++ b/mne/io/egi/egimff.py @@ -12,6 +12,7 @@ from .events import _read_events, _combine_triggers from .general import (_get_signalfname, _get_ep_info, _extract, _get_blocks, _get_gains, _block_r) +from .._digitization import DigPoint from ..base import BaseRaw from ..constants import FIFF from ..meas_info import _empty_info, create_info @@ -253,21 +254,55 @@ def _read_locs(filepath, chs, egi_info): """Read channel locations.""" fname = op.join(filepath, 'coordinates.xml') if not op.exists(fname): - return chs + return chs, [] + reference_names = ('VREF', 'Vertex Reference') + dig_kind_map = { + '': FIFF.FIFFV_POINT_EEG, + 'VREF': FIFF.FIFFV_POINT_EEG, + 'Vertex Reference': FIFF.FIFFV_POINT_EEG, + 
'Left periauricular point': FIFF.FIFFV_POINT_CARDINAL, + 'Right periauricular point': FIFF.FIFFV_POINT_CARDINAL, + 'Nasion': FIFF.FIFFV_POINT_CARDINAL, + } + dig_ident_map = { + 'Left periauricular point': FIFF.FIFFV_POINT_LPA, + 'Right periauricular point': FIFF.FIFFV_POINT_RPA, + 'Nasion': FIFF.FIFFV_POINT_NASION, + } numbers = np.array(egi_info['numbers']) coordinates = parse(fname) sensors = coordinates.getElementsByTagName('sensor') + dig_points = [] + dig_reference = None for sensor in sensors: + name_element = sensor.getElementsByTagName('name')[0].firstChild + name = '' if name_element is None else name_element.data nr = sensor.getElementsByTagName('number')[0].firstChild.data.encode() - id = np.where(numbers == nr)[0] + coords = [float(sensor.getElementsByTagName(coord)[0].firstChild.data) + for coord in 'xyz'] + loc = np.array(coords) / 100 # cm -> m + # create dig entry + kind = dig_kind_map[name] + if kind == FIFF.FIFFV_POINT_CARDINAL: + ident = dig_ident_map[name] + else: + ident = int(nr) + dig_point = DigPoint(kind=kind, ident=ident, r=loc, + coord_frame=FIFF.FIFFV_COORD_HEAD) + dig_points.append(dig_point) + if name in reference_names: + dig_reference = dig_point + # add location to channel entry + id = np.flatnonzero(numbers == nr) if len(id) == 0: continue - loc = chs[id[0]]['loc'] - loc[0] = sensor.getElementsByTagName('x')[0].firstChild.data - loc[1] = sensor.getElementsByTagName('y')[0].firstChild.data - loc[2] = sensor.getElementsByTagName('z')[0].firstChild.data - loc /= 100. # cm -> m - return chs + chs[id[0]]['loc'][:3] = loc + # Insert reference location into channel location + if dig_reference is not None: + for ch in chs: + if ch['kind'] == FIFF.FIFFV_EEG_CH: + ch['loc'][3:6] = dig_reference['r'] + return chs, dig_points def _add_pns_channel_info(chs, egi_info, ch_names): @@ -453,7 +488,7 @@ def __init__(self, input_fname, eog=None, misc=None, ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) - chs = _read_locs(input_fname, chs, egi_info) + chs, dig = _read_locs(input_fname, chs, egi_info) sti_ch_idx = [i for i, name in enumerate(ch_names) if name.startswith('STI') or name in event_codes] for idx in sti_ch_idx: @@ -465,6 +500,7 @@ def __init__(self, input_fname, eog=None, misc=None, chs = _add_pns_channel_info(chs, egi_info, ch_names) info['chs'] = chs + info['dig'] = dig info._update_redundant() file_bin = op.join(input_fname, egi_info['eeg_fname']) egi_info['egi_events'] = egi_events @@ -826,10 +862,11 @@ def _read_evoked_mff(fname, condition, channel_naming='E%d', verbose=None): ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ()) - chs = _read_locs(fname, chs, egi_info) + chs, dig = _read_locs(fname, chs, egi_info) # Update PNS channel info chs = _add_pns_channel_info(chs, egi_info, ch_names) info['chs'] = chs + info['dig'] = dig # Add bad channels to info info['description'] = category diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index af2f38e8851..e69e25fca71 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -115,6 +115,12 @@ def test_io_egi_mff(): test_scaling=False, # XXX probably some bug ) assert raw.info['sfreq'] == 1000. 
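To make the new behavior concrete, a minimal usage sketch (the file path is hypothetical; the counts mirror the assertions just below for a 128-channel net):

import mne

raw = mne.io.read_raw_egi('recording.mff')  # hypothetical MFF directory
dig = raw.info['dig']  # now populated from coordinates.xml (cm -> m)
print(len(dig))  # e.g. 132: 128 EEG + 1 reference + 3 cardinal points
# the reference position is also copied into loc[3:6] of each EEG channel
print(raw.info['chs'][0]['loc'][3:6])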
+ assert len(raw.info['dig']) == 132 # 128 eeg + 1 ref + 3 cardinal points + assert raw.info['dig'][0]['ident'] == 1 # EEG channel E1 + assert raw.info['dig'][128]['ident'] == 129 # Reference channel + ref_loc = raw.info['dig'][128]['r'] + for i in pick_types(raw.info, eeg=True): + assert_equal(raw.info['chs'][i]['loc'][3:6], ref_loc) assert_equal('eeg' in raw, True) eeg_chan = [c for c in raw.ch_names if 'EEG' in c] @@ -359,6 +365,7 @@ def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): assert evoked_cond.info['nchan'] == 259 assert evoked_cond.info['sfreq'] == 250.0 assert not evoked_cond.info['custom_ref_applied'] + assert len(evoked_cond.info['dig']) == 0 # coordinates.xml missing @requires_version('mffpy', '0.5.7') From 8006b6cb985eee6af118a070c38f19a4c08b2767 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 29 Jan 2021 13:32:33 -0500 Subject: [PATCH 096/387] FIX: Path --- doc/changes/latest.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 953b5ebb1a4..b81cae13f54 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -26,7 +26,7 @@ Enhancements - Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) -- Add digitizer information to :func:`mne.io.read_raw_mff` (:gh:`8789` by `Christian Brodbeck`_) +- Add digitizer information to :func:`mne.io.read_raw_egi` (:gh:`8789` by `Christian Brodbeck`_) - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) From b45fc5cdb83114ed855b5aed7fd2a0c3a01d4cb5 Mon Sep 17 00:00:00 2001 From: Christian Brodbeck Date: Mon, 1 Feb 2021 08:03:18 -0500 Subject: [PATCH 097/387] [MRG] Coregistration-GUI: use *.mff as digitization source (#8790) * ENH coreg-GUI: allow *.mff as digitization source * ENH coreg-GUI: read digitization from any supported raw file * RF: simplify digitizer check * TEST * STY * STY * DOC --- doc/changes/latest.inc | 2 + mne/gui/_file_traits.py | 138 ++++++++++++++++-------------- mne/gui/tests/test_file_traits.py | 22 ++++- 3 files changed, 96 insertions(+), 66 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index b81cae13f54..d3bdb70b5d6 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -28,6 +28,8 @@ Enhancements - Add digitizer information to :func:`mne.io.read_raw_egi` (:gh:`8789` by `Christian Brodbeck`_) +- Allow reading digitization from files other than ``*.fif`` in the coregistration GUI (:gh:`8790` by `Christian Brodbeck`_) + - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) diff --git a/mne/gui/_file_traits.py b/mne/gui/_file_traits.py index 7a50cea6b9b..255ccddae3b 100644 --- a/mne/gui/_file_traits.py +++ b/mne/gui/_file_traits.py @@ -13,7 +13,7 @@ from traits.api import (Any, HasTraits, HasPrivateTraits, cached_property, on_trait_change, Array, Bool, Button, DelegatesTo, Directory, Enum, Event, File, Instance, Int, List, - Property, Str, ArrayOrNone) + Property, Str, ArrayOrNone, BaseFile) from traitsui.api import View, Item, VGroup from pyface.api import DirectoryDialog, OK, ProgressDialog, error, information @@ -21,7 +21,8 @@ from ..bem import read_bem_surfaces from ..io.constants import FIFF -from ..io import read_info, read_fiducials +from ..io import 
read_info, read_fiducials, read_raw
+from ..io._read_raw import supported
 from ..io.meas_info import _empty_info
 from ..io.open import fiff_open, dir_tree_find
 from ..surface import read_surface, complete_surface_info
@@ -29,7 +30,7 @@
                      create_default_subject)
 from ..utils import get_config, set_config
 from ..viz._3d import _fiducial_coords
-from ..channels import read_dig_fif, DigMontage
+from ..channels import read_dig_fif
 
 
 fid_wildcard = "*.fif"
@@ -135,6 +136,21 @@ def _mne_root_problem(mne_root):
             "installation, consider reinstalling." % mne_root)
 
 
+class FileOrDir(File):
+    """Subclass File because *.mff files are actually directories."""
+
+    def validate(self, object, name, value):
+        """Validate that a specified value is valid for this trait."""
+        value = os.fspath(value)
+        validated_value = super(BaseFile, self).validate(object, name, value)
+        if not self.exists:
+            return validated_value
+        elif op.exists(value):
+            return validated_value
+
+        self.error(object, name, value)
+
+
 class Surf(HasTraits):
     """Expose a surface similar to the ones used elsewhere in MNE."""
 
@@ -245,7 +261,8 @@ class DigSource(HasPrivateTraits):
     Nasion, RAP, LAP. If no file is set all values are 0.
     """
 
-    file = File(exists=True, filter=['*.fif'])
+    file = FileOrDir(exists=True,
+                     filter=[';'.join([f'*{ext}' for ext in supported])])
 
     inst_fname = Property(Str, depends_on='file')
     inst_dir = Property(depends_on='file')
@@ -288,53 +305,45 @@ def _get_n_omitted(self):
 
     @cached_property
     def _get__info(self):
-        if self.file:
+        if not self.file:
+            return
+        elif self.file.endswith(('.fif', '.fif.gz')):
             info = None
             fid, tree, _ = fiff_open(self.file)
             fid.close()
             if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0:
                 info = read_info(self.file, verbose=False)
             elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0:
-                info = read_dig_fif(fname=self.file)
-
-            if isinstance(info, DigMontage):
-                dig = info.dig
                 info = _empty_info(1)
-                info['dig'] = dig
-            elif info is None or info['dig'] is None:
-                error(None, "The selected FIFF file does not contain "
-                      "digitizer information. Please select a different "
-                      "file.", "Error Reading FIFF File")
+                info['dig'] = read_dig_fif(fname=self.file).dig
+        else:
+            info = read_raw(self.file).info
+
+        # check that digitizer info is present
+        if info is None or info['dig'] is None:
+            error(None, "The selected file does not contain digitization "
                  "information. Please select a different file.",
+                  "Error Reading Digitization File")
+            self.reset_traits(['file'])
+            return
+
+        # check that all fiducial points are present
+        point_kinds = {d['kind'] for d in info['dig']}
+        missing = [key for key in ('LPA', 'Nasion', 'RPA') if
+                   getattr(FIFF, f'FIFFV_POINT_{key.upper()}') not in
+                   point_kinds]
+        if missing:
+            points = _fiducial_coords(info['dig'])
+            if len(points) == 3:
+                _append_fiducials(info['dig'], *points.T)
+            else:
+                error(None, "The selected digitization file does not contain "
+                      f"all cardinal points (missing: {', '.join(missing)}). 
" + "Please select a different file.", + "Error Reading Digitization File") self.reset_traits(['file']) return - else: - # check that all fiducial points are present - has_point = {FIFF.FIFFV_POINT_LPA: False, - FIFF.FIFFV_POINT_NASION: False, - FIFF.FIFFV_POINT_RPA: False} - for d in info['dig']: - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: - has_point[d['ident']] = True - if not all(has_point.values()): - points = _fiducial_coords(info['dig']) - if len(points) == 3: - _append_fiducials(info['dig'], *points.T) - else: - missing = [] - if not has_point[FIFF.FIFFV_POINT_LPA]: - missing.append('LPA') - if not has_point[FIFF.FIFFV_POINT_NASION]: - missing.append('Nasion') - if not has_point[FIFF.FIFFV_POINT_RPA]: - missing.append('RPA') - error(None, "The selected FIFF file does not contain " - "all cardinal points (missing: %s). Please " - "select a different file." % ', '.join(missing), - "Error Reading FIFF File") - self.reset_traits(['file']) - return - - return info + return info @cached_property def _get_inst_dir(self): @@ -349,8 +358,8 @@ def _get_inst_fname(self): @cached_property def _get__hsp_points(self): - if not self._info: - return np.zeros((0, 3)) + if not self._info or not self._info['dig']: + return np.empty((0, 3)) points = np.array([d['r'] for d in self._info['dig'] if d['kind'] == FIFF.FIFFV_POINT_EXTRA]) @@ -366,11 +375,12 @@ def _get_points(self): def _cardinal_point(self, ident): """Coordinates for a cardinal point.""" - if self._info: - for d in self._info['dig']: - if (d['kind'] == FIFF.FIFFV_POINT_CARDINAL and - d['ident'] == ident): - return d['r'][None, :] + if not self._info or not self._info['dig']: + return np.zeros((1, 3)) + + for d in self._info['dig']: + if d['kind'] == FIFF.FIFFV_POINT_CARDINAL and d['ident'] == ident: + return d['r'][None, :] return np.zeros((1, 3)) @cached_property @@ -387,25 +397,25 @@ def _get_rpa(self): @cached_property def _get_eeg_points(self): - if self._info: - out = [d['r'] for d in self._info['dig'] if - d['kind'] == FIFF.FIFFV_POINT_EEG and - d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] - out = np.empty((0, 3)) if len(out) == 0 else np.array(out) - return out - else: + if not self._info or not self._info['dig']: return np.empty((0, 3)) + out = [d['r'] for d in self._info['dig'] if + d['kind'] == FIFF.FIFFV_POINT_EEG and + d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] + out = np.empty((0, 3)) if len(out) == 0 else np.array(out) + return out + @cached_property def _get_hpi_points(self): - if self._info: - out = [d['r'] for d in self._info['dig'] if - d['kind'] == FIFF.FIFFV_POINT_HPI and - d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] - out = np.empty((0, 3)) if len(out) == 0 else np.array(out) - return out - else: - return np.empty((0, 3)) + if not self._info or not self._info['dig']: + return np.zeros((0, 3)) + + out = [d['r'] for d in self._info['dig'] if + d['kind'] == FIFF.FIFFV_POINT_HPI and + d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] + out = np.empty((0, 3)) if len(out) == 0 else np.array(out) + return out def _file_changed(self): self.reset_traits(('points_filter',)) diff --git a/mne/gui/tests/test_file_traits.py b/mne/gui/tests/test_file_traits.py index 0e8da21fdaa..3fc48a774da 100644 --- a/mne/gui/tests/test_file_traits.py +++ b/mne/gui/tests/test_file_traits.py @@ -59,7 +59,7 @@ def test_fiducials_source(): @testing.requires_testing_data @requires_mayavi @traits_test -def test_inst_source(tmpdir): +def test_digitization_source(tmpdir): """Test DigSource.""" from mne.gui._file_traits import DigSource tempdir = str(tmpdir) @@ 
-70,6 +70,7 @@ def test_inst_source(tmpdir): inst.file = inst_path assert inst.inst_dir == op.dirname(inst_path) + # FIFF lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]]) nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]]) rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]]) @@ -77,7 +78,8 @@ def test_inst_source(tmpdir): assert_allclose(inst.nasion, nasion) assert_allclose(inst.rpa, rpa) - montage = read_dig_fif(inst_path) # test reading DigMontage + # DigMontage + montage = read_dig_fif(inst_path) montage_path = op.join(tempdir, 'temp_montage.fif') montage.save(montage_path) inst.file = montage_path @@ -85,6 +87,22 @@ def test_inst_source(tmpdir): assert_allclose(inst.nasion, nasion) assert_allclose(inst.rpa, rpa) + # EGI MFF + inst.file = op.join(data_path, 'EGI', 'test_egi.mff') + assert len(inst.points) == 0 + assert len(inst.eeg_points) == 129 + assert_allclose(inst.lpa * 1000, [[-67.1, 0.5, -37.1]], atol=0.1) + assert_allclose(inst.nasion * 1000, [[0.0, 103.6, -26.9]], atol=0.1) + assert_allclose(inst.rpa * 1000, [[67.1, 0.5, -37.1]], atol=0.1) + + # CTF + inst.file = op.join(data_path, 'CTF', 'testdata_ctf.ds') + assert len(inst.points) == 0 + assert len(inst.eeg_points) == 8 + assert_allclose(inst.lpa * 1000, [[-74.3, 0.0, 0.0]], atol=0.1) + assert_allclose(inst.nasion * 1000, [[0.0, 117.7, 0.0]], atol=0.1) + assert_allclose(inst.rpa * 1000, [[84.9, -0.0, 0.0]], atol=0.1) + @testing.requires_testing_data @requires_mayavi From 9069a696fa9b55e3376cb8f4f013b8e8a40c1929 Mon Sep 17 00:00:00 2001 From: Alexandre Gramfort Date: Mon, 1 Feb 2021 14:05:41 +0100 Subject: [PATCH 098/387] fix mne.viz.plot_topomap with some missing grad in a pair (#8817) * fix mne.viz.plot_topomap with some missing grad in a pair * update what's new --- doc/changes/latest.inc | 2 ++ mne/viz/tests/test_topomap.py | 13 +++++++++++++ mne/viz/topomap.py | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index d3bdb70b5d6..51740dad57d 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -92,6 +92,8 @@ Bugs - Fix bug with :ref:`mne make_scalp_surfaces` where ``--overwrite`` was not functional (:gh:`8800` by `Yu-Han Luo`_) +- Fix bug with :func:`mne.viz.plot_topomap` when plotting gradiometers with a missing channel in a pair (:gh:`8817` by `Alex Gramfort`_) + API changes ~~~~~~~~~~~ diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 4c42a3ea74a..785c095e91c 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -557,6 +557,19 @@ def test_plot_topomap_bads(): plt.close('all') +def test_plot_topomap_bads_grad(): + """Test plotting topomap with bad gradiometer channels (gh-8802).""" + import matplotlib.pyplot as plt + data = np.random.RandomState(0).randn(203) + info = read_info(evoked_fname) + info['bads'] = ['MEG 2242'] + picks = pick_types(info, meg='grad') + info = pick_info(info, picks) + assert len(info['chs']) == 203 + plot_topomap(data, info, res=8) + plt.close('all') + + def test_plot_topomap_nirs_overlap(fnirs_epochs): """Test plotting nirs topomap with overlapping channels (gh-7414).""" fig = fnirs_epochs['A'].average(picks='hbo').plot_topomap() diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index bc968392846..7a79b2d7960 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -856,7 +856,7 @@ def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, # deal with grad pairs picks = _pair_grad_sensors(pos, 
topomap_coords=False) pos = _find_topomap_coords(pos, picks=picks[::2], sphere=sphere) - data, _ = _merge_ch_data(data, ch_type, []) + data, _ = _merge_ch_data(data[picks], ch_type, []) data = data.reshape(-1) else: picks = list(range(data.shape[0])) From d8201c9bbb2ade3e20dae0284f0333a80a62d3a9 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 1 Feb 2021 08:06:35 -0500 Subject: [PATCH 099/387] MRG, MAINT: Deprecated param and pytest-qt (#8808) * MAINT: Deprecated param and pytest-qt * ENH: One more * FIX: Split * FIX: Whoops * FIX: Fix for style * FIX: add_widget * FIX: Old * FIX: Try again * ENH: More * FIX: old * STY: Flake * FIX: One more idea * MAINT: Remove qtbot --- mne/minimum_norm/inverse.py | 2 +- mne/utils/misc.py | 8 ++-- mne/viz/_3d.py | 41 +++++++++++--------- mne/viz/_brain/_brain.py | 2 +- mne/viz/_brain/tests/test_brain.py | 10 +++-- mne/viz/tests/test_3d.py | 61 ++++++++++++++++++++---------- mne/viz/utils.py | 12 +++--- 7 files changed, 83 insertions(+), 53 deletions(-) diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index 82f498d7525..6dc8b1e0a37 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -789,7 +789,7 @@ def _check_reference(inst, ch_names=None): raise ValueError( 'EEG average reference (using a projector) is mandatory for ' 'modeling, use the method set_eeg_reference(projection=True)') - if info['custom_ref_applied']: + if info.get('custom_ref_applied', False): raise ValueError('Custom EEG reference is not allowed for inverse ' 'modeling.') diff --git a/mne/utils/misc.py b/mne/utils/misc.py index af5b429048f..caceae5abe2 100644 --- a/mne/utils/misc.py +++ b/mne/utils/misc.py @@ -341,9 +341,11 @@ def _assert_no_instances(cls, when=''): r is not globals() and \ r is not locals() and \ not inspect.isframe(r): - ref.append( - f'{r.__class__.__name__}: ' + - repr(r)[:100].replace('\n', ' ')) + if isinstance(r, (list, dict)): + rep = f'len={len(r)}' + else: + rep = repr(r)[:100].replace('\n', ' ') + ref.append(f'{r.__class__.__name__}: {rep}') count += 1 del r del rr diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 90c5375013b..9c123b59d25 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -1488,7 +1488,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', transparent=True): """Plot source estimate using mpl.""" import matplotlib.pyplot as plt - from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore + from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.widgets import Slider import nibabel as nib @@ -1524,7 +1524,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', time_label, times = _handle_time(time_label, time_unit, stc.times) fig = plt.figure(figsize=(6, 6)) if figure is None else figure - ax = fig.gca(projection='3d') + ax = Axes3D(fig) hemi_idx = 0 if hemi == 'lh' else 1 surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface)) if spacing == 'all': @@ -1860,6 +1860,8 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, _check_option('hemi', hemi, ['lh', 'rh', 'split', 'both']) _check_option('view_layout', view_layout, ('vertical', 'horizontal')) time_label, times = _handle_time(time_label, time_unit, stc.times) + show_traces, time_viewer = _check_st_tv( + show_traces, time_viewer, using_mayavi, times) # convert control points to locations in colormap use = stc.magnitude().data if vec else stc.data @@ -1995,6 +1997,21 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, elif 
need_peeling: brain.enable_depth_peeling() + if time_viewer: + if using_mayavi: + from surfer import TimeViewer + TimeViewer(brain) + else: # PyVista + brain.setup_time_viewer(time_viewer=time_viewer, + show_traces=show_traces) + else: + if not using_mayavi: + brain.show() + + return brain + + +def _check_st_tv(show_traces, time_viewer, using_mayavi, times): # time_viewer and show_traces _check_option('time_viewer', time_viewer, (True, False, 'auto')) _validate_type(show_traces, (str, bool, 'numeric'), 'show_traces') @@ -2008,26 +2025,15 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, show_traces = ( not using_mayavi and time_viewer and - brain._times is not None and - len(brain._times) > 1 + times is not None and + len(times) > 1 ) if show_traces and not time_viewer: raise ValueError('show_traces cannot be used when time_viewer=False') if using_mayavi and show_traces: raise NotImplementedError("show_traces=True is not available " "for the mayavi 3d backend.") - if time_viewer: - if using_mayavi: - from surfer import TimeViewer - TimeViewer(brain) - else: # PyVista - brain.setup_time_viewer(time_viewer=time_viewer, - show_traces=show_traces) - else: - if not using_mayavi: - brain.show() - - return brain + return show_traces, time_viewer def _glass_brain_crosshairs(params, x, y, z): @@ -3096,8 +3102,7 @@ def _plot_dipole_mri_orthoview(dipole, trans, subject, subjects_dir=None, dims = len(data) # Symmetric size assumed. dd = dims // 2 if ax is None: - fig = plt.figure() - ax = fig.gca(projection='3d') + fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d')) else: _validate_type(ax, Axes3D, "ax", "Axes3D") fig = ax.get_figure() diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 63f8ecefc58..86f117caba3 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -172,7 +172,7 @@ def remove_overlay(self, names): self.update() def _update(self): - if self._cache is None: + if self._cache is None or self._renderer is None: return self._renderer._set_mesh_scalars( mesh=self._polydata, diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index c198ff716e3..93119cd28b6 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -573,7 +573,8 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, # test colormap if src != 'vector': brain = _create_testing_brain( - hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0, + hemi=hemi, surf='white', src=src, show_traces=0.5, + initial_time=0, volume_options=None, # for speed, don't upsample n_time=1 if src == 'mixed' else 5, diverging=True, add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)), @@ -587,7 +588,8 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, # vertex traces brain = _create_testing_brain( - hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0, + hemi=hemi, surf='white', src=src, show_traces=0.5, + initial_time=0, volume_options=None, # for speed, don't upsample n_time=1 if src == 'mixed' else 5, add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)), @@ -850,8 +852,8 @@ def test_calculate_lut(): calculate_lut(colormap, alpha, 1, 0, 2) -def _create_testing_brain(hemi, surf='inflated', src='surface', size=300, - n_time=5, diverging=False, **kwargs): +def _create_testing_brain(hemi, surf='inflated', src='surface', + size=300, n_time=5, diverging=False, **kwargs): assert src in ('surface', 'vector', 'mixed', 'volume') meth = 'plot' if src in ('surface', 'mixed'): diff 
--git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 092131f2b96..ee3f30e496b 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -7,10 +7,10 @@ # # License: Simplified BSD -from mne.minimum_norm.inverse import apply_inverse import os.path as op from pathlib import Path import sys +import warnings import numpy as np from numpy.testing import assert_array_equal, assert_allclose @@ -29,13 +29,14 @@ from mne.io._digitization import write_dig from mne.io.pick import pick_info from mne.io.constants import FIFF +from mne.minimum_norm import apply_inverse from mne.viz import (plot_sparse_source_estimates, plot_source_estimates, snapshot_brain_montage, plot_head_positions, plot_alignment, plot_sensors_connectivity, plot_brain_colorbar, link_brains, mne_analyze_colormap) from mne.viz._3d import _process_clim, _linearize_map, _get_map_ticks from mne.viz.utils import _fake_click -from mne.utils import (requires_pysurfer, requires_nibabel, traits_test, +from mne.utils import (requires_nibabel, traits_test, catch_logging, run_subprocess, modified_env) from mne.datasets import testing from mne.source_space import read_source_spaces @@ -100,11 +101,11 @@ def test_plot_head_positions(): @testing.requires_testing_data -@requires_pysurfer @traits_test @pytest.mark.slowtest def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): """Test plotting of (sparse) source estimates.""" + _check_skip_pysurfer(renderer_interactive) sample_src = read_source_spaces(src_fname) # dense version @@ -123,9 +124,11 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc, 'sample', colormap=colormap, background=(1, 1, 0), subjects_dir=subjects_dir, colorbar=True, clim='auto') brain.close() - pytest.raises(TypeError, plot_source_estimates, stc, 'sample', - figure='foo', hemi='both', clim='auto', - subjects_dir=subjects_dir) + del brain + with pytest.raises(TypeError, match='figure must be'): + plot_source_estimates( + stc, 'sample', figure='foo', hemi='both', clim='auto', + subjects_dir=subjects_dir) # now do sparse version vertices = sample_src[0]['vertno'] @@ -371,10 +374,10 @@ def test_plot_alignment(tmpdir, renderer): @pytest.mark.slowtest # can be slow on OSX @testing.requires_testing_data -@requires_pysurfer @traits_test def test_process_clim_plot(renderer_interactive, brain_gc): """Test functionality for determining control points with stc.plot.""" + _check_skip_pysurfer(renderer_interactive) sample_src = read_source_spaces(src_fname) kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1, time_viewer=False, show_traces=False) @@ -393,10 +396,15 @@ def test_process_clim_plot(renderer_interactive, brain_gc): brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs) assert brain.data['center'] == 0. 
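For readers following along, a brief hedged sketch of the two clim dict forms this test exercises (numbers illustrative, not prescriptive):

# 'lims' describes sequential (unsigned) data; 'pos_lims' mirrors the
# control points around zero for diverging (signed) data, which is why
# the 'center' value asserted above is 0.
clim_seq = dict(kind='value', lims=(10, 50, 90))        # in data units
clim_div = dict(kind='percent', pos_lims=(10, 50, 90))  # in percentiles
# brain = stc.plot(clim=clim_div, subjects_dir=subjects_dir)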
    brain.close()
-    stc.plot(colormap='hot', clim='auto', **kwargs)
-    stc.plot(colormap='mne', clim='auto', **kwargs)
-    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
-    pytest.raises(TypeError, stc.plot, clim='auto', figure=[0], **kwargs)
+    brain = stc.plot(colormap='hot', clim='auto', **kwargs)
+    brain.close()
+    brain = stc.plot(colormap='mne', clim='auto', **kwargs)
+    brain.close()
+    brain = stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
+                     **kwargs)
+    brain.close()
+    with pytest.raises(TypeError, match='must be a'):
+        stc.plot(clim='auto', figure=[0], **kwargs)
 
     # Test for correct clim values
     with pytest.raises(ValueError, match='monotonically'):
@@ -418,7 +426,8 @@ def test_process_clim_plot(renderer_interactive, brain_gc):
     # Test handling of degenerate data: thresholded maps
     stc._data.fill(0.)
     with pytest.warns(RuntimeWarning, match='All data were zero'):
-        plot_source_estimates(stc, **kwargs)
+        brain = plot_source_estimates(stc, **kwargs)
+        brain.close()
 
 
 def _assert_mapdata_equal(a, b):
@@ -578,18 +587,27 @@ def test_snapshot_brain_montage(renderer):
     pytest.raises(ValueError, snapshot_brain_montage, None, info)
 
 
+def _check_skip_pysurfer(renderer):
+    is_pyvista = renderer._get_3d_backend() == 'pyvista'
+    if not is_pyvista:
+        with warnings.catch_warnings(record=True):
+            try:
+                from surfer import Brain  # noqa: F401 analysis:ignore
+            except Exception:
+                pytest.skip('Requires PySurfer')
+    return is_pyvista
+
+
 @pytest.mark.slowtest  # can be slow on OSX
 @testing.requires_testing_data
-@requires_pysurfer
-@traits_test
 @pytest.mark.parametrize('pick_ori', ('vector', None))
 @pytest.mark.parametrize('kind', ('surface', 'volume', 'mixed'))
 def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked,
                                pick_ori, kind, brain_gc):
     """Test plotting of scalar and vector source estimates."""
+    is_pyvista = _check_skip_pysurfer(renderer_interactive)
     invs, evoked = all_src_types_inv_evoked
     inv = invs[kind]
-    is_pyvista = renderer_interactive._get_3d_backend() == 'pyvista'
     with pytest.warns(None):  # PCA mag
         stc = apply_inverse(evoked, inv, pick_ori=pick_ori)
     stc.data[1] *= -1  # make it signed
@@ -656,6 +674,7 @@ def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked,
     else:
         brain = flat_meth(**these_kwargs)
         brain.close()
+        del brain
     these_kwargs.update(surface='inflated', views='flat')
     with pytest.raises(ValueError, match='surface="flat".*views="flat"'):
         flat_meth(**these_kwargs)
@@ -740,11 +759,11 @@ def test_brain_colorbar(orientation, diverging, lims):
 
 
 @pytest.mark.slowtest  # slow-ish on Travis OSX
-@requires_pysurfer
 @testing.requires_testing_data
 @traits_test
 def test_mixed_sources_plot_surface(renderer_interactive):
-    """Test plot_surface() for mixed source space.""" 
+    """Test plot_surface() for mixed source space."""
+    _check_skip_pysurfer(renderer_interactive)
     src = read_source_spaces(fwd_fname2)
     N = np.sum([s['nuse'] for s in src])  # number of sources
@@ -757,9 +776,11 @@ def test_mixed_sources_plot_surface(renderer_interactive):
 
     stc = MixedSourceEstimate(data, vertno, 0, 1)
 
-    stc.surface().plot(views='lat', hemi='split',
-                       subject='fsaverage', subjects_dir=subjects_dir,
-                       colorbar=False)
+    brain = stc.surface().plot(views='lat', hemi='split',
+                               subject='fsaverage', subjects_dir=subjects_dir,
+                               colorbar=False)
+    brain.close()
+    del brain
 
 
 @testing.requires_testing_data
diff --git a/mne/viz/utils.py b/mne/viz/utils.py
index 4083a694006..1bb642f37bd 100644
--- a/mne/viz/utils.py
+++ b/mne/viz/utils.py
@@ -1004,7 +1004,7 @@ 
def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, """Plot sensors.""" from matplotlib import rcParams import matplotlib.pyplot as plt - from mpl_toolkits.mplot3d import Axes3D + from mpl_toolkits.mplot3d import Axes3D # noqa: F401 analysis:ignore from .topomap import _get_pos_outlines, _draw_outlines sphere = _check_sphere(sphere, info) @@ -1012,12 +1012,12 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, edgecolors[bads] = 'red' axes_was_none = ax is None if axes_was_none: - fig = plt.figure(figsize=(max(rcParams['figure.figsize']),) * 2) + subplot_kw = dict() if kind == '3d': - Axes3D(fig) - ax = fig.gca(projection='3d') - else: - ax = fig.add_subplot(111) + subplot_kw.update(projection='3d') + fig, ax = plt.subplots( + 1, figsize=(max(rcParams['figure.figsize']),) * 2, + subplot_kw=subplot_kw) else: fig = ax.get_figure() From 7e867ce57cc035c32f1362734031d250b2ac0734 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 1 Feb 2021 08:38:03 -0500 Subject: [PATCH 100/387] MRG, ENH: Add warning about bad whitener conditioning (#8805) * ENH: Add warning about bad condition * FIX: Ref * MAINT: Remove * FIX: Check for reasonable value * DOC: Better docs * STY: Names * FIX: Correct pass --- doc/changes/latest.inc | 2 ++ mne/cov.py | 35 ++++++++++++------- mne/rank.py | 68 ++++++++++++++++++++++++++++-------- mne/tests/test_cov.py | 36 ++++++++++++++----- mne/utils/__init__.py | 2 +- mne/utils/check.py | 8 +++-- mne/utils/docs.py | 14 +++++++- mne/viz/tests/test_evoked.py | 17 ++++----- 8 files changed, 133 insertions(+), 49 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 51740dad57d..0467d9dc6e9 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -48,6 +48,8 @@ Enhancements - Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) +- Add warning to :func:`mne.cov.compute_whitener` when an explicit ``rank`` parameter leads to a large increase in condition number (:gh:`8805` by `Eric Larson`_) + - Add parameter ``align=True`` to `mne.viz.Brain.show_view` to make views relative to the closest canonical (MNI) axes rather than the native MRI surface RAS coordinates (:gh:`8794` by `Eric Larson`_) - Add ``auto_close`` to `mne.Report.add_figs_to_section` and `mne.Report.add_slider_to_section` to manage closing figures (:gh`8730` by `Guillaume Favelier`_) diff --git a/mne/cov.py b/mne/cov.py index 6a679258bc5..4be5d78f314 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -38,7 +38,8 @@ from .utils import (check_fname, logger, verbose, check_version, _time_mask, warn, copy_function_doc_to_method_doc, _pl, _undo_scaling_cov, _scaled_array, _validate_type, - _check_option, eigh, fill_doc) + _check_option, eigh, fill_doc, _on_missing, + _check_on_missing) from . 
import viz from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet, @@ -198,6 +199,14 @@ def as_diag(self): self['eigvec'] = None return self + def _as_square(self): + # This is a hack but it works because np.diag() behaves nicely + if self['diag']: + self['diag'] = False + self.as_diag() + self['diag'] = False + return self + def _get_square(self): if self['diag'] != (self.data.ndim == 1): raise RuntimeError( @@ -872,7 +881,7 @@ def _unpack_epochs(epochs): 'matrix may be inaccurate') orig = epochs[0].info['dev_head_t'] - _check_option('on_mismatch', on_mismatch, ['raise', 'warn', 'ignore']) + _check_on_missing(on_mismatch, 'on_mismatch') for ei, epoch in enumerate(epochs): epoch.info._check_consistency() if (orig is None) != (epoch.info['dev_head_t'] is None) or \ @@ -882,10 +891,7 @@ def _unpack_epochs(epochs): msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n' 'and epochs[%s]:\n%s' % (orig, ei, epoch.info['dev_head_t'])) - if on_mismatch == 'raise': - raise ValueError(msg) - elif on_mismatch == 'warn': - warn(msg) + _on_missing(on_mismatch, msg, 'on_mismatch') bads = epochs[0].info['bads'] if projs is None: @@ -1428,7 +1434,7 @@ def _get_ch_whitener(A, pca, ch_type, rank): @verbose def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, - scalings=None, verbose=None): + scalings=None, on_rank_mismatch='ignore', verbose=None): """Prepare noise covariance matrix. Parameters @@ -1449,6 +1455,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, If dict, it will override the following dict (default if None):: dict(mag=1e12, grad=1e11, eeg=1e5) + %(on_rank_mismatch)s %(verbose)s Returns @@ -1480,7 +1487,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, loglik=noise_cov.get('loglik', None)) eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs, - ch_names) + ch_names, on_rank_mismatch=on_rank_mismatch) noise_cov.update(eig=eig, eigvec=eigvec) return noise_cov @@ -1488,7 +1495,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, @verbose def _smart_eigh(C, info, rank, scalings=None, projs=None, ch_names=None, proj_subspace=False, do_compute_rank=True, - verbose=None): + on_rank_mismatch='ignore', verbose=None): """Compute eigh of C taking into account rank and ch_type scalings.""" scalings = _handle_default('scalings_cov_rank', scalings) projs = info['projs'] if projs is None else projs @@ -1510,7 +1517,8 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, noise_cov = Covariance(C, ch_names, [], projs, 0) if do_compute_rank: # if necessary - rank = compute_rank(noise_cov, rank, scalings, info) + rank = compute_rank( + noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch) assert C.ndim == 2 and C.shape[0] == C.shape[1] # time saving short-circuit @@ -1767,7 +1775,8 @@ def _regularized_covariance(data, reg=None, method_params=None, info=None, @verbose def compute_whitener(noise_cov, info=None, picks=None, rank=None, scalings=None, return_rank=False, pca=False, - return_colorer=False, verbose=None): + return_colorer=False, on_rank_mismatch='warn', + verbose=None): """Compute whitening matrix. Parameters @@ -1805,6 +1814,7 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, .. versionadded:: 0.18 return_colorer : bool If True, return the colorer as well. 
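As a usage sketch of the parameter being added in the lines that follow (a minimal, self-contained example under the assumption that this diff is applied; the channel setup is synthetic):

import mne

# five synthetic EEG channels with an ad-hoc (diagonal) noise covariance
info = mne.create_info(['EEG %02d' % i for i in range(5)], 1000., 'eeg')
cov = mne.make_ad_hoc_cov(info)
# rank entries are now validated: values above the channel count raise,
# and for MEG covariances a large mismatch with the empirical rank warns
W, ch_names = mne.cov.compute_whitener(cov, info, rank=dict(eeg=4),
                                       on_rank_mismatch='warn')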
+    %(on_rank_mismatch)s
     %(verbose)s
 
     Returns
 
     ch_names = [info['ch_names'][k] for k in picks]
     del picks
     noise_cov = prepare_noise_cov(
-        noise_cov, info, ch_names, rank, scalings)
+        noise_cov, info, ch_names, rank, scalings,
+        on_rank_mismatch=on_rank_mismatch)
 
     n_chan = len(ch_names)
     assert n_chan == len(noise_cov['eig'])
diff --git a/mne/rank.py b/mne/rank.py
index dbd1b5dae86..bf5d7e27fcf 100644
--- a/mne/rank.py
+++ b/mne/rank.py
@@ -14,7 +14,8 @@
 from .io.proj import make_projector
 from .utils import (logger, _compute_row_norms, _pl, _validate_type,
                     _apply_scaling_cov, _undo_scaling_cov,
-                    _scaled_array, warn, _check_rank, verbose)
+                    _scaled_array, warn, _check_rank, _on_missing, verbose,
+                    _check_on_missing)
 
 
 @verbose
@@ -165,8 +166,9 @@ def _estimate_rank_meeg_signals(data, info, scalings, tol='auto',
     return out
 
 
+@verbose
 def _estimate_rank_meeg_cov(data, info, scalings, tol='auto',
-                            return_singular=False):
+                            return_singular=False, verbose=None):
     """Estimate rank of M/EEG covariance data, given the covariance.
 
     Parameters
@@ -272,7 +274,8 @@ def _compute_rank_int(inst, *args, **kwargs):
 
 @verbose
 def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto',
-                 proj=True, tol_kind='absolute', verbose=None):
+                 proj=True, tol_kind='absolute', on_rank_mismatch='ignore',
+                 verbose=None):
     """Compute the rank of data or noise covariance.
 
     This function will normalize the rows of the data (typically
@@ -297,6 +300,7 @@ def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto',
         If True, all projs in ``inst`` and ``info`` will be applied or
         considered when ``rank=None`` or ``rank='info'``.
     %(rank_tol_kind)s
+    %(on_rank_mismatch)s
     %(verbose)s
 
     Returns
@@ -315,6 +319,7 @@ def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto',
 
     rank = _check_rank(rank)
     scalings = _handle_default('scalings_cov_rank', scalings)
+    _check_on_missing(on_rank_mismatch, 'on_rank_mismatch')
 
     if isinstance(inst, Covariance):
         inst_type = 'covariance'
@@ -344,8 +349,22 @@ def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto',
     picks_list = _picks_by_type(info, meg_combined=True, ref_meg=False,
                                 exclude='bads')
     for ch_type, picks in picks_list:
+        est_verbose = None
         if ch_type in rank:
-            continue
+            # raise an error if user-supplied rank exceeds number of channels
+            if rank[ch_type] > len(picks):
+                raise ValueError(
+                    f'rank[{repr(ch_type)}]={rank[ch_type]} exceeds the number'
+                    f' of channels ({len(picks)})')
+            # special case: if whitening a covariance, check the passed rank
+            # against the estimated one
+            est_verbose = False
+            if not (on_rank_mismatch != 'ignore' and
+                    rank_type == 'estimated' and
+                    ch_type == 'meg' and
+                    isinstance(inst, Covariance) and
+                    not inst['diag']):
+                continue
         ch_names = [info['ch_names'][pick] for pick in picks]
         n_chan = len(ch_names)
         if proj:
             proj_op, n_proj = make_projector(info['projs'], ch_names)
         else:
             proj_op, n_proj = None, 0
         if rank_type == 'info':
             # use info
-            rank[ch_type] = _info_rank(info, ch_type, picks, info_type)
+            this_rank = _info_rank(info, ch_type, picks, info_type)
             if info_type != 'full':
-                rank[ch_type] -= n_proj
+                this_rank -= n_proj
                 logger.info('    %s: rank %d after %d projector%s applied to '
                             '%d channel%s'
-                            % (ch_type.upper(), rank[ch_type],
+                            % (ch_type.upper(), this_rank,
                                n_proj, _pl(n_proj), n_chan, _pl(n_chan)))
             else:
                 logger.info('    %s: rank %d from info'
-                            % (ch_type.upper(), rank[ch_type]))
+                            % 
(ch_type.upper(), this_rank)) else: # Use empirical estimation assert rank_type == 'estimated' @@ -376,29 +395,48 @@ def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto', data = np.concatenate(data, axis=1) if proj: data = np.dot(proj_op, data) - rank[ch_type] = _estimate_rank_meeg_signals( + this_rank = _estimate_rank_meeg_signals( data, pick_info(simple_info, picks), scalings, tol, False, tol_kind) else: assert isinstance(inst, Covariance) if inst['diag']: - rank[ch_type] = (inst['data'][picks] > 0).sum() - n_proj + this_rank = (inst['data'][picks] > 0).sum() - n_proj else: data = inst['data'][picks][:, picks] if proj: data = np.dot(np.dot(proj_op, data), proj_op.T) - rank[ch_type] = _estimate_rank_meeg_cov( - data, pick_info(simple_info, picks), scalings, tol) + + this_rank, sing = _estimate_rank_meeg_cov( + data, pick_info(simple_info, picks), scalings, tol, + return_singular=True, verbose=est_verbose) + if ch_type in rank: + ratio = sing[this_rank - 1] / sing[rank[ch_type] - 1] + if ratio > 100: + msg = ( + f'The passed rank[{repr(ch_type)}]=' + f'{rank[ch_type]} exceeds the estimated rank ' + f'of the noise covariance ({this_rank}) ' + f'leading to a potential increase in ' + f'noise during whitening by a factor ' + f'of {np.sqrt(ratio):0.1g}. Ensure that the ' + f'rank correctly corresponds to that of the ' + f'given noise covariance matrix.') + _on_missing(on_rank_mismatch, msg, + 'on_rank_mismatch') + continue this_info_rank = _info_rank(info, ch_type, picks, 'info') logger.info(' %s: rank %d computed from %d data channel%s ' 'with %d projector%s' - % (ch_type.upper(), rank[ch_type], n_chan, _pl(n_chan), + % (ch_type.upper(), this_rank, n_chan, _pl(n_chan), n_proj, _pl(n_proj))) - if rank[ch_type] > this_info_rank: + if this_rank > this_info_rank: warn('Something went wrong in the data-driven estimation of ' 'the data rank as it exceeds the theoretical rank from ' 'the info (%d > %d). Consider setting rank to "auto" or ' 'setting it explicitly as an integer.' 
% - (rank[ch_type], this_info_rank)) + (this_rank, this_info_rank)) + if ch_type not in rank: + rank[ch_type] = this_rank return rank diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index 8232f43be58..b04d334c549 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -21,16 +21,15 @@ find_events, compute_raw_covariance, compute_covariance, read_evokeds, compute_proj_raw, pick_channels_cov, pick_types, make_ad_hoc_cov, - make_fixed_length_events, create_info) + make_fixed_length_events, create_info, compute_rank) from mne.channels import equalize_channels from mne.datasets import testing from mne.fixes import _get_args -from mne.io import read_raw_fif, RawArray, read_raw_ctf -from mne.io.pick import _DATA_CH_TYPES_SPLIT +from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info +from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info from mne.preprocessing import maxwell_filter from mne.rank import _compute_rank_int -from mne.utils import (requires_sklearn, run_tests_if_main, - catch_logging, assert_snr) +from mne.utils import requires_sklearn, catch_logging, assert_snr base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') cov_fname = op.join(base_dir, 'test-cov.fif') @@ -96,8 +95,8 @@ def test_cov_mismatch(): compute_covariance([epochs, epochs_2], on_mismatch='ignore') with pytest.raises(RuntimeWarning, match='transform mismatch'): compute_covariance([epochs, epochs_2], on_mismatch='warn') - pytest.raises(ValueError, compute_covariance, epochs, - on_mismatch='x') + with pytest.raises(ValueError, match='Invalid value'): + compute_covariance(epochs, on_mismatch='x') # This should work epochs.info['dev_head_t'] = None epochs_2.info['dev_head_t'] = None @@ -784,4 +783,25 @@ def test_equalize_channels(): assert cov2.ch_names == ['CH1', 'CH2'] -run_tests_if_main() +def test_compute_whitener_rank(): + """Test risky rank options.""" + info = read_info(ave_fname) + info = pick_info(info, pick_types(info, meg=True)) + info['projs'] = [] + # need a square version because the diag one takes shortcuts in + # compute_whitener (users shouldn't even need this function so it's + # private) + cov = make_ad_hoc_cov(info)._as_square() + assert len(cov['names']) == 306 + _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True) + assert rank == 306 + assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank) + cov['data'][-1] *= 1e-14 # trivially rank-deficient + _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True) + assert rank == 305 + assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank) + # this should emit a warning + with pytest.warns(RuntimeWarning, match='exceeds the estimated'): + _, _, rank = compute_whitener(cov, info, rank=dict(meg=306), + return_rank=True) + assert rank == 306 diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index b420e1732f2..67dfb33fd1d 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -17,7 +17,7 @@ _check_path_like, _check_src_normal, _check_stc_units, _check_pyqt5_version, _check_sphere, _check_time_format, _check_freesurfer_home, _suggest, _require_version, - _on_missing, int_like, _safe_input, + _on_missing, _check_on_missing, int_like, _safe_input, _check_all_same_channel_names) from .config import (set_config, get_config, get_config_path, set_cache_dir, set_memmap_min_size, get_subjects_dir, _get_stim_channel, diff --git a/mne/utils/check.py b/mne/utils/check.py index fee4b31b929..40f69b2cafe 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py 
@@ -706,6 +706,11 @@ def _suggest(val, options, cutoff=0.66): return ' Did you mean one of %r?' % (options,) +def _check_on_missing(on_missing, name='on_missing'): + _validate_type(on_missing, str, name) + _check_option(name, on_missing, ['raise', 'warn', 'ignore']) + + def _on_missing(on_missing, msg, name='on_missing'): """Raise error or print warning with a message. @@ -724,10 +729,9 @@ def _on_missing(on_missing, msg, name='on_missing'): ValueError When on_missing is 'raise'. """ - _validate_type(on_missing, str, name) + _check_on_missing(on_missing, name) on_missing = 'raise' if on_missing == 'error' else on_missing on_missing = 'warn' if on_missing == 'warning' else on_missing - _check_option(name, on_missing, ['raise', 'warn', 'ignore']) if on_missing == 'raise': raise ValueError(msg) elif on_missing == 'warn': diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 71f591ceeb3..860c17cc9ae 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -874,7 +874,10 @@ docdict['rank'] = """ rank : None | 'info' | 'full' | dict This controls the rank computation that can be read from the - measurement info or estimated from the data. + measurement info or estimated from the data. When a noise covariance + is used for whitening, this should reflect the rank of that covariance, + otherwise amplification of noise components can occur in whitening (e.g., + often during source localization). :data:`python:None` The rank will be estimated from the data after proper scaling of @@ -1002,6 +1005,15 @@ Support for reducing rank in all modes (previously only supported ``pick='max_power'`` with weight normalization). """ +docdict['on_rank_mismatch'] = """ +on_rank_mismatch : str + If an explicit MEG value is passed, what to do when it does not match + an empirically computed rank (only used for covariances). + Can be 'raise' to raise an error, 'warn' (default) to emit a warning, or + 'ignore' to ignore. + + .. versionadded:: 0.23 +""" docdict['weight_norm'] = """ weight_norm : str | None Can be: diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index 45d06dc50a3..1b4ad293ff9 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -22,7 +22,7 @@ from mne import (read_events, Epochs, read_cov, compute_covariance, make_fixed_length_events, compute_proj_evoked) from mne.io import read_raw_fif -from mne.utils import run_tests_if_main, catch_logging, requires_version +from mne.utils import catch_logging, requires_version from mne.viz import plot_compare_evokeds, plot_evoked_white from mne.viz.utils import _fake_click from mne.datasets import testing @@ -291,10 +291,11 @@ def test_plot_white(): evoked.set_eeg_reference('average') # Avoid warnings # test rank param. 
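Before the changed lines below, it may help to see the shape of the call being tested; a hedged sketch (evoked and cov stand in for the objects this test builds, and the counts are illustrative):

# With this commit, a rank entry larger than the number of channels of
# that type raises a ValueError up front.
fig = evoked.plot_white(cov, rank=dict(mag=1, grad=8, eeg=2), time_unit='s')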
- evoked.plot_white(cov, rank={'mag': 101, 'grad': 201, 'eeg': 10}, - time_unit='s') - fig = evoked.plot_white(cov, rank={'mag': 101}, time_unit='s') # test rank - evoked.plot_white(cov, rank={'grad': 201}, time_unit='s', axes=fig.axes) + with pytest.raises(ValueError, match='exceeds'): + evoked.plot_white(cov, rank={'mag': 10}) + evoked.plot_white(cov, rank={'mag': 1, 'grad': 8, 'eeg': 2}, time_unit='s') + fig = evoked.plot_white(cov, rank={'mag': 1}, time_unit='s') # test rank + evoked.plot_white(cov, rank={'grad': 8}, time_unit='s', axes=fig.axes) with pytest.raises(ValueError, match=r'must have shape \(4,\), got \(2,'): evoked.plot_white(cov, axes=fig.axes[:2]) with pytest.raises(ValueError, match='When not using SSS'): @@ -310,14 +311,13 @@ def test_plot_white(): plot_evoked_white(evoked, [cov, cov], axes=axes[:, :1]) # Hack to test plotting of maxfiltered data - evoked_sss = evoked.copy() + evoked_sss = _get_epochs(picks='meg').average() sss = dict(sss_info=dict(in_order=80, components=np.arange(80))) evoked_sss.info['proc_history'] = [dict(max_info=sss)] evoked_sss.plot_white(cov, rank={'meg': 64}) with pytest.raises(ValueError, match='When using SSS'): evoked_sss.plot_white(cov, rank={'grad': 201}) evoked_sss.plot_white(cov, time_unit='s') - plt.close('all') def test_plot_compare_evokeds(): @@ -508,6 +508,3 @@ def get_axes_midpoints(axes): topomap_args={'axes': topo_axes}, title=None) midpoints_after = get_axes_midpoints(topo_axes) assert (np.linalg.norm(midpoints_before - midpoints_after) < 0.1).all() - - -run_tests_if_main() From 9b57cbd8ec711e3af33846e62ad7d1fbbdcaf5cd Mon Sep 17 00:00:00 2001 From: Adam Li Date: Mon, 1 Feb 2021 08:42:41 -0500 Subject: [PATCH 101/387] [MRG] Fix persyst reader - adds test for multiple repeated Comments, allow commas and require lay and dat file to be in same directory (#8806) * Fix persyst reader and adds test for multiple repeated Comments with the same free-text. Add a fix to Comments reading allowing comma characters in the free-text. Co-authored-by: Andres Rodriguez * Get rid of extra comment. * Update mne/io/persyst/tests/test_persyst.py Co-authored-by: Eric Larson * Address larson comments. * Fix flake. * Fix flake. * Fixing test. * Fixing docstring. * Fix change log entry. * Add latest change. Co-authored-by: Andres Rodriguez Co-authored-by: Eric Larson --- doc/changes/latest.inc | 4 ++ doc/changes/names.inc | 2 + mne/datasets/utils.py | 4 +- mne/io/persyst/persyst.py | 78 +++++++++++++++++++++------ mne/io/persyst/tests/test_persyst.py | 79 ++++++++++++++++++++++------ 5 files changed, 135 insertions(+), 32 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 0467d9dc6e9..34355870009 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -16,6 +16,8 @@ Current (0.23.dev0) .. |Richard Koehler| replace:: **Richard Koehler** +.. 
|Andres Rodriguez| replace:: **Andres Rodriguez**
+
 Enhancements
 ~~~~~~~~~~~~
 - Add dbs as new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_)
@@ -56,6 +58,8 @@ Enhancements
 Bugs
 ~~~~
+- Fix bugs with `mne.io.read_raw_persyst` so that multiple ``Comments`` sharing the same text and ``Comments`` containing a "," character are now handled correctly (:gh:`8311` and :gh:`8806` **by new contributor** |Andres Rodriguez|_ and `Adam Li`_)
+
 - Fix zen mode and scalebar toggling for :meth:`raw.plot() <mne.io.Raw.plot>` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_)
 
 - Fix bug with :func:`mne.preprocessing.maxwell_filter` where the eSSS basis had to exactly match the good channels instead of being a superset (:gh:`8675` by `Eric Larson`_)
diff --git a/doc/changes/names.inc b/doc/changes/names.inc
index 06925300d97..46bfc2f8ca1 100644
--- a/doc/changes/names.inc
+++ b/doc/changes/names.inc
@@ -349,3 +349,5 @@
 .. _Richard Koehler: https://github.com/richardkoehler
 
 .. _Tristan Stenner: https://github.com/tstenner/
+
+.. _Andres Rodriguez: https://github.com/infinitejest/
\ No newline at end of file
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index f11420a3389..b646f3dc0a9 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -251,7 +251,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
     path = _get_path(path, key, name)
     # To update the testing or misc dataset, push commits, then make a new
     # release on GitHub. Then update the "releases" variable:
-    releases = dict(testing='0.113', misc='0.8')
+    releases = dict(testing='0.114', misc='0.8')
     # And also update the "md5_hashes['testing']" variable below.
     # To update any other dataset, update the data archive itself (upload
     # an updated version) and update the md5 hash.
@@ -337,7 +337,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
         sample='12b75d1cb7df9dfb4ad73ed82f61094f',
         somato='32fd2f6c8c7eb0784a1de6435273c48b',
         spm='9f43f67150e3b694b523a21eb929ea75',
-        testing='ce114ad6d5e3dbed06119386e6b1ce0c',
+        testing='0e7fb1b37f924bf50ce5db9f96c67972',
         multimodal='26ec847ae9ab80f58f204d09e2c08367',
         fnirs_motor='c4935d19ddab35422a69f3326a01fef8',
         opm='370ad1dcfd5c47e029e692c85358a374',
diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py
index 40ce94ec824..f3e7f4d768a 100644
--- a/mne/io/persyst/persyst.py
+++ b/mne/io/persyst/persyst.py
@@ -35,6 +35,14 @@ def read_raw_persyst(fname, preload=False, verbose=None):
     See Also
     --------
     mne.io.Raw : Documentation of attributes and methods.
+
+    Notes
+    -----
+    It is assumed that the ``.lay`` and ``.dat`` files
+    are in the same directory. To get the correct file path to the
+    ``.dat`` file, ``read_raw_persyst`` will get the corresponding dat
+    filename from the lay file, and look for that file inside the same
+    directory as the lay file.
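A minimal usage sketch of the layout requirement described above (the path is hypothetical):

import mne

# 'clip.lay' must reference a 'clip.dat' located in the same directory
raw = mne.io.read_raw_persyst('/data/clip.lay', preload=True)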
""" return RawPersyst(fname, preload, verbose) @@ -59,8 +67,10 @@ class RawPersyst(BaseRaw): def __init__(self, fname, preload=False, verbose=None): logger.info('Loading %s' % fname) + # make sure filename is the Lay file if not fname.endswith('.lay'): fname = fname + '.lay' + # get the current directory and Lay filename curr_path, lay_fname = op.dirname(fname), op.basename(fname) if not op.exists(fname): raise FileNotFoundError(f'The path you specified, ' @@ -76,6 +86,9 @@ def __init__(self, fname, preload=False, verbose=None): patient_dict = OrderedDict() comments_dict = OrderedDict() + # keep track of total number of comments + num_comments = 0 + # loop through each line in the lay file for key, val, section in zip(keys, data, sections): if key == '': @@ -89,15 +102,16 @@ def __init__(self, fname, preload=False, verbose=None): if section == 'fileinfo': # extract the .dat file name if key == 'file': - dat_fname = val - dat_path = op.dirname(dat_fname) + dat_fname = op.basename(val) dat_fpath = op.join(curr_path, op.basename(dat_fname)) # determine if .dat file exists where it should error_msg = f'The data path you specified ' \ - f'does not exist for the lay path, {lay_fname}' - if op.isabs(dat_path) and not op.exists(dat_fname): - raise FileNotFoundError(error_msg) + f'does not exist for the lay path, ' \ + f'{lay_fname}. Make sure the dat file ' \ + f'is in the same directory as the lay ' \ + f'file, and the specified dat filename ' \ + f'matches.' if not op.exists(dat_fpath): raise FileNotFoundError(error_msg) fileinfo_dict[key] = val @@ -108,8 +122,10 @@ def __init__(self, fname, preload=False, verbose=None): # Patient (All optional) elif section == 'patient': patient_dict[key] = val + # Comments (turned into mne.Annotations) elif section == 'comments': - comments_dict[key] = val + comments_dict[key] = comments_dict.get(key, list()) + [val] + num_comments += 1 # get numerical metadata # datatype is either 7 for 32 bit, or 0 for 16 bit @@ -205,17 +221,21 @@ def __init__(self, fname, preload=False, verbose=None): raw_extras=[raw_extras], verbose=verbose) # set annotations based on the comments read in - num_comments = len(comments_dict) onset = np.zeros(num_comments, float) duration = np.zeros(num_comments, float) description = [''] * num_comments - for t_idx, (_description, (_onset, _duration)) in \ - enumerate(comments_dict.items()): - # extract the onset, duration, description to - # create an Annotations object - onset[t_idx] = _onset - duration[t_idx] = _duration - description[t_idx] = _description + + # loop through comments dictionary, which may contain + # multiple events for the same "text" annotation + t_idx = 0 + for _description, event_tuples in comments_dict.items(): + for (_onset, _duration) in event_tuples: + # extract the onset, duration, description to + # create an Annotations object + onset[t_idx] = _onset + duration[t_idx] = _duration + description[t_idx] = _description + t_idx += 1 annot = Annotations(onset, duration, description) self.set_annotations(annot) @@ -365,6 +385,34 @@ def _process_lay_line(line, section): value : str The string from the line after the ``'='`` character. If section is "Comments", then returns the onset and duration as a tuple. + + Notes + ----- + The lay file comprises of multiple "sections" that are documented with + bracket ``[]`` characters. For example, ``[FileInfo]`` and the lines + afterward indicate metadata about the data file itself. Within + each section, there are multiple lines in the format of + ``=``. 
+
+    For ``FileInfo``, ``Patient`` and ``ChannelMap``,
+    each line will be denoted with a ``key`` and a ``value`` that
+    can be represented as a dictionary. The keys describe what sort
+    of data that line holds, while the values contain the corresponding
+    value.
+
+    For ``SampleTimes``, the ``key`` and ``value`` pair indicate the
+    start and end time in seconds of the original data file.
+
+    The ``Comments`` section denotes the places where users annotated
+    the data in time through Persyst. These lines are instead
+    represented as 5 data points that are ``,`` delimited. These
+    data points are ordered as:
+
+    1. time (in seconds) of the annotation
+    2. duration (in seconds) of the annotation
+    3. state (unused)
+    4. variable type (unused)
+    5. free-form text describing the annotation
     """
     key = ''  # default; only return value possibly not set
     line = line.strip()  # remove leading and trailing spaces
@@ -388,7 +436,7 @@ def _process_lay_line(line, section):
         # Currently not used
         if section == 'comments':
             # Persyst Comments output 5 variables "," separated
-            time_sec, duration, state, var_type, text = line.split(',')
+            time_sec, duration, state, var_type, text = line.split(',', 4)
             status = 2
             key = text
             value = (time_sec, duration)
diff --git a/mne/io/persyst/tests/test_persyst.py b/mne/io/persyst/tests/test_persyst.py
index 7ac8262af6a..8ce0bdb48d5 100644
--- a/mne/io/persyst/tests/test_persyst.py
+++ b/mne/io/persyst/tests/test_persyst.py
@@ -8,6 +8,7 @@
 import pytest
 from numpy.testing import assert_array_equal
+import numpy as np
 
 import mne
 from mne.datasets.testing import data_path, requires_testing_data
@@ -123,7 +124,6 @@ def test_persyst_wrong_file(tmpdir):
     with pytest.raises(FileNotFoundError, match='The path you'):
         read_raw_persyst(fname_dat, preload=True)
 
-    out_dir = mne.utils._TempDir()
     out_dir = str(tmpdir)
     new_fname_lay = op.join(out_dir, op.basename(fname_lay))
     new_fname_dat = op.join(out_dir, op.basename(fname_dat))
@@ -142,12 +142,75 @@ def test_persyst_wrong_file(tmpdir):
         read_raw_persyst(new_fname_lay, preload=True)
 
 
+@requires_testing_data
+def test_persyst_moved_file(tmpdir):
+    """Test reader - Persyst files need to be in same directory."""
+    out_dir = str(tmpdir)
+    new_fname_lay = op.join(out_dir, op.basename(fname_lay))
+    new_fname_dat = op.join(out_dir, op.basename(fname_dat))
+    shutil.copy(fname_lay, new_fname_lay)
+
+    # original file read should work
+    read_raw_persyst(fname_lay)
+
+    # without a .dat file, reader should break
+    # when the lay file was moved
+    desired_err_msg = \
+        'The data path you specified does ' \
+        'not exist for the lay path, ' \
+        'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay'
+    with pytest.raises(FileNotFoundError, match=desired_err_msg):
+        read_raw_persyst(new_fname_lay, preload=True)
+
+    # now change the file contents to point
+    # to the full path, but it should still not work
+    # as reader requires lay and dat file to be in
+    # same directory
+    with open(fname_lay, "r") as fin:
+        with open(new_fname_lay, 'w') as fout:
+            # for each line in the input file
+            for idx, line in enumerate(fin):
+                if line.startswith('File='):
+                    # give it the full path to the old data
+                    test_fpath = op.join(op.dirname(fname_dat),
+                                         line.split('=')[1])
+                    line = f'File={test_fpath}\n'
+                fout.write(line)
+    with pytest.raises(FileNotFoundError, match=desired_err_msg):
+        read_raw_persyst(new_fname_lay, preload=True)
+
+    # once we copy the dat file to the same directory, reader
+    # should work
+    shutil.copy(fname_dat, new_fname_dat)
+
read_raw_persyst(new_fname_lay, preload=True) + + @requires_testing_data def test_persyst_standard(): """Test standard operations.""" _test_raw_reader(read_raw_persyst, fname=fname_lay) +@requires_testing_data +def test_persyst_annotations(tmpdir): + """Test annotations reading in Persyst.""" + out_dir = str(tmpdir) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_dat, new_fname_dat) + shutil.copy(fname_lay, new_fname_lay) + + raw = read_raw_persyst(new_fname_lay) + + # get the annotations and make sure that repeated annotations + # are in the dataset + annotations = raw.annotations + assert np.count_nonzero(annotations.description == 'seizure') == 2 + + # make sure annotation with a "," character is in there + assert 'seizure1,2' in annotations.description + + @requires_testing_data def test_persyst_errors(): """Test reading Persyst files when passed in wrong file path.""" @@ -182,20 +245,6 @@ def test_persyst_errors(): 'file do not'): read_raw_persyst(new_fname_lay) - # reformat the lay file - os.remove(new_fname_lay) - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: - # for each line in the input file - for idx, line in enumerate(fin): - if line.startswith('File'): - line = f'File=/{op.basename(fname_dat)}\n' - fout.write(line) - # file should break - with pytest.raises(FileNotFoundError, match='The data path ' - 'you specified'): - read_raw_persyst(new_fname_lay) - # reformat the lay file to have testdate # improperly specified os.remove(new_fname_lay) From 9458a30bacc5fa96ca56c3a31b4cceb5925990a7 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Mon, 1 Feb 2021 18:41:33 +0100 Subject: [PATCH 102/387] MRG, FIX: Brain add/remove labels (#8820) * Fix remove_labels * Fix add_label logic * Make it more robust * Make it more robust --- mne/viz/_brain/_brain.py | 16 ++++++++++------ mne/viz/_brain/tests/test_brain.py | 10 ++++++++-- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 86f117caba3..fe3a28b65e6 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -419,6 +419,7 @@ def __init__(self, subject_id, hemi, surf, title=None, self._vertex_to_label_id = dict() self._annotation_labels = dict() self._labels = {'lh': list(), 'rh': list()} + self._unnamed_label_id = 0 # can only grow self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = {} self._elevation_rng = [15, 165] # range of motion of camera on theta @@ -2115,7 +2116,8 @@ def remove_labels(self): """Remove all the ROI labels from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] - mesh.remove_overlay(self._labels[hemi]) + for label in self._labels[hemi]: + mesh.remove_overlay(label.name) self._labels[hemi].clear() self._update() @@ -2308,9 +2310,9 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, hemi = label.hemi ids = label.vertices if label.name is None: - label_name = 'unnamed' - else: - label_name = str(label.name) + label.name = 'unnamed' + str(self._unnamed_label_id) + self._unnamed_label_id += 1 + label_name = str(label.name) if color is None: if hasattr(label, 'color') and label.color is not None: @@ -2334,7 +2336,8 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, scalars = np.zeros(self.geo[hemi].coords.shape[0]) scalars[ids] = 1 - if self.time_viewer and self.show_traces: + if self.time_viewer and self.show_traces \ + and self.traces_mode 
== 'label': stc = self._data["stc"] src = self._data["src"] tc = stc.extract_label_time_course(label, src=src, @@ -2382,7 +2385,8 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, ) if reset_camera: self._renderer.set_camera(**views_dicts[hemi][v]) - if self.time_viewer and self.traces_mode == 'label': + if self.time_viewer and self.show_traces \ + and self.traces_mode == 'label': label._color = orig_color label._line = line self._labels[hemi].append(label) diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 93119cd28b6..89fc7e36303 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -282,10 +282,16 @@ def __init__(self): with pytest.raises(ValueError, match="does not exist"): brain.add_label('foo', subdir='bar') label.name = None # test unnamed label - brain.add_label(label, scalar_thresh=0.) + brain.add_label(label, scalar_thresh=0., color="green") assert isinstance(brain.labels[label.hemi], list) - assert 'unnamed' in brain._layered_meshes[label.hemi]._overlays + overlays = brain._layered_meshes[label.hemi]._overlays + assert 'unnamed0' in overlays + assert np.allclose(overlays['unnamed0']._colormap[0], + [0, 0, 0, 0]) # first component is transparent + assert np.allclose(overlays['unnamed0']._colormap[1], + [0, 128, 0, 255]) # second is green brain.remove_labels() + assert 'unnamed0' not in overlays brain.add_label(fname_label) brain.add_label('V1', borders=True) brain.remove_labels() From 02572faaeba4c8e33f0e7cde0f4a336224271d8b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 1 Feb 2021 13:21:58 -0500 Subject: [PATCH 103/387] MRG, FIX: Fix for NumPy deprecation (#8819) * FIX: Fix for NumPy deprecation * FIX: Flake * FIX: One more * FIX: More * FIX: SciPy --- mne/io/edf/tests/test_edf.py | 11 +++++++---- mne/preprocessing/ica.py | 12 ++++++++++-- mne/utils/tests/test_testing.py | 23 ++++++++++------------- mne/viz/_3d.py | 2 +- mne/viz/_figure.py | 15 +++++---------- 5 files changed, 33 insertions(+), 30 deletions(-) diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 22f550d020d..ebb645f5125 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -208,12 +208,15 @@ def test_parse_annotation(tmpdir): samp=(len(annot) - 1) // 2, dtype_byte='This_parameter_is_not_used') + want_onset, want_duration, want_description = zip( + *[[180., 0., 'Lights off'], [180., 0., 'Close door'], + [180., 0., 'Lights off'], [180., 0., 'Close door'], + [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']]) for tal_channel in [tal_channel_A, tal_channel_B]: onset, duration, description = _read_annotations_edf([tal_channel]) - assert_equal(np.column_stack((onset, duration, description)), - [[180., 0., 'Lights off'], [180., 0., 'Close door'], - [180., 0., 'Lights off'], [180., 0., 'Close door'], - [3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']]) + assert_allclose(onset, want_onset) + assert_allclose(duration, want_duration) + assert description == want_description def test_find_events_backward_compatibility(): diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index b19c2c64a8a..fb0e88ad9de 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -79,6 +79,12 @@ def sfunc(x, y): return sfunc +# Violate our assumption that the output is 1D so can't be used. +# Could eventually be added but probably not worth the effort unless someone +# requests it. 
+_BLOCKLIST = {'somersd'} + + # makes score funcs attr accessible for users def get_score_funcs(): """Get the score functions. @@ -92,9 +98,11 @@ def get_score_funcs(): from scipy.spatial import distance score_funcs = Bunch() xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items() - if isfunction(f) and not n.startswith('_')] + if isfunction(f) and not n.startswith('_') and + n not in _BLOCKLIST] xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items() - if isfunction(f) and not n.startswith('_')] + if isfunction(f) and not n.startswith('_') and + n not in _BLOCKLIST] score_funcs.update({n: _make_xy_sfunc(f) for n, f in xy_arg_dist_funcs if _get_args(f) == ['u', 'v']}) diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index cae8e00aa2d..54b1f769221 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -1,13 +1,10 @@ import os.path as op -import os import numpy as np import pytest -from numpy.testing import assert_equal from mne.datasets import testing -from mne.utils import (_TempDir, _url_to_local_path, run_tests_if_main, - buggy_mkl_svd) +from mne.utils import (_TempDir, _url_to_local_path, buggy_mkl_svd) def test_buggy_mkl(): @@ -35,18 +32,18 @@ def test_tempdir(): assert (not op.isdir(x)) -def test_datasets(): +def test_datasets(monkeypatch, tmpdir): """Test dataset config.""" # gh-4192 - data_path = testing.data_path(download=False) - os.environ['MNE_DATASETS_TESTING_PATH'] = op.dirname(data_path) - assert testing.data_path(download=False) == data_path + fake_path = tmpdir.mkdir('MNE-testing-data') + with open(fake_path.join('version.txt'), 'w') as fid: + fid.write('9999.9999') + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir)) + monkeypatch.setenv('MNE_DATASETS_TESTING_PATH', str(tmpdir)) + assert testing.data_path(download=False, verbose='debug') == str(fake_path) def test_url_to_local_path(): """Test URL to local path.""" - assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'), - op.join('.', 'home', 'why.html')) - - -run_tests_if_main() + assert _url_to_local_path('http://google.com/home/why.html', '.') == \ + op.join('.', 'home', 'why.html') diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 9c123b59d25..2bd90abdffd 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -834,7 +834,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, assert surfs['helmet']['coord_frame'] == FIFF.FIFFV_COORD_MRI # Brain: - brain = np.intersect1d(surfaces, ['brain', 'pial', 'white', 'inflated']) + brain = set(surfaces) & set(['brain', 'pial', 'white', 'inflated']) if len(brain) > 1: raise ValueError('Only one brain surface can be plotted. ' 'Got %s.' 
% brain) diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 9737d2e9fde..707a8295ef1 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -1163,8 +1163,8 @@ def _toggle_draggable_annotations(self, event): def _get_annotation_labels(self): """Get the unique labels in the raw object and added in the UI.""" - labels = list(set(self.mne.inst.annotations.description)) - return np.union1d(labels, self.mne.new_annotation_labels) + return sorted(set(self.mne.inst.annotations.description) | + set(self.mne.new_annotation_labels)) def _update_annotation_fig(self): """Draw or redraw the radio buttons and annotation labels.""" @@ -1294,18 +1294,14 @@ def _add_annotation_label(self, event): def _setup_annotation_colors(self): """Set up colors for annotations; init some annotation vars.""" - raw = self.mne.inst segment_colors = getattr(self.mne, 'annotation_segment_colors', dict()) - # sort the segments by start time - ann_order = raw.annotations.onset.argsort(axis=0) - descriptions = raw.annotations.description[ann_order] - color_keys = np.union1d(descriptions, self.mne.new_annotation_labels) + labels = self._get_annotation_labels() colors, red = _get_color_list(annotations=True) color_cycle = cycle(colors) for key, color in segment_colors.items(): - if color != red and key in color_keys: + if color != red and key in labels: next(color_cycle) - for idx, key in enumerate(color_keys): + for idx, key in enumerate(labels): if key in segment_colors: continue elif key.lower().startswith('bad') or \ @@ -1315,7 +1311,6 @@ def _setup_annotation_colors(self): segment_colors[key] = next(color_cycle) self.mne.annotation_segment_colors = segment_colors # init a couple other annotation-related variables - labels = self._get_annotation_labels() self.mne.visible_annotations = {label: True for label in labels} self.mne.show_hide_annotation_checkboxes = None From 4f2925b19ed8dc54a237021de2912732348ae65c Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 1 Feb 2021 14:54:21 -0500 Subject: [PATCH 104/387] FIX: Hotfix for bug --- mne/viz/_3d.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 2bd90abdffd..327317d78b3 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -834,7 +834,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, assert surfs['helmet']['coord_frame'] == FIFF.FIFFV_COORD_MRI # Brain: - brain = set(surfaces) & set(['brain', 'pial', 'white', 'inflated']) + brain = sorted( + set(surfaces) & set(['brain', 'pial', 'white', 'inflated'])) if len(brain) > 1: raise ValueError('Only one brain surface can be plotted. ' 'Got %s.' 
% brain)

From 6bcf041825abde39aefce603022f900c0be80024 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 2 Feb 2021 10:00:08 +0100
Subject: [PATCH 105/387] MRG, BUG: Fix Epochs.reject_tmin, reject_tmax handling upon Epochs creation & cropping (#8821)

---
 doc/changes/latest.inc   |  4 ++++
 mne/epochs.py            | 51 +++++++++++++++++++++++++++++++---------
 mne/tests/test_epochs.py | 39 ++++++++++++++++++++++++++++--
 3 files changed, 81 insertions(+), 13 deletions(-)

diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index 34355870009..665ab8ba9e1 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -100,6 +100,10 @@ Bugs
 
 - Fix bug with :func:`mne.viz.plot_topomap` when plotting gradiometers with a missing channel in a pair (:gh:`8817` by `Alex Gramfort`_)
 
+- :meth:`epochs.crop() <mne.Epochs.crop>` now also adjusts the ``reject_tmin`` and ``reject_tmax`` attributes if necessary (:gh:`8821` by `Richard Höchenberger`_)
+
+- When creating `~mne.Epochs`, we now ensure that ``reject_tmin`` and ``reject_tmax`` cannot fall outside of the epochs' time interval anymore (:gh:`8821` by `Richard Höchenberger`_)
+
 API changes
 ~~~~~~~~~~~
 
diff --git a/mne/epochs.py b/mne/epochs.py
index 2785174954e..977bc46836d 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -478,20 +478,10 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
         self.metadata = metadata
         # do not set self.events here, let subclass do it
 
-        # check reject_tmin and reject_tmax
-        if (reject_tmin is not None) and (reject_tmin < tmin):
-            raise ValueError("reject_tmin needs to be None or >= tmin")
-        if (reject_tmax is not None) and (reject_tmax > tmax):
-            raise ValueError("reject_tmax needs to be None or <= tmax")
-        if (reject_tmin is not None) and (reject_tmax is not None):
-            if reject_tmin >= reject_tmax:
-                raise ValueError('reject_tmin needs to be < reject_tmax')
         if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
             raise ValueError('detrend must be None, 0, or 1')
-
-        self.reject_tmin = reject_tmin
-        self.reject_tmax = reject_tmax
         self.detrend = detrend
+
         self._raw = raw
         info._check_consistency()
         self.picks = _picks_to_idx(info, picks, none='all', exclude=(),
@@ -526,6 +516,33 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
             self._raw_times = np.arange(start_idx,
                                         int(round(tmax * sfreq)) + 1) / sfreq
         self._set_times(self._raw_times)
+
+        # check reject_tmin and reject_tmax
+        if reject_tmin is not None:
+            if (np.isclose(reject_tmin, tmin)):
+                # adjust for potential small deviations due to sampling freq
+                reject_tmin = self.tmin
+            elif reject_tmin < tmin:
+                raise ValueError(f'reject_tmin needs to be None or >= tmin '
+                                 f'(got {reject_tmin})')
+
+        if reject_tmax is not None:
+            if (np.isclose(reject_tmax, tmax)):
+                # adjust for potential small deviations due to sampling freq
+                reject_tmax = self.tmax
+            elif reject_tmax > tmax:
+                raise ValueError(f'reject_tmax needs to be None or <= tmax '
+                                 f'(got {reject_tmax})')
+
+        if (reject_tmin is not None) and (reject_tmax is not None):
+            if reject_tmin >= reject_tmax:
+                raise ValueError(f'reject_tmin ({reject_tmin}) needs to be '
+                                 f' < reject_tmax ({reject_tmax})')
+
+        self.reject_tmin = reject_tmin
+        self.reject_tmax = reject_tmax
+
+        # decimation
         self._decim = 1
         self.decimate(decim)
@@ -1560,6 +1577,18 @@ def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
         self._set_times(self.times[tmask])
         self._raw_times = self._raw_times[tmask]
         self._data = self._data[:, :, tmask]
+
+        # Adjust rejection
period + if self.reject_tmin is not None and self.reject_tmin < self.tmin: + logger.info( + f'reject_tmin is not in epochs time interval. ' + f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)') + self.reject_tmin = self.tmin + if self.reject_tmax is not None and self.reject_tmax > self.tmax: + logger.info( + f'reject_tmax is not in epochs time interval. ' + f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)') + self.reject_tmax = self.tmax return self def copy(self): diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index a586fe71f93..bb2c4b5acbe 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1384,8 +1384,11 @@ def test_evoked_standard_error(tmpdir): assert ave.first == ave2.first -def test_reject_epochs(): +def test_reject_epochs(tmpdir): """Test of epochs rejection.""" + tempdir = str(tmpdir) + temp_fname = op.join(tempdir, 'test-epo.fif') + raw, events, picks = _get_data() events1 = events[events[:, 2] == event_id] epochs = Epochs(raw, events1, event_id, tmin, tmax, @@ -1437,6 +1440,17 @@ def test_reject_epochs(): data = epochs[0].get_data()[0] assert epochs._is_good_epoch(data) == (True, None) + # Check that reject_tmin and reject_tmax are being adjusted for small time + # inaccuracies due to sfreq + epochs = Epochs(raw=raw, events=events1, event_id=event_id, + tmin=tmin, tmax=tmax, reject_tmin=tmin, reject_tmax=tmax) + assert epochs.tmin != tmin + assert epochs.tmax != tmax + assert np.isclose(epochs.tmin, epochs.reject_tmin) + assert np.isclose(epochs.tmax, epochs.reject_tmax) + epochs.save(temp_fname, overwrite=True) + read_epochs(temp_fname) + def test_preload_epochs(): """Test preload of epochs.""" @@ -1525,8 +1539,11 @@ def test_comparision_with_c(): assert_array_almost_equal(evoked.times, c_evoked.times, 12) -def test_crop(): +def test_crop(tmpdir): """Test of crop of epochs.""" + tempdir = str(tmpdir) + temp_fname = op.join(tempdir, 'test-epo.fif') + raw, events, picks = _get_data() epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks, preload=False, reject=reject, flat=flat) @@ -1584,6 +1601,24 @@ def test_crop(): pytest.raises(ValueError, epochs.crop, 1000, 2000) pytest.raises(ValueError, epochs.crop, 0.1, 0) + # Test that cropping adjusts reject_tmin and reject_tmax if need be. 
+ epochs = Epochs(raw=raw, events=events[:5], event_id=event_id, + tmin=tmin, tmax=tmax, reject_tmin=tmin, reject_tmax=tmax) + epochs.load_data() + epochs_cropped = epochs.copy().crop(0, None) + assert np.isclose(epochs_cropped.tmin, epochs_cropped.reject_tmin) + + epochs_cropped = epochs.copy().crop(None, 0.1) + assert np.isclose(epochs_cropped.tmax, epochs_cropped.reject_tmax) + del epochs_cropped + + # Cropping & I/O roundtrip + epochs.crop(0, 0.1) + epochs.save(temp_fname) + epochs_read = mne.read_epochs(temp_fname) + assert np.isclose(epochs_read.tmin, epochs_read.reject_tmin) + assert np.isclose(epochs_read.tmax, epochs_read.reject_tmax) + def test_resample(): """Test of resample of epochs.""" From dbe91585fb505de2acf9ade215aa05bb1400c5bb Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Tue, 2 Feb 2021 14:07:34 +0100 Subject: [PATCH 106/387] Use 3d backend (#8818) --- mne/viz/_3d.py | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 327317d78b3..91b8da2f9dc 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -1795,7 +1795,7 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', - https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferOccipitalFlattenedPatch - https://openwetware.org/wiki/Beauchamp:FreeSurfer """ # noqa: E501 - from .backends.renderer import _get_3d_backend, set_3d_backend + from .backends.renderer import _get_3d_backend, use_3d_backend from ..source_estimate import _BaseSourceEstimate, _check_stc_src _check_stc_src(stc, src) _validate_type(stc, _BaseSourceEstimate, 'stc', 'source estimate') @@ -1806,19 +1806,12 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', ['auto', 'matplotlib', 'mayavi', 'pyvista']) plot_mpl = backend == 'matplotlib' if not plot_mpl: - try: - if backend == 'auto': - set_3d_backend(_get_3d_backend()) - else: - set_3d_backend(backend) - except (ImportError, ModuleNotFoundError): - if backend == 'auto': + if backend == 'auto': + try: + backend = _get_3d_backend() + except (ImportError, ModuleNotFoundError): warn('No 3D backend found. 
Resorting to matplotlib 3d.') plot_mpl = True - else: # 'mayavi' - raise - else: - backend = _get_3d_backend() kwargs = dict( subject=subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1828,12 +1821,15 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', transparent=transparent) if plot_mpl: return _plot_mpl_stc(stc, spacing=spacing, **kwargs) - return _plot_stc( - stc, overlay_alpha=alpha, brain_alpha=alpha, vector_alpha=alpha, - cortex=cortex, foreground=foreground, size=size, scale_factor=None, - show_traces=show_traces, src=src, volume_options=volume_options, - view_layout=view_layout, add_data_kwargs=add_data_kwargs, - brain_kwargs=brain_kwargs, **kwargs) + else: + with use_3d_backend(backend): + return _plot_stc( + stc, overlay_alpha=alpha, brain_alpha=alpha, + vector_alpha=alpha, cortex=cortex, foreground=foreground, + size=size, scale_factor=None, show_traces=show_traces, + src=src, volume_options=volume_options, + view_layout=view_layout, add_data_kwargs=add_data_kwargs, + brain_kwargs=brain_kwargs, **kwargs) def _plot_stc(stc, subject, surface, hemi, colormap, time_label, From fa83dadb9cd2d3607b91638aedd5b400059b149f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 3 Feb 2021 11:47:04 -0500 Subject: [PATCH 107/387] MAINT: Dont fail on coverage (#8838) --- codecov.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/codecov.yml b/codecov.yml index 2963824ca62..fda31265be5 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,6 +8,7 @@ coverage: status: patch: default: + informational: true target: 95% if_no_uploads: error if_not_found: success @@ -15,6 +16,7 @@ coverage: project: default: false library: + informational: true target: 90% if_no_uploads: error if_not_found: success From 74750e6b53a26e92a0d509c8458790b5d42d5117 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 3 Feb 2021 10:47:44 -0600 Subject: [PATCH 108/387] FIX: only delete visible annotation spans (#8831) * fix: only delete visible annotation spans * add test * changelog * fix pydocstyle * better test --- doc/changes/latest.inc | 2 ++ mne/annotations.py | 4 ++-- mne/viz/_figure.py | 6 +++++- mne/viz/tests/test_raw.py | 16 ++++++++++++++++ 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 665ab8ba9e1..405bc9d6a0c 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -86,6 +86,8 @@ Bugs - Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) +- Fix bug where hidden annotations could be deleted interactively in :meth:`mne.io.Raw.plot` windows (:gh:`8831` by `Daniel McCloy`_) + - Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) - Fix anonymization issue of FIF files after IO round trip (:gh:`8731` by `Alex Gramfort`_) diff --git a/mne/annotations.py b/mne/annotations.py index 119b803615a..e9388e2801d 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -18,7 +18,7 @@ from .utils import (_pl, check_fname, _validate_type, verbose, warn, logger, _check_pandas_installed, _mask_to_onsets_offsets, _DefaultEventParser, _check_dt, _stamp_to_dt, _dt_to_stamp, - _check_fname) + _check_fname, int_like) from .io.write import (start_block, end_block, write_float, write_name_list, write_double, start_file) @@ -257,7 +257,7 @@ def __iter__(self): def __getitem__(self, key): """Propagate indexing and slicing to the 
underlying numpy structure.""" - if isinstance(key, int): + if isinstance(key, int_like): out_keys = ('onset', 'duration', 'description', 'orig_time') out_vals = (self.onset[key], self.duration[key], self.description[key], self.orig_time) diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 707a8295ef1..2fea0a116be 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -794,7 +794,11 @@ def _buttonpress(self, event): start = _sync_onset(inst, inst.annotations.onset) end = start + inst.annotations.duration ann_idx = np.where((xdata > start) & (xdata < end))[0] - inst.annotations.delete(ann_idx) # only first one deleted + for idx in sorted(ann_idx)[::-1]: + # only remove visible annotation spans + descr = inst.annotations[idx]['description'] + if self.mne.visible_annotations[descr]: + inst.annotations.delete(idx) self._remove_annotation_hover_line() self._draw_annotations() self.canvas.draw_idle() diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py index a4f67e3eb21..4f980f065eb 100644 --- a/mne/viz/tests/test_raw.py +++ b/mne/viz/tests/test_raw.py @@ -539,6 +539,22 @@ def test_plot_annotations(raw): assert len(fig.mne.annotation_texts) == 1 +@pytest.mark.parametrize('hide_which', ([], [0], [1], [0, 1])) +def test_remove_annotations(raw, hide_which): + """Test that right-click doesn't remove hidden annotation spans.""" + ann = Annotations(onset=[2, 1], duration=[1, 3], + description=['foo', 'bar']) + raw.set_annotations(ann) + assert len(raw.annotations) == 2 + fig = raw.plot() + fig.canvas.key_press_event('a') # start annotation mode + checkboxes = fig.mne.show_hide_annotation_checkboxes + for which in hide_which: + checkboxes.set_active(which) + _fake_click(fig, fig.mne.ax_main, (2.5, 0.1), xform='data', button=3) + assert len(raw.annotations) == len(hide_which) + + @pytest.mark.parametrize('filtorder', (0, 2)) # FIR, IIR def test_plot_raw_filtered(filtorder, raw): """Test filtering of raw plots.""" From 2f7df8843401433b28af0278666fe629fd9f0c28 Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Thu, 4 Feb 2021 17:43:00 +0100 Subject: [PATCH 109/387] MNT: CIs (#8828) * Escape % in azure * Focus on linux_conda and macos_conda [skip azp] * Focus on viz [skip azp] * Uninstall mayavi [skip azp] * Try again [skip azp] * Refactor ensure_minimum_size * Refactor ensure_minimum_size [skip azp] * Try again [skip azp] * Third try [skip azp] * Revert "Refactor ensure_minimum_size [skip azp]" This reverts commit c49287d32d01d26709f9ac94eff7283c89e2704c. * Revert "Refactor ensure_minimum_size" This reverts commit 353801a5b73bbb25b18a8db236ca9c0aa7852030. * Patch ensure_minimum_size [skip azp] * Add _qt_disable_paint * Patch surfer close * Restore linux_conda and macos_conda * Revert "Focus on linux_conda and macos_conda [skip azp]" This reverts commit 3393cb47858e173c33e717ac68fa9ec028bc0e96. 
* Remove cruft * Remove cruft * Fix * Move to plot_stc * Try again * Try another patch --- azure-pipelines.yml | 1 + mne/viz/_3d.py | 17 +++++++++-------- mne/viz/_brain/_brain.py | 8 +++++--- mne/viz/backends/_pyvista.py | 10 ++++++---- mne/viz/backends/_utils.py | 11 +++++++++++ mne/viz/backends/renderer.py | 4 +++- 6 files changed, 35 insertions(+), 16 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3a8b9e0c309..e5c2ddb55f3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,6 +19,7 @@ stages: pool: vmImage: 'ubuntu-18.04' variables: + DECODE_PERCENTS: 'false' RET: 'true' steps: - bash: | diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 91b8da2f9dc..30b128faa85 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -38,8 +38,7 @@ read_ras_mni_t, _print_coord_trans) from ..utils import (get_subjects_dir, logger, _check_subject, verbose, warn, has_nibabel, check_version, fill_doc, _pl, get_config, - _ensure_int, _validate_type, _check_option, - _require_version) + _ensure_int, _validate_type, _check_option) from .utils import (mne_analyze_colormap, _get_color_list, plt_show, tight_layout, figure_nobar, _check_time_unit) from .misc import _check_mri @@ -1838,7 +1837,7 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, brain_alpha, overlay_alpha, vector_alpha, cortex, foreground, size, scale_factor, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs): - from .backends.renderer import _get_3d_backend + from .backends.renderer import _get_3d_backend, get_brain_class from ..source_estimate import _BaseVolSourceEstimate vec = stc._data_ndim == 3 subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, @@ -1848,11 +1847,7 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, backend = _get_3d_backend() del _get_3d_backend using_mayavi = backend == "mayavi" - if using_mayavi: - from surfer import Brain - _require_version('surfer', 'stc.plot', '0.9') - else: # PyVista - from ._brain import Brain + Brain = get_brain_class() views = _check_views(surface, views, hemi, stc, backend) _check_option('hemi', hemi, ['lh', 'rh', 'split', 'both']) _check_option('view_layout', view_layout, ('vertical', 'horizontal')) @@ -1911,6 +1906,12 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, with warnings.catch_warnings(record=True): # traits warnings brain = Brain(**kwargs) del kwargs + + if using_mayavi: + # Here we patch to avoid segfault: + # https://github.com/mne-tools/mne-python/pull/8828 + brain.close = lambda *args, **kwargs: brain._close(False) + if scale_factor is None: # Configure the glyphs scale directly width = np.mean([np.ptp(brain.geo[hemi].coords[:, 1]) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index fe3a28b65e6..88fbe9e1ff2 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -572,6 +572,7 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): 'Left': Decrease camera azimuth angle 'Right': Increase camera azimuth angle """ + from ..backends._utils import _qt_disable_paint if self.time_viewer: return if not self._data: @@ -679,8 +680,9 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): self._configure_status_bar() # show everything at the end - with self.ensure_minimum_sizes(): - self.show() + with _qt_disable_paint(self.plotter): + with self._ensure_minimum_sizes(): + self.show() @safe_event def _clean(self): @@ -717,7 +719,7 @@ def _clean(self): setattr(self, key, None) @contextlib.contextmanager - def 
ensure_minimum_sizes(self): + def _ensure_minimum_sizes(self): """Ensure that widgets respect the windows size.""" sz = self._size adjust_mpl = self.show_traces and not self.separate_canvas diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 72669c353bf..769a479fbba 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -23,7 +23,8 @@ from .base_renderer import _BaseRenderer from ._utils import (_get_colormap_from_array, _alpha_blend_background, - ALLOWED_QUIVER_MODES, _init_qt_resources) + ALLOWED_QUIVER_MODES, _init_qt_resources, + _qt_disable_paint) from ...fixes import _get_args from ...transforms import apply_trans from ...utils import copy_base_doc_to_subclass_doc, _check_option @@ -221,7 +222,7 @@ def _get_screenshot_filename(self): return "MNE" + dt_string + ".png" @contextmanager - def ensure_minimum_sizes(self): + def _ensure_minimum_sizes(self): sz = self.figure.store['window_size'] # plotter: pyvista.plotting.qt_plotting.BackgroundPlotter # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa @@ -631,8 +632,9 @@ def scalarbar(self, source, color="white", title=None, n_labels=4, def show(self): self.figure.display = self.plotter.show() if hasattr(self.plotter, "app_window"): - with self.ensure_minimum_sizes(): - self.plotter.app_window.show() + with _qt_disable_paint(self.plotter): + with self._ensure_minimum_sizes(): + self.plotter.app_window.show() return self.scene() def close(self): diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 626f161017a..c95ae881486 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -7,6 +7,7 @@ # # License: Simplified BSD +from contextlib import contextmanager import numpy as np import collections.abc from ...externals.decorator import decorator @@ -78,3 +79,13 @@ def run_once(fun, *args, **kwargs): def _init_qt_resources(): from ...icons import resources resources.qInitResources() + + +@contextmanager +def _qt_disable_paint(widget): + paintEvent = widget.paintEvent + widget.paintEvent = lambda *args, **kwargs: None + try: + yield + finally: + widget.paintEvent = paintEvent diff --git a/mne/viz/backends/renderer.py b/mne/viz/backends/renderer.py index fad9f882f97..7e94a8ba540 100644 --- a/mne/viz/backends/renderer.py +++ b/mne/viz/backends/renderer.py @@ -11,7 +11,8 @@ import importlib from ._utils import VALID_3D_BACKENDS -from ...utils import logger, verbose, get_config, _check_option +from ...utils import (logger, verbose, get_config, _check_option, + _require_version) MNE_3D_BACKEND = None MNE_3D_BACKEND_TESTING = False @@ -292,6 +293,7 @@ def get_brain_class(): """ if get_3d_backend() == "mayavi": from surfer import Brain + _require_version('surfer', 'stc.plot', '0.9') else: # PyVista from ...viz._brain import Brain return Brain From 7b6b2c53d6f14fee8fa4515b9ca2a30ec0e7daeb Mon Sep 17 00:00:00 2001 From: Guillaume Favelier Date: Fri, 5 Feb 2021 10:27:35 +0100 Subject: [PATCH 110/387] MNT: Update 3d backend docstring (#8835) * Update docstring * Update mne/viz/backends/renderer.py Co-authored-by: Eric Larson * Update mne/viz/backends/renderer.py Co-authored-by: Mainak Jas * Fix * Use numpydoc standard Co-authored-by: Eric Larson Co-authored-by: Mainak Jas --- mne/viz/backends/renderer.py | 90 ++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/mne/viz/backends/renderer.py b/mne/viz/backends/renderer.py index 7e94a8ba540..ea8d7a07fe1 100644 --- 
a/mne/viz/backends/renderer.py +++ b/mne/viz/backends/renderer.py @@ -61,49 +61,46 @@ def set_3d_backend(backend_name, verbose=None): .. table:: :widths: auto - +--------------------------------------+--------+---------+ - | 3D function: | mayavi | pyvista | - +======================================+========+=========+ - | :func:`plot_vector_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_alignment` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_sparse_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_evoked_field` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_sensors_connectivity` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`snapshot_brain_montage` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`link_brains` | | ✓ | - +--------------------------------------+--------+---------+ - +--------------------------------------+--------+---------+ - | **3D feature:** | - +--------------------------------------+--------+---------+ - | Large data | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Opacity/transparency | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Support geometric glyph | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Jupyter notebook | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Interactivity in Jupyter notebook | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Smooth shading | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Subplotting | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Save offline movie | ✓ | ✓ | - +--------------------------------------+--------+---------+ - - .. note:: - In the case of `plot_vector_source_estimates` with PyVista, the glyph - size is not consistent with Mayavi, it is also possible that a dark - filter is visible on the mesh when depth peeling is not available. 
+ +--------------------------------------+--------+---------+----------+ + | **3D function:** | mayavi | pyvista | notebook | + +======================================+========+=========+==========+ + | :func:`plot_vector_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_alignment` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_sparse_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_evoked_field` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_sensors_connectivity` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`snapshot_brain_montage` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`link_brains` | | ✓ | | + +--------------------------------------+--------+---------+----------+ + +--------------------------------------+--------+---------+----------+ + | **Feature:** | + +--------------------------------------+--------+---------+----------+ + | Large data | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Opacity/transparency | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Support geometric glyph | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Smooth shading | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Subplotting | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in Jupyter Notebook | ✓ | | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in JupyterLab | ✓ | | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in Google Colab | | | | + +--------------------------------------+--------+---------+----------+ + | Toolbar | | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ """ global MNE_3D_BACKEND try: @@ -159,11 +156,14 @@ def _get_3d_backend(): @contextmanager def use_3d_backend(backend_name): - """Create a viz context. + """Create a 3d visualization context using the designated backend. + + See :func:`mne.viz.set_3d_backend` for more details on the available + 3d backends and their capabilities. Parameters ---------- - backend_name : str + backend_name : {'mayavi', 'pyvista', 'notebook'} The 3d backend to use in the context. 
""" old_backend = _get_3d_backend() From 59429c3b8022f61cf3539dc421cadff4ab675d24 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 5 Feb 2021 11:22:40 -0500 Subject: [PATCH 111/387] MRG, ENH: Dont store raw.times (#8801) * ENH: Create raw.times on the fly * DOC: PR * FIX: Fix tests * FIX: More * ENH: Numba * STY: Fix docstring * ENH: One more * MAINT: DRY * STY: Sp * FIX: No times --- doc/changes/latest.inc | 2 + mne/baseline.py | 8 ++-- mne/conftest.py | 35 +++++++++++------- mne/epochs.py | 53 ++++++++++++++++----------- mne/forward/forward.py | 1 - mne/io/base.py | 37 ++++++++++--------- mne/io/pick.py | 19 ++++------ mne/preprocessing/ica.py | 31 ++++++---------- mne/preprocessing/realign.py | 1 - mne/preprocessing/tests/test_ica.py | 5 ++- mne/stats/tests/test_cluster_level.py | 19 +--------- mne/tests/test_epochs.py | 3 +- mne/tests/test_event.py | 1 - mne/time_frequency/tfr.py | 4 ++ mne/utils/__init__.py | 2 +- mne/utils/mixin.py | 13 +++++-- mne/utils/numerics.py | 20 +++++++++- mne/utils/tests/test_numerics.py | 11 +++++- 18 files changed, 150 insertions(+), 115 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 405bc9d6a0c..6bba14453c8 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -34,6 +34,8 @@ Enhancements - Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) +- Reduce memory consumption of `mne.io.Raw` and speed up epoching when thousands of events are present for `mne.Epochs` (:gh:`8801` by `Eric Larson`_) + - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) - `mne.Report` has gained a new method `~mne.Report.add_custom_css` for adding user-defined styles (:gh:`8762` by `Richard Höchenberger`_) diff --git a/mne/baseline.py b/mne/baseline.py index 5b25aad5951..87606a21704 100644 --- a/mne/baseline.py +++ b/mne/baseline.py @@ -59,9 +59,11 @@ def rescale(data, times, baseline, mode='mean', copy=True, picks=None, data_scaled: array Array of same shape as data after rescaling. """ - data = data.copy() if copy else data - msg = _log_rescale(baseline, mode) - logger.info(msg) + if copy: + data = data.copy() + if verbose is not False: + msg = _log_rescale(baseline, mode) + logger.info(msg) if baseline is None or data.shape[-1] == 0: return data diff --git a/mne/conftest.py b/mne/conftest.py index edf67efee55..aa14e290ef5 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -12,23 +12,14 @@ import sys import warnings import pytest -# For some unknown reason, on Travis-xenial there are segfaults caused on -# the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an -# import here seems to prevent them (!?). This suggests a potential problem -# with some other library stepping on memory where it shouldn't. It only -# seems to happen on the Linux runs that install Mayavi. Anectodally, -# @larsoner has had problems a couple of years ago where a mayavi import -# seemed to corrupt SciPy linalg function results (!), likely due to the -# associated VTK import, so this could be another manifestation of that. 
-try: - import readline # noqa -except Exception: - pass import numpy as np + import mne from mne.datasets import testing -from mne.utils import _pl, _assert_no_instances +from mne.fixes import has_numba +from mne.stats import cluster_level +from mne.utils import _pl, _assert_no_instances, numerics test_path = testing.data_path(download=False) s_path = op.join(test_path, 'MEG', 'sample') @@ -571,3 +562,21 @@ def pytest_sessionfinish(session, exitstatus): timings = [timing.rjust(rjust) for timing in timings] for name, timing in zip(names, timings): writer.line(f'{timing.ljust(15)}{name}') + + +@pytest.fixture(scope="function", params=('Numba', 'NumPy')) +def numba_conditional(monkeypatch, request): + """Test both code paths on machines that have Numba.""" + assert request.param in ('Numba', 'NumPy') + if request.param == 'NumPy' and has_numba: + monkeypatch.setattr( + cluster_level, '_get_buddies', cluster_level._get_buddies_fallback) + monkeypatch.setattr( + cluster_level, '_get_selves', cluster_level._get_selves_fallback) + monkeypatch.setattr( + cluster_level, '_where_first', cluster_level._where_first_fallback) + monkeypatch.setattr( + numerics, '_arange_div', numerics._arange_div_fallback) + if request.param == 'Numba' and not has_numba: + pytest.skip('Numba not installed') + yield request.param diff --git a/mne/epochs.py b/mne/epochs.py index 977bc46836d..8ca334ef23c 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -31,10 +31,9 @@ from .io.tag import read_tag, read_tag_info from .io.constants import FIFF from .io.fiff.raw import _get_fname_rep -from .io.pick import (pick_types, channel_indices_by_type, channel_type, +from .io.pick import (channel_indices_by_type, channel_type, pick_channels, pick_info, _pick_data_channels, - _pick_aux_channels, _DATA_CH_TYPES_SPLIT, - _picks_to_idx) + _DATA_CH_TYPES_SPLIT, _picks_to_idx) from .io.proj import setup_proj, ProjMixin, _proj_equal from .io.base import BaseRaw, TimeMixin from .bem import _check_origin @@ -722,11 +721,10 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): raise RuntimeError('You cannot remove baseline correction ' 'from preloaded data once it has been ' 'applied.') - picks = _pick_data_channels(self.info, exclude=[], - with_ref_meg=True) - picks_aux = _pick_aux_channels(self.info, exclude=[]) - picks = np.sort(np.concatenate((picks, picks_aux))) + self._do_baseline = True + picks = self._detrend_picks rescale(self._data, self.times, baseline, copy=False, picks=picks) + self._do_baseline = False else: # logging happens in "rescale" in "if" branch logger.info(_log_rescale(baseline)) assert self._do_baseline is True @@ -827,7 +825,7 @@ def _is_good_epoch(self, data, verbose=None): ignore_chs=self.info['bads']) @verbose - def _detrend_offset_decim(self, epoch, verbose=None): + def _detrend_offset_decim(self, epoch, picks, verbose=None): """Aux Function: detrend, baseline correct, offset, decim. 
Note: operates inplace @@ -837,17 +835,16 @@ def _detrend_offset_decim(self, epoch, verbose=None): # Detrend if self.detrend is not None: - picks = _pick_data_channels(self.info, exclude=[]) - epoch[picks] = detrend(epoch[picks], self.detrend, axis=1) + # We explicitly detrend just data channels (not EMG, ECG, EOG which + # are processed by baseline correction) + use_picks = _pick_data_channels(self.info, exclude=()) + epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1) # Baseline correct if self._do_baseline: - picks = pick_types(self.info, meg=True, eeg=True, stim=False, - ref_meg=True, eog=True, ecg=True, seeg=True, - emg=True, bio=True, ecog=True, fnirs=True, - dbs=True, exclude=[]) - epoch[picks] = rescale(epoch[picks], self._raw_times, - self.baseline, copy=False, verbose=False) + rescale( + epoch, self._raw_times, self.baseline, picks=picks, copy=False, + verbose=False) # Decimate if necessary (i.e., epoch not preloaded) epoch = epoch[:, self._decim_slice] @@ -872,7 +869,7 @@ def iter_evoked(self, copy=False): If False copies of data and measurement info will be omitted to save time. """ - self._current = 0 + self.__iter__() while True: try: @@ -1378,10 +1375,12 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): return data[:, picks] # we need to load from disk, drop, and return data + detrend_picks = self._detrend_picks for ii, idx in enumerate(use_idx): # faster to pre-allocate memory here epoch_noproj = self._get_epoch_from_raw(idx) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) if self._do_delayed_proj: epoch_out = epoch_noproj else: @@ -1397,6 +1396,8 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): n_out = 0 drop_log = list(self.drop_log) assert n_events == len(self.selection) + if not self.preload: + detrend_picks = self._detrend_picks for idx, sel in enumerate(self.selection): if self.preload: # from memory if self._do_delayed_proj: @@ -1407,7 +1408,8 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): epoch = self._data[idx] else: # from disk epoch_noproj = self._get_epoch_from_raw(idx) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) epoch = self._project_epoch(epoch_noproj) epoch_out = epoch_noproj if self._do_delayed_proj else epoch @@ -1456,6 +1458,14 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): else: return None + @property + def _detrend_picks(self): + if self._do_baseline: + return _pick_data_channels( + self.info, with_ref_meg=True, with_aux=True, exclude=()) + else: + return [] + @fill_doc def get_data(self, picks=None, item=None): """Get all epochs as a 3D array. 
@@ -2367,9 +2377,10 @@ def __init__(self, data, info, events=None, tmin=0, event_id=None, list(self.event_id.values())).sum(): raise ValueError('The events must only contain event numbers from ' 'event_id') - for ii, e in enumerate(self._data): + detrend_picks = self._detrend_picks + for e in self._data: # This is safe without assignment b/c there is no decim - self._detrend_offset_decim(e) + self._detrend_offset_decim(e, detrend_picks) self.drop_bad() diff --git a/mne/forward/forward.py b/mne/forward/forward.py index aa793f6ebbb..3f89b3f0045 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -1491,7 +1491,6 @@ def apply_forward_raw(fwd, stc, info, start=None, stop=None, raw._first_samps = np.array([int(np.round(times[0] * sfreq))]) raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1]) raw._projector = None - raw._update_times() return raw diff --git a/mne/io/base.py b/mne/io/base.py index cd8050f064e..ebef6bbaff4 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -47,7 +47,7 @@ copy_function_doc_to_method_doc, _validate_type, _check_preload, _get_argvalues, _check_option, _build_data_frame, _convert_times, _scale_dataframe_data, - _check_time_format) + _check_time_format, _arange_div) from ..defaults import _handle_default from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo, _RAW_CLIP_DEF from ..event import find_events, concatenate_events @@ -249,7 +249,6 @@ def __init__(self, info, preload=False, self._dtype_ = dtype self.set_annotations(None) # If we have True or a string, actually do the preloading - self._update_times() if load_from_disk: self._preload_data(preload) self._init_kwargs = _get_argvalues() @@ -498,7 +497,7 @@ def _check_bad_segment(self, start, stop, picks, for descr in annot.description[overlaps]: if descr.lower().startswith('bad'): return descr - return self[picks, start:stop][0] + return self._getitem((picks, slice(start, stop)), return_times=False) @verbose def load_data(self, verbose=None): @@ -539,12 +538,6 @@ def _preload_data(self, preload, verbose=None): self._comp = None # no longer needed self.close() - def _update_times(self): - """Update times.""" - self._times = np.arange(self.n_times) / float(self.info['sfreq']) - # make it immutable - self._times.flags.writeable = False - @property def _first_time(self): return self.first_samp / float(self.info['sfreq']) @@ -772,14 +765,25 @@ def __getitem__(self, item): >>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP """ # noqa: E501 + return self._getitem(item) + + def _getitem(self, item, return_times=True): sel, start, stop = self._parse_get_set_params(item) if self.preload: data = self._data[sel, start:stop] else: data = self._read_segment(start=start, stop=stop, sel=sel, projector=self._projector) - times = self.times[start:stop] - return data, times + + if return_times: + # Rather than compute the entire thing just compute the subset + # times = self.times[start:stop] + # stop can be None here so don't use it directly + times = np.arange(start, start + data.shape[1], dtype=float) + times /= self.info['sfreq'] + return data, times + else: + return data def __setitem__(self, item, value): """Set raw data content.""" @@ -827,8 +831,8 @@ def get_data(self, picks=None, start=0, stop=None, start = 0 if start is None else start stop = min(self.n_times if stop is None else stop, self.n_times) if len(self.annotations) == 0 or reject_by_annotation is None: - data, times = self[picks, start:stop] - return (data, times) if return_times else data + return self._getitem( 
+ (picks, slice(start, stop)), return_times=return_times) _check_option('reject_by_annotation', reject_by_annotation.lower(), ['omit', 'nan']) onsets, ends = _annotations_starts_stops(self, ['BAD']) @@ -1196,7 +1200,6 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, lowpass = self.info.get('lowpass') lowpass = np.inf if lowpass is None else lowpass self.info['lowpass'] = min(lowpass, sfreq / 2.) - self._update_times() # See the comment above why we ignore all errors here. if events is None: @@ -1280,7 +1283,6 @@ def crop(self, tmin=0.0, tmax=None, include_tmax=True): if self.preload: # slice and copy to avoid the reference to large array self._data = self._data[:, smin:smax + 1].copy() - self._update_times() if self.annotations.orig_time is None: self.annotations.onset -= tmin @@ -1482,7 +1484,9 @@ def ch_names(self): @property def times(self): """Time points.""" - return self._times + out = _arange_div(self.n_times, float(self.info['sfreq'])) + out.flags['WRITEABLE'] = False + return out @property def n_times(self): @@ -1625,7 +1629,6 @@ def append(self, raws, preload=None): self._raw_extras += r._raw_extras self._filenames += r._filenames assert annotations.orig_time == self.info['meas_date'] - self._update_times() self.set_annotations(annotations) for edge_samp in edge_samps: onset = _sync_onset(self, (edge_samp) / self.info['sfreq'], True) diff --git a/mne/io/pick.py b/mne/io/pick.py index e78dc91bf7d..1cf702de4a4 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -982,19 +982,14 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT -def _pick_data_channels(info, exclude='bads', with_ref_meg=True): +def _pick_data_channels(info, exclude='bads', with_ref_meg=True, + with_aux=False): """Pick only data channels.""" - return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, - **_PICK_TYPES_DATA_DICT) - - -def _pick_aux_channels(info, exclude='bads'): - """Pick only auxiliary channels. - - Corresponds to EOG, ECG, EMG and BIO - """ - return pick_types(info, meg=False, eog=True, ecg=True, emg=True, bio=True, - ref_meg=False, exclude=exclude) + kwargs = _PICK_TYPES_DATA_DICT + if with_aux: + kwargs = kwargs.copy() + kwargs.update(eog=True, ecg=True, emg=True, bio=True) + return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) def _pick_data_or_ica(info, exclude=()): diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index fb0e88ad9de..f44b7bdc659 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -904,7 +904,9 @@ def get_sources(self, inst, add_channels=None, start=None, stop=None): def _sources_as_raw(self, raw, add_channels, start, stop): """Aux method.""" # merge copied instance and picked data with sources - sources = self._transform_raw(raw, start=start, stop=stop) + start, stop = _check_start_stop(raw, start, stop) + data_ = self._transform_raw(raw, start=start, stop=stop) + assert data_.shape[1] == stop - start if raw.preload: # get data and temporarily delete data = raw._data del raw._data @@ -914,28 +916,17 @@ def _sources_as_raw(self, raw, add_channels, start, stop): raw._data = data # populate copied raw. 
- start, stop = _check_start_stop(raw, start, stop) - if add_channels is not None: - raw_picked = raw.copy().pick_channels(add_channels) - data_, times_ = raw_picked[:, start:stop] - data_ = np.r_[sources, data_] - else: - data_ = sources - _, times_ = raw[0, start:stop] + if add_channels is not None and len(add_channels): + picks = pick_channels(raw.ch_names, add_channels) + data_ = np.concatenate([ + data_, raw.get_data(picks, start=start, stop=stop)]) out._data = data_ - out._times = times_ out._filenames = [None] out.preload = True - - # update first and last samples - out._first_samps = np.array([raw.first_samp + - (start if start else 0)]) - out._last_samps = np.array([out.first_samp + stop - if stop else raw.last_samp]) - + out._first_samps[:] = [out.first_samp + start] + out._last_samps[:] = [out.first_samp + data_.shape[1] - 1] out._projector = None self._export_info(out.info, raw, add_channels) - out._update_times() return out @@ -1964,9 +1955,9 @@ def _exp_var_ncomp(var, n): def _check_start_stop(raw, start, stop): """Aux function.""" out = list() - for st in (start, stop): + for st, none_ in ((start, 0), (stop, raw.n_times)): if st is None: - out.append(st) + out.append(none_) else: try: out.append(_ensure_int(st)) diff --git a/mne/preprocessing/realign.py b/mne/preprocessing/realign.py index 1f5987f11e9..b35ab6eaab5 100644 --- a/mne/preprocessing/realign.py +++ b/mne/preprocessing/realign.py @@ -94,7 +94,6 @@ def realign_raw(raw, other, t_raw, t_other, verbose=None): sfreq_new = raw.info['sfreq'] * coef other.load_data().resample(sfreq_new, verbose=True) other.info['sfreq'] = raw.info['sfreq'] - other._update_times() # 4. Crop the end of one of the recordings if necessary delta = raw.times[-1] - other.times[-1] diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index 3907c5b48ba..5e8318d669b 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -826,8 +826,11 @@ def f(x, y): assert (eog_events.ndim == 2) # Test ica fiff export + assert raw.last_samp - raw.first_samp + 1 == raw.n_times + assert raw.n_times > 100 ica_raw = ica.get_sources(raw, start=0, stop=100) - assert (ica_raw.last_samp - ica_raw.first_samp == 100) + assert ica_raw.n_times == 100 + assert ica_raw.last_samp - ica_raw.first_samp + 1 == 100 assert_equal(len(ica_raw._filenames), 1) # API consistency ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch] assert (ica.n_components_ == len(ica_chans)) diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py index 3968c7a943f..5965d1aab1f 100644 --- a/mne/stats/tests/test_cluster_level.py +++ b/mne/stats/tests/test_cluster_level.py @@ -14,9 +14,8 @@ from mne import (SourceEstimate, VolSourceEstimate, MixedSourceEstimate, SourceSpaces) -from mne.fixes import has_numba from mne.parallel import _force_serial -from mne.stats import cluster_level, ttest_ind_no_p, combine_adjacency +from mne.stats import ttest_ind_no_p, combine_adjacency from mne.stats.cluster_level import (permutation_cluster_test, f_oneway, permutation_cluster_1samp_test, spatio_temporal_cluster_test, @@ -26,22 +25,6 @@ requires_sklearn) -@pytest.fixture(scope="function", params=('Numba', 'NumPy')) -def numba_conditional(monkeypatch, request): - """Test both code paths on machines that have Numba.""" - assert request.param in ('Numba', 'NumPy') - if request.param == 'NumPy' and has_numba: - monkeypatch.setattr( - cluster_level, '_get_buddies', cluster_level._get_buddies_fallback) - 
monkeypatch.setattr( - cluster_level, '_get_selves', cluster_level._get_selves_fallback) - monkeypatch.setattr( - cluster_level, '_where_first', cluster_level._where_first_fallback) - if request.param == 'Numba' and not has_numba: - pytest.skip('Numba not installed') - yield request.param - - n_space = 50 diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index bb2c4b5acbe..fe18fcf47b3 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -983,7 +983,8 @@ def test_epochs_io_preload(tmpdir, preload): epochs_no_bl.save(temp_fname_no_bl, overwrite=True) epochs_read = read_epochs(temp_fname) epochs_no_bl_read = read_epochs(temp_fname_no_bl) - pytest.raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3]) + with pytest.raises(ValueError, match='invalid'): + epochs.apply_baseline(baseline=[1, 2, 3]) epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline) assert (isinstance(epochs_with_bl, BaseEpochs)) assert (epochs_with_bl.baseline == (epochs_no_bl_read.tmin, baseline[1])) diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index 1ba39dc25d2..4660c9e3729 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -219,7 +219,6 @@ def test_find_events(): # Reset some data for ease of comparison raw._first_samps[0] = 0 raw.info['sfreq'] = 1000 - raw._update_times() stim_channel = 'STI 014' stim_channel_idx = pick_channels(raw.info['ch_names'], diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 873ec2ee7bf..8b549321d47 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -2075,6 +2075,10 @@ def __init__(self, info, data, times, freqs, comment=None, method=None, self.preload = True self.metadata = metadata + @property + def _detrend_picks(self): + return list() + def __repr__(self): # noqa: D105 s = "time : [%f, %f]" % (self.times[0], self.times[-1]) s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1]) diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index 67dfb33fd1d..6d254307f16 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -61,7 +61,7 @@ _mask_to_onsets_offsets, _array_equal_nan, _julian_to_cal, _cal_to_julian, _dt_to_julian, _julian_to_dt, _dt_to_stamp, _stamp_to_dt, - _check_dt, _ReuseCycle) + _check_dt, _ReuseCycle, _arange_div) from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata, _prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin) from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym, diff --git a/mne/utils/mixin.py b/mne/utils/mixin.py index f18b90a3bfd..651afea66cc 100644 --- a/mne/utils/mixin.py +++ b/mne/utils/mixin.py @@ -313,6 +313,7 @@ def __iter__(self): :meth:`mne.Epochs.next`. 
""" self._current = 0 + self._current_detrend_picks = self._detrend_picks return self def __next__(self, return_event_id=False): @@ -332,16 +333,17 @@ def __next__(self, return_event_id=False): """ if self.preload: if self._current >= len(self._data): - raise StopIteration # signal the end + self._stop_iter() epoch = self._data[self._current] self._current += 1 else: is_good = False while not is_good: if self._current >= len(self.events): - raise StopIteration # signal the end properly + self._stop_iter() epoch_noproj = self._get_epoch_from_raw(self._current) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, self._current_detrend_picks) epoch = self._project_epoch(epoch_noproj) self._current += 1 is_good, _ = self._is_good_epoch(epoch) @@ -354,6 +356,11 @@ def __next__(self, return_event_id=False): else: return epoch, self.events[self._current - 1][-1] + def _stop_iter(self): + del self._current + del self._current_detrend_picks + raise StopIteration # signal the end + next = __next__ # originally for Python2, now b/c public def _check_metadata(self, metadata=None, reset_index=False): diff --git a/mne/utils/numerics.py b/mne/utils/numerics.py index 8481d8c6ae8..1d49eb7b53c 100644 --- a/mne/utils/numerics.py +++ b/mne/utils/numerics.py @@ -23,7 +23,8 @@ from ._logging import logger, warn, verbose from .check import check_random_state, _ensure_int, _validate_type -from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd +from ..fixes import (_infer_dimension_, svd_flip, stable_cumsum, _safe_svd, + jit, has_numba) from .docs import fill_doc @@ -1057,3 +1058,20 @@ def restore(self, val): else: loc = np.searchsorted(self.indices, idx) self.indices.insert(loc, idx) + + +def _arange_div_fallback(n, d): + x = np.arange(n, dtype=np.float64) + x /= d + return x + + +if has_numba: + @jit(fastmath=False) + def _arange_div(n, d): + out = np.empty(n, np.float64) + for i in range(n): + out[i] = i / d + return out +else: # pragma: no cover + _arange_div = _arange_div_fallback diff --git a/mne/utils/tests/test_numerics.py b/mne/utils/tests/test_numerics.py index 3e8f2b27e3a..bc8546a24e3 100644 --- a/mne/utils/tests/test_numerics.py +++ b/mne/utils/tests/test_numerics.py @@ -21,7 +21,7 @@ _undo_scaling_array, _PCA, requires_sklearn, _array_equal_nan, _julian_to_cal, _cal_to_julian, _dt_to_julian, _julian_to_dt, grand_average, - _ReuseCycle, requires_version) + _ReuseCycle, requires_version, numerics) base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -530,3 +530,12 @@ def test_reuse_cycle(): iterable.restore('a') assert ''.join(next(iterable) for _ in range(4)) == 'acde' assert ''.join(next(iterable) for _ in range(5)) == 'abcde' + + +@pytest.mark.parametrize('n', (0, 1, 10, 1000)) +@pytest.mark.parametrize('d', (0.0001, 1, 2.5, 1000)) +def test_arange_div(numba_conditional, n, d): + """Test Numba arange_div.""" + want = np.arange(n) / d + got = numerics._arange_div(n, d) + assert_allclose(got, want) From a3719e58b8b8891e58571ac01513916964e8002d Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 5 Feb 2021 16:03:37 -0500 Subject: [PATCH 112/387] MRG, FIX: output_type for clustering (#8842) * FIX: output_type for clustering * DOC: Better doc * FIX: Windows --- doc/changes/latest.inc | 2 ++ mne/stats/cluster_level.py | 21 +++++++++---- mne/stats/tests/test_cluster_level.py | 44 +++++++++++++++++++++++++-- mne/utils/docs.py | 25 ++++++--------- 4 files changed, 67 insertions(+), 25 deletions(-) 
diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 6bba14453c8..d54cc0795e7 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -78,6 +78,8 @@ Bugs - Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where ``depth`` was errantly restricted to be less than or equal to 1. (:gh:`8804` by `Eric Larson`_) +- Fix bug with :func:`mne.stats.permutation_cluster_1samp_test` and related clustering functions when ``adjacency=None`` and ``out_type='indices'`` (:gh:`8842` by `Eric Larson`_) + - Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_) - Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_) diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index fb0ee794657..85cbc9385fc 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -557,11 +557,20 @@ def _cluster_indices_to_mask(components, n_tot): return components -def _cluster_mask_to_indices(components): +def _cluster_mask_to_indices(components, shape): """Convert to the old format of clusters, which were bool arrays.""" for ci, c in enumerate(components): - if not isinstance(c, slice): - components[ci] = np.where(c)[0] + if isinstance(c, np.ndarray): # mask + components[ci] = np.where(c.reshape(shape)) + else: + assert isinstance(c, tuple), type(c) + c = list(c) # tuple->list + for ii, cc in enumerate(c): + if isinstance(cc, slice): + c[ii] = np.arange(cc.start, cc.stop) + else: + c[ii] = np.where(cc)[0] + components[ci] = tuple(c) return components @@ -918,7 +927,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun, else: # ndimage outputs slices or boolean masks by default if out_type == 'indices': - clusters = _cluster_mask_to_indices(clusters) + clusters = _cluster_mask_to_indices(clusters, t_obs.shape) # convert our seed to orders # check to see if we can do an exact test @@ -1087,7 +1096,7 @@ def permutation_cluster_test( no points are excluded. %(clust_stepdown)s %(clust_power_f)s - %(clust_out_none)s + %(clust_out)s %(clust_disjoint)s %(clust_buffer)s %(verbose)s @@ -1147,7 +1156,7 @@ def permutation_cluster_1samp_test( no points are excluded.
%(clust_stepdown)s %(clust_power_t)s - %(clust_out_none)s + %(clust_out)s %(clust_disjoint)s %(clust_buffer)s %(verbose)s diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py index 5965d1aab1f..7cee96472d7 100644 --- a/mne/stats/tests/test_cluster_level.py +++ b/mne/stats/tests/test_cluster_level.py @@ -21,8 +21,7 @@ spatio_temporal_cluster_test, spatio_temporal_cluster_1samp_test, ttest_1samp_no_p, summarize_clusters_stc) -from mne.utils import (run_tests_if_main, catch_logging, check_version, - requires_sklearn) +from mne.utils import catch_logging, check_version, requires_sklearn n_space = 50 @@ -671,4 +670,43 @@ def test_tfce_thresholds(numba_conditional): data, tail=1, out_type='mask', threshold=dict(start=1, step=-0.5)) -run_tests_if_main() +# 1D gives slices, 2D+ gives boolean masks +@pytest.mark.parametrize('shape', ((11,), (11, 3), (11, 1, 2))) +@pytest.mark.parametrize('out_type', ('mask', 'indices')) +@pytest.mark.parametrize('adjacency', (None, 'sparse')) +def test_output_equiv(shape, out_type, adjacency): + """Test equivalence of output types.""" + rng = np.random.RandomState(0) + n_subjects = 10 + data = rng.randn(n_subjects, *shape) + data -= data.mean(axis=0, keepdims=True) + data[:, 2:4] += 2 + data[:, 6:9] += 2 + want_mask = np.zeros(shape, int) + want_mask[2:4] = 1 + want_mask[6:9] = 2 + if adjacency is not None: + assert adjacency == 'sparse' + adjacency = combine_adjacency(*shape) + clusters = permutation_cluster_1samp_test( + X=data, n_permutations=1, adjacency=adjacency, out_type=out_type)[1] + got_mask = np.zeros_like(want_mask) + for n, clu in enumerate(clusters, 1): + if out_type == 'mask': + if len(shape) == 1 and adjacency is None: + assert isinstance(clu, tuple) + assert len(clu) == 1 + assert isinstance(clu[0], slice) + else: + assert isinstance(clu, np.ndarray) + assert clu.dtype == bool + assert clu.shape == shape + got_mask[clu] = n + else: + assert isinstance(clu, tuple) + for c in clu: + assert isinstance(c, np.ndarray) + assert c.dtype.kind == 'i' + assert out_type == 'indices' + got_mask[np.ix_(*clu)] = n + assert_array_equal(got_mask, want_mask) diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 860c17cc9ae..ed3381d4777 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1753,22 +1753,15 @@ docdict['clust_power_f'] = docdict['clust_power'].format('F') docdict['clust_out'] = """ out_type : 'mask' | 'indices' - Output format of clusters. If ``'mask'``, returns boolean arrays the same - shape as the input data, with ``True`` values indicating locations that are - part of a cluster. If ``'indices'``, returns a list of lists, where each - sublist contains the indices of locations that together form a cluster. - Note that for large datasets, ``'indices'`` may use far less memory than - ``'mask'``. Default is ``'indices'``. -""" -docdict['clust_out_none'] = """ -out_type : 'mask' | 'indices' - Output format of clusters. If ``'mask'``, returns boolean arrays the same - shape as the input data, with ``True`` values indicating locations that are - part of a cluster. If ``'indices'``, returns a list of lists, where each - sublist contains the indices of locations that together form a cluster. - Note that for large datasets, ``'indices'`` may use far less memory than - ``'mask'``. The default translates to ``'mask'`` in version 0.21 but will - change to ``'indices'`` in version 0.22. + Output format of clusters within a list. 
+ If ``'mask'``, returns a list of boolean arrays, + each with the same shape as the input data (or slices if the shape is 1D + and adjacency is None), with ``True`` values indicating locations that are + part of a cluster. If ``'indices'``, returns a list of tuple of ndarray, + where each ndarray contains the indices of locations that together form the + given cluster along the given dimension. Note that for large datasets, + ``'indices'`` may use far less memory than ``'mask'``. + Default is ``'indices'``. """ docdict['clust_disjoint'] = """ check_disjoint : bool From d4a1ce5d8a7f5db605543f09d29ccdd1ec92676f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 5 Feb 2021 19:16:17 -0500 Subject: [PATCH 113/387] MRG, ENH: Add extract_chpi_locs_kit (#8813) * WIP: Add extract_chpi_locs_kit * ENH: Add KIT hpi_result * WIP: Better * ENH: Add test * ENH: Better coreg * Update doc/changes/names.inc Co-authored-by: monkeyman192 Co-authored-by: monkeyman192 --- doc/changes/latest.inc | 4 ++ doc/changes/names.inc | 6 ++- doc/python_reference.rst | 1 + mne/chpi.py | 94 +++++++++++++++++++++++++++++++++++++--- mne/datasets/utils.py | 4 +- mne/io/kit/constants.py | 1 + mne/io/kit/coreg.py | 27 ++++++++---- mne/io/kit/kit.py | 20 ++++++--- mne/tests/test_chpi.py | 55 ++++++++++++++++------- mne/viz/_3d.py | 1 - 10 files changed, 173 insertions(+), 40 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index d54cc0795e7..2c736c0014b 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -18,10 +18,14 @@ Current (0.23.dev0) .. |Andres Rodriguez| replace:: **Andres Rodriguez** +.. |Matt Sanderson| replace:: **Matt Sanderson** + Enhancements ~~~~~~~~~~~~ - Add dbs as new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_) +- Add :func:`mne.chpi.extract_chpi_locs_kit` to read cHPI coil locations from KIT/Yokogawa data (:gh:`` **by new contributor** |Matt Sanderson|_, `Robert Seymour`_, and `Eric Larson`_) + - Update the ``notebook`` 3d backend to use ``ipyvtk_simple`` for a better integration within ``Jupyter`` (:gh:`8503` by `Guillaume Favelier`_) - Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to :meth:`mne.Report.add_bem_to_section` (:gh:`8723` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 46bfc2f8ca1..a67d2bf056f 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -266,6 +266,10 @@ .. _Robert Luke: https://github.com/rob-luke +.. _Robert Seymour: https://neurofractal.github.io + +.. _Matt Sanderson: https://github.com/monkeyman192 + .. _Mohammad Daneshzand: https://github.com/mdaneshzand .. _Fahimeh Mamashli: https://github.com/fmamashli @@ -350,4 +354,4 @@ .. _Tristan Stenner: https://github.com/tstenner/ -.. _Andres Rodriguez: https://github.com/infinitejest/ \ No newline at end of file +.. 
_Andres Rodriguez: https://github.com/infinitejest/ diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 02f6b053ae2..51479656549 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -453,6 +453,7 @@ EEG referencing: compute_chpi_locs compute_head_pos extract_chpi_locs_ctf + extract_chpi_locs_kit filter_chpi head_pos_to_trans_rot_t read_head_pos diff --git a/mne/chpi.py b/mne/chpi.py index 972cb400bd8..84eec047cc2 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -25,7 +25,10 @@ from scipy import linalg import itertools +from .event import find_events from .io.base import BaseRaw +from .io.kit.constants import KIT +from .io.kit.kit import RawKIT as _RawKIT from .io.meas_info import _simplify_info from .io.pick import (pick_types, pick_channels, pick_channels_regexp, pick_info) @@ -41,9 +44,9 @@ _regularize_out, _get_mf_picks_fix_mags) from .transforms import (apply_trans, invert_transform, _angle_between_quats, quat_to_rot, rot_to_quat, _fit_matched_points, - _quat_to_affine) + _quat_to_affine, als_ras_trans) from .utils import (verbose, logger, use_log_level, _check_fname, warn, - _validate_type, ProgressBar, _check_option) + _validate_type, ProgressBar, _check_option, _pl) # Eventually we should add: # hpicons @@ -212,6 +215,85 @@ def extract_chpi_locs_ctf(raw, verbose=None): return dict(rrs=rrs, gofs=gofs, times=times, moments=moments) +@verbose +def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None): + """Extract cHPI locations from KIT data. + + Parameters + ---------- + raw : instance of RawKIT + Raw data with KIT cHPI information. + stim_channel : str + The stimulus channel that encodes HPI measurement intervals. + %(verbose)s + + Returns + ------- + %(chpi_locs)s + + Notes + ----- + .. versionadded:: 0.23 + """ + _validate_type(raw, (_RawKIT,), 'raw') + stim_chs = [ + raw.info['ch_names'][pick] for pick in pick_types( + raw.info, stim=True, misc=True, ref_meg=False)] + _validate_type(stim_channel, str, 'stim_channel') + _check_option('stim_channel', stim_channel, stim_chs) + idx = raw.ch_names.index(stim_channel) + events_on = find_events( + raw, stim_channel=raw.ch_names[idx], output='onset', + verbose=False)[:, 0] + events_off = find_events( + raw, stim_channel=raw.ch_names[idx], output='offset', + verbose=False)[:, 0] + bad = False + if len(events_on) == 0 or len(events_off) == 0: + bad = True + else: + if events_on[-1] > events_off[-1]: + events_on = events_on[:-1] + if events_on.size != events_off.size or not + (events_on < events_off).all(): + bad = True + if bad: + raise RuntimeError( + f'Could not find appropriate cHPI intervals from {stim_channel}') + # use the midpoint for times + times = (events_on + events_off) / (2 * raw.info['sfreq']) + del events_on, events_off + # XXX remove first two rows. It is unknown currently if there is a way to + # determine from the con file the number of initial pulses that + # indicate the start of reading. The number is shown by opening the con + # file in MEG160, but I couldn't find the value in the .con file, so it + # may just always be 2... + times = times[2:] + n_coils = 5 # KIT always has 5 (hard-coded in reader) + header = raw._raw_extras[0]['dirs'][KIT.DIR_INDEX_CHPI_DATA] + dtype = np.dtype([('good', '<u4'), ('data', '<f8', (4,))]) [...] n_good = ((g_coils >= gof_limit) & (errs < dist_limit)).sum() if n_good < 3: warn(_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' - 'determine the transformation (%s)!' - % (n_good, n_coils, ', '.join('%0.2f' % g for g in g_coils))) + 'determine the transformation (%s mm/GOF)!'
+ % (n_good, n_coils, + ', '.join(f'{1000 * e:0.1f}::{g:0.2f}' + for e, g in zip(errs, g_coils)))) continue # velocities, in device coords, of HPI coils diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index b646f3dc0a9..54588a9e57a 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -251,7 +251,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.114', misc='0.8') + releases = dict(testing='0.115', misc='0.8') # And also update the "md5_hashes['testing']" variable below. # To update any other dataset, update the data archive itself (upload # an updated version) and update the md5 hash. @@ -337,7 +337,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='32fd2f6c8c7eb0784a1de6435273c48b', spm='9f43f67150e3b694b523a21eb929ea75', - testing='0e7fb1b37f924bf50ce5db9f96c67972', + testing='731f4ce20f0cb439c04c719a67ccf4d5', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', diff --git a/mne/io/kit/constants.py b/mne/io/kit/constants.py index 8cf725c7b57..69830cb3ccb 100644 --- a/mne/io/kit/constants.py +++ b/mne/io/kit/constants.py @@ -257,3 +257,4 @@ # BOOKMARKS = 15 # DIGITIZER = 25 KIT.DIR_INDEX_DIG_POINTS = 26 +KIT.DIR_INDEX_CHPI_DATA = 29 diff --git a/mne/io/kit/coreg.py b/mne/io/kit/coreg.py index d13bdbd18bb..d80a500d66f 100644 --- a/mne/io/kit/coreg.py +++ b/mne/io/kit/coreg.py @@ -36,21 +36,23 @@ def read_mrk(fname): mrk_points : ndarray, shape (n_points, 3) Marker points in MEG space [m]. """ + from .kit import _read_dirs ext = op.splitext(fname)[-1] if ext in ('.sqd', '.mrk'): with open(fname, 'rb', buffering=0) as fid: - fid.seek(192) - mrk_offset = np.fromfile(fid, INT32, 1)[0] - fid.seek(mrk_offset) + dirs = _read_dirs(fid) + fid.seek(dirs[KIT.DIR_INDEX_COREG]['offset']) # skips match_done, meg_to_mri and mri_to_meg - fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR) + fid.seek(KIT.INT + (2 * KIT.DOUBLE * 16), SEEK_CUR) mrk_count = np.fromfile(fid, INT32, 1)[0] pts = [] for _ in range(mrk_count): - # skips mri/meg mrk_type and done, mri_marker - fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR) - pts.append(np.fromfile(fid, dtype=FLOAT64, count=3)) - mrk_points = np.array(pts) + # mri_type, meg_type, mri_done, meg_done + _, _, _, meg_done = np.fromfile(fid, INT32, 4) + _, meg_pts = np.fromfile(fid, FLOAT64, 6).reshape(2, 3) + if meg_done: + pts.append(meg_pts) + mrk_points = np.array(pts) elif ext == '.txt': mrk_points = _read_dig_kit(fname, unit='m') elif ext == '.pickled': @@ -123,6 +125,8 @@ def _set_dig_kit(mrk, elp, hsp, eeg): List of digitizer points for info['dig']. dev_head_t : dict A dictionary describing the device-head transformation. + hpi_results : list + The hpi results.
""" from ...coreg import fit_matched_points, _decimate_points @@ -167,7 +171,12 @@ def _set_dig_kit(mrk, elp, hsp, eeg): dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg) dev_head_t = Transform('meg', 'head', trans) - return dig_points, dev_head_t + hpi_results = [dict(dig_points=[ + dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + for ci, r in enumerate(mrk)], coord_trans=dev_head_t)] + + return dig_points, dev_head_t, hpi_results def _read_dig_kit(fname, unit='auto'): diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index e486276d738..da2a6b3bc6c 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -50,10 +50,8 @@ def _call_digitization(info, mrk, elp, hsp, kit_info): # setup digitization if mrk is not None and elp is not None and hsp is not None: - dig_points, dev_head_t = _set_dig_kit( + info['dig'], info['dev_head_t'], info['hpi_results'] = _set_dig_kit( mrk, elp, hsp, kit_info['eeg_dig']) - info['dig'] = dig_points - info['dev_head_t'] = dev_head_t elif mrk is not None or elp is not None or hsp is not None: raise ValueError("mrk, elp and hsp need to be provided as a group " "(all or none)") @@ -463,6 +461,17 @@ def _read_dir(fid): count=np.fromfile(fid, INT32, 1)[0]) +@verbose +def _read_dirs(fid, verbose=None): + dirs = list() + dirs.append(_read_dir(fid)) + for ii in range(dirs[0]['count'] - 1): + logger.debug(f' KIT dir entry {ii} @ {fid.tell()}') + dirs.append(_read_dir(fid)) + assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + return dirs + + @verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): @@ -488,14 +497,11 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False - sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug # # directories (0) # - dirs.append(_read_dir(fid)) - dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1)) - assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + sqd['dirs'] = dirs = _read_dirs(fid) # # system (1) diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index a412d9071c4..5e604215d2b 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -13,18 +13,20 @@ from mne import pick_types, pick_info from mne.forward._compute_forward import _MAG_FACTOR from mne.io import (read_raw_fif, read_raw_artemis123, read_raw_ctf, read_info, - RawArray) + RawArray, read_raw_kit) from mne.io.constants import FIFF from mne.chpi import (compute_chpi_amplitudes, compute_chpi_locs, compute_head_pos, _setup_ext_proj, _chpi_locs_to_times_dig, _compute_good_distances, extract_chpi_locs_ctf, head_pos_to_trans_rot_t, read_head_pos, write_head_pos, filter_chpi, - _get_hpi_info, _get_hpi_initial_fit) -from mne.transforms import rot_to_quat, _angle_between_quats -from mne.simulation import add_chpi -from mne.utils import run_tests_if_main, catch_logging, assert_meg_snr, verbose + _get_hpi_info, _get_hpi_initial_fit, + extract_chpi_locs_kit) from mne.datasets import testing +from mne.simulation import add_chpi +from mne.transforms import rot_to_quat, _angle_between_quats +from mne.utils import catch_logging, assert_meg_snr, verbose +from mne.viz import plot_head_positions base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') ctf_fname = op.join(base_dir, 'test_ctf_raw.fif') @@ -49,6 +51,12 @@ art_mc_fname = op.join(data_path, 'ARTEMIS123', 'Artemis_Data_2017-04-04' + 
'-15h-44m-22s_Motion_Translation-z_mc.pos') +con_fname = op.join(data_path, 'KIT', 'MQKIT_125_2sec.con') +mrk_fname = op.join(data_path, 'KIT', 'MQKIT_125.mrk') +elp_fname = op.join(data_path, 'KIT', 'MQKIT_125.elp') +hsp_fname = op.join(data_path, 'KIT', 'MQKIT_125.hsp') +berlin_fname = op.join(data_path, 'KIT', 'data_berlin.con') + @testing.requires_testing_data def test_chpi_adjust(): @@ -592,25 +600,42 @@ def test_chpi_subtraction_filter_chpi(): assert '2 cHPI' in log.getvalue() -def calculate_head_pos_ctf(raw): - """Wrap to facilitate API change.""" - chpi_locs = extract_chpi_locs_ctf(raw) - return compute_head_pos(raw.info, chpi_locs) - - @testing.requires_testing_data def test_calculate_head_pos_ctf(): - """Test extracting of cHPI positions from ctf data.""" + """Test extracting of cHPI positions from CTF data.""" raw = read_raw_ctf(ctf_chpi_fname) - quats = calculate_head_pos_ctf(raw) + chpi_locs = extract_chpi_locs_ctf(raw) + quats = compute_head_pos(raw.info, chpi_locs) mc_quats = read_head_pos(ctf_chpi_pos_fname) mc_quats[:, 9] /= 10000 # had old factor in there twice somehow... _assert_quats(quats, mc_quats, dist_tol=0.004, angle_tol=2.5, err_rtol=1., vel_atol=7e-3) # 7 mm/s + plot_head_positions(quats, info=raw.info) raw = read_raw_fif(ctf_fname) with pytest.raises(RuntimeError, match='Could not find'): - calculate_head_pos_ctf(raw) + extract_chpi_locs_ctf(raw) -run_tests_if_main() +@testing.requires_testing_data +def test_calculate_head_pos_kit(): + """Test calculation of head position using KIT data.""" + raw = read_raw_kit(con_fname, mrk_fname, elp_fname, hsp_fname) + assert len(raw.info['hpi_results']) == 1 + chpi_locs = extract_chpi_locs_kit(raw) + assert chpi_locs['rrs'].shape == (2, 5, 3) + assert_array_less(chpi_locs['gofs'], 1.) + assert_array_less(0.98, chpi_locs['gofs']) + quats = compute_head_pos(raw.info, chpi_locs) + assert quats.shape == (2, 10) + # plotting works + plot_head_positions(quats, info=raw.info) + raw_berlin = read_raw_kit(berlin_fname) + assert_allclose(raw_berlin.info['dev_head_t']['trans'], np.eye(4)) + assert len(raw_berlin.info['hpi_results']) == 0 + with pytest.raises(ValueError, match='Invalid value'): + extract_chpi_locs_kit(raw_berlin) + with pytest.raises(RuntimeError, match='not find appropriate'): + extract_chpi_locs_kit(raw_berlin, 'STI 014') + with pytest.raises(RuntimeError, match='no initial cHPI'): + compute_head_pos(raw_berlin.info, chpi_locs) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 30b128faa85..d21cf821d74 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -91,7 +91,6 @@ def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z', mode : str Can be 'traces' (default) to show position and quaternion traces, or 'field' to show the position as a vector field over time. - The 'field' mode requires matplotlib 1.4+. cmap : colormap Colormap to use for the trace plot, default is "viridis". 
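The intended end-to-end use of the new KIT reader, mirroring the test above (file names here are placeholders; ``stim_channel`` defaults to ``'MISC 064'`` per the new function signature):

    import mne

    raw = mne.io.read_raw_kit('data.con', mrk='marker.mrk',
                              elp='points.elp', hsp='headshape.hsp')
    chpi_locs = mne.chpi.extract_chpi_locs_kit(raw)  # coil fit info per time
    quats = mne.chpi.compute_head_pos(raw.info, chpi_locs)
    mne.viz.plot_head_positions(quats, info=raw.info)
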
direction : str From 0735446dd5bfe265346d08aa64d6b7cee680cff8 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 7 Feb 2021 15:21:53 -0500 Subject: [PATCH 114/387] MRG, ENH: Speed up import by caching indented params (#8829) * ENH: Speed up import by caching indented params * FIX: More nesting * FIX: Import * FIX: Old Python * FIX: More * DOC: latest * FIX: Fixes * FIX: Import * FIX: Revert safe svds --- doc/changes/latest.inc | 2 + doc/install/contributing.rst | 14 ++-- mne/beamformer/_compute_beamformer.py | 3 +- mne/beamformer/_rap_music.py | 6 +- mne/bem.py | 4 +- mne/channels/channels.py | 3 +- mne/channels/interpolation.py | 2 +- mne/chpi.py | 6 +- mne/connectivity/spectral.py | 3 +- mne/cov.py | 3 +- mne/cuda.py | 4 +- mne/decoding/csp.py | 4 +- mne/decoding/receptive_field.py | 2 +- mne/decoding/ssd.py | 3 +- mne/decoding/tests/test_receptive_field.py | 3 +- mne/decoding/time_delaying_ridge.py | 3 +- mne/dipole.py | 18 +++-- mne/externals/h5io/_h5io.py | 16 +++-- mne/filter.py | 4 +- mne/fixes.py | 36 +++++++--- mne/forward/_field_interpolation.py | 2 +- mne/forward/forward.py | 3 +- mne/gui/_kit2fiff_gui.py | 5 +- mne/inverse_sparse/_gamma_map.py | 9 ++- mne/inverse_sparse/mxne_debiasing.py | 9 ++- mne/inverse_sparse/mxne_inverse.py | 2 +- mne/inverse_sparse/mxne_optim.py | 32 +++++---- mne/io/compensator.py | 3 +- mne/io/kit/kit.py | 5 +- mne/io/meas_info.py | 3 +- mne/io/open.py | 2 +- mne/io/proc_history.py | 4 +- mne/io/proj.py | 4 +- mne/io/reference.py | 2 +- mne/io/tag.py | 2 +- mne/io/write.py | 4 +- mne/label.py | 4 +- mne/minimum_norm/inverse.py | 4 +- mne/minimum_norm/resolution_matrix.py | 5 +- mne/minimum_norm/time_frequency.py | 2 +- mne/morph.py | 13 ++-- mne/preprocessing/_csd.py | 4 +- mne/preprocessing/ica.py | 5 +- mne/preprocessing/maxwell.py | 9 ++- mne/preprocessing/nirs/_beer_lambert_law.py | 2 +- mne/preprocessing/nirs/nirs.py | 3 +- mne/preprocessing/otp.py | 3 +- mne/preprocessing/xdawn.py | 3 +- mne/proj.py | 6 +- mne/rank.py | 2 +- mne/simulation/metrics.py | 3 +- mne/source_estimate.py | 11 ++- mne/source_space.py | 9 ++- mne/stats/_adjacency.py | 2 +- mne/stats/cluster_level.py | 5 +- mne/stats/regression.py | 5 +- mne/surface.py | 5 +- mne/tests/test_epochs.py | 2 +- mne/tests/test_filter.py | 2 +- mne/tests/test_import_nesting.py | 8 +-- mne/tests/test_source_estimate.py | 3 +- mne/time_frequency/_stft.py | 5 +- mne/time_frequency/_stockwell.py | 24 ++++--- mne/time_frequency/ar.py | 2 +- mne/time_frequency/csd.py | 4 +- mne/time_frequency/multitaper.py | 5 +- mne/time_frequency/tfr.py | 9 ++- mne/transforms.py | 14 ++-- mne/utils/__init__.py | 4 +- mne/utils/_logging.py | 3 +- mne/utils/_testing.py | 2 +- mne/utils/docs.py | 57 ++++++++++++++-- mne/utils/fetching.py | 6 +- mne/utils/linalg.py | 75 +++++++++++---------- mne/utils/numerics.py | 3 +- mne/viz/_3d.py | 5 +- mne/viz/_brain/_brain.py | 3 +- mne/viz/misc.py | 4 +- 78 files changed, 356 insertions(+), 219 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 2c736c0014b..9714ae9f201 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -40,6 +40,8 @@ Enhancements - Reduce memory consumption of `mne.io.Raw` and speed up epoching when thousands of events are present for `mne.Epochs` (:gh:`8801` by `Eric Larson`_) +- Speed up ``import mne`` by reducing function creation overhead (:gh:`8829` by `Eric Larson`_) + - `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) - `mne.Report` has 
gained a new method `~mne.Report.add_custom_css` for adding user-defined styles (:gh:`8762` by `Richard Höchenberger`_) diff --git a/doc/install/contributing.rst b/doc/install/contributing.rst index 8b1ff860e05..e1d830ddcb3 100644 --- a/doc/install/contributing.rst +++ b/doc/install/contributing.rst @@ -777,17 +777,17 @@ Code organization Importing --------- -Import modules in this order: +Import modules in this order, preferably alphabetized within each subsection: -1. Python built-in (``os``, ``copy``, ``functools``, etc) -2. standard scientific (``numpy as np``, ``scipy.signal``, etc) -3. others -4. MNE-Python imports (e.g., ``from .pick import pick_types``) +1. Python built-in (``copy``, ``functools``, ``os``, etc.) +2. NumPy (``numpy as np``) and, in test files, pytest (``pytest``) +3. MNE-Python imports (e.g., ``from .pick import pick_types``) When importing from other parts of MNE-Python, use relative imports in the main codebase and absolute imports in tests, tutorials, and how-to examples. Imports -for ``matplotlib`` and optional modules (``sklearn``, ``pandas``, etc.) should -be nested (i.e., within a function or method, not at the top of a file). +for ``matplotlib``, ``scipy``, and optional modules (``sklearn``, ``pandas``, +etc.) should be nested (i.e., within a function or method, not at the top of a +file). This helps reduce import time and limit hard requirements for using MNE. Return types diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py index 387f631cf80..3393fc72f8e 100644 --- a/mne/beamformer/_compute_beamformer.py +++ b/mne/beamformer/_compute_beamformer.py @@ -9,7 +9,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from ..cov import Covariance, make_ad_hoc_cov from ..forward.forward import is_fixed_orient, _restrict_forward_to_src_sel @@ -377,7 +376,7 @@ def _compute_bf_terms(Gk, Cm_inv): 'matrix or using regularization.') noise = loading_factor else: - noise, _ = linalg.eigh(Cm) + noise, _ = np.linalg.eigh(Cm) noise = noise[-rank] noise = max(noise, loading_factor) W /= np.sqrt(noise) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 60ffe1e741a..ce87d983d8a 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -6,7 +6,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from ..forward import is_fixed_orient, convert_forward_solution from ..io.pick import pick_channels_evoked, pick_info, pick_channels_forward @@ -47,6 +46,7 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, selected active dipoles and their estimated orientation. Computed only if return_explained_data is True. 
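The nested-import convention codified in ``contributing.rst`` above reads like this in practice (the function and variable names are illustrative only):

    import numpy as np  # NumPy stays at module level


    def _whiten(cov):
        """Return a whitening matrix; SciPy is imported at call time."""
        from scipy import linalg  # nested: not paid at ``import mne`` time
        vals, vecs = linalg.eigh(cov)
        return (vecs / np.sqrt(np.maximum(vals, 1e-15))) @ vecs.T
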
""" + from scipy import linalg info = pick_info(info, picks) del picks # things are much simpler if we avoid surface orientation @@ -184,6 +184,7 @@ def _make_dipoles(times, poss, oris, sol, gof): def _compute_subcorr(G, phi_sig): """Compute the subspace correlation.""" + from scipy import linalg Ug, Sg, Vg = linalg.svd(G, full_matrices=False) # Now we look at the actual rank of the forward fields # in G and handle the fact that it might be rank defficient @@ -197,11 +198,12 @@ def _compute_subcorr(G, phi_sig): tmp = np.dot(Ug.T.conjugate(), phi_sig) Uc, Sc, _ = linalg.svd(tmp, full_matrices=False) X = np.dot(Vg.T / Sg[None, :], Uc[:, 0]) # subcorr - return Sc[0], X / linalg.norm(X) + return Sc[0], X / np.linalg.norm(X) def _compute_proj(A): """Compute the orthogonal projection operation for a manifold vector A.""" + from scipy import linalg U, _, _ = linalg.svd(A, full_matrices=False) return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate()) diff --git a/mne/bem.py b/mne/bem.py index e3c40d1374e..7ba81816a40 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -16,7 +16,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from .io.constants import FIFF, FWD from .io._digitization import _dig_kind_dict, _dig_kind_rev, _dig_kind_ints @@ -228,7 +227,7 @@ def _fwd_bem_multi_solution(solids, gamma, nps): slice_k = slice(offsets[si_2], offsets[si_2 + 1]) solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult solids += np.eye(n_tot) - return linalg.inv(solids, overwrite_a=True) + return np.linalg.inv(solids) def _fwd_bem_homog_solution(solids, nps): @@ -634,6 +633,7 @@ def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): def _compose_linear_fitting_data(mu, u): """Get the linear fitting data.""" + from scipy import linalg k1 = np.arange(1, u['nterms']) mu1ns = mu[0] ** k1 # data to be fitted diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 2b3eef06dad..d4c33c3b658 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -15,7 +15,6 @@ from functools import partial import numpy as np -from scipy import sparse from ..defaults import HEAD_SIZE_DEFAULT, _handle_default from ..transforms import _frame_to_str @@ -1281,6 +1280,7 @@ def _ch_neighbor_adjacency(ch_names, neighbors): ch_adjacency : scipy.sparse matrix The adjacency matrix. """ + from scipy import sparse if len(ch_names) != len(neighbors): raise ValueError('`ch_names` and `neighbors` must ' 'have the same length') @@ -1408,6 +1408,7 @@ def _compute_ch_adjacency(info, ch_type): ch_names : list The list of channel names present in adjacency matrix. """ + from scipy import sparse from scipy.spatial import Delaunay from .. import spatial_tris_adjacency from ..channels.layout import _find_topomap_coords, _pair_grad_sensors diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py index 7f9d00af9f3..1574dcb9c6b 100644 --- a/mne/channels/interpolation.py +++ b/mne/channels/interpolation.py @@ -4,7 +4,6 @@ import numpy as np from numpy.polynomial.legendre import legval -from scipy import linalg from ..utils import logger, warn, verbose from ..io.meas_info import _simplify_info @@ -83,6 +82,7 @@ def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): Spherical splines for scalp potential and current density mapping. Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7. 
""" + from scipy import linalg pos_from = pos_from.copy() pos_to = pos_to.copy() n_from = pos_from.shape[0] diff --git a/mne/chpi.py b/mne/chpi.py index 84eec047cc2..4785d82be3f 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -22,7 +22,6 @@ from functools import partial import numpy as np -from scipy import linalg import itertools from .event import find_events @@ -569,7 +568,7 @@ def _setup_hpi_amplitude_fitting(info, t_window, remove_aliased=False, model += [np.sin(l_t), np.cos(l_t)] # line freqs model += [slope, np.ones(slope.shape)] model = np.concatenate(model, axis=1) - inv_model = linalg.pinv(model) + inv_model = np.linalg.pinv(model) inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs)) proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order) @@ -590,6 +589,7 @@ def _reorder_inv_model(inv_model, n_freqs): def _setup_ext_proj(info, ext_order): + from scipy import linalg meg_picks = pick_types(info, meg=True, eeg=False, exclude='bads') info = pick_info(_simplify_info(info), meg_picks) # makes a copy _, _, _, _, mag_or_fine = _get_mf_picks_fix_mags( @@ -1179,7 +1179,7 @@ def filter_chpi(raw, include_line=True, t_step=0.01, t_window='auto', this_recon = recon else: # first or last window model = hpi['model'][:this_len] - inv_model = linalg.pinv(model) + inv_model = np.linalg.pinv(model) this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T this_data = raw._data[meg_picks, time_sl] subt_pt = min(midpt + n_step, n_times) diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py index 2aa99f423ee..5f13dcfb8d1 100644 --- a/mne/connectivity/spectral.py +++ b/mne/connectivity/spectral.py @@ -10,7 +10,7 @@ from .utils import check_indices from ..utils import _check_option -from ..fixes import _get_args, rfftfreq +from ..fixes import _get_args, _import_fft from ..parallel import parallel_func from ..source_estimate import _BaseSourceEstimate from ..epochs import BaseEpochs @@ -934,6 +934,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, mode, fskip, n_bands, cwt_freqs, faverage): """Check and precompute dimensions of results data.""" + rfftfreq = _import_fft('rfftfreq') first_epoch = epoch_block[0] # get the data size and time scale diff --git a/mne/cov.py b/mne/cov.py index 4be5d78f314..6eaf57ed0ba 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -11,7 +11,6 @@ import os import numpy as np -from scipy import linalg, sparse from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS from .io.write import start_file, end_file @@ -1634,6 +1633,7 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', -------- mne.compute_covariance """ # noqa: E501 + from scipy import linalg cov = cov.copy() info._check_consistency() scalings = _handle_default('scalings_cov_rank', scalings) @@ -1934,6 +1934,7 @@ def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, def _read_cov(fid, node, cov_kind, limited=False, verbose=None): """Read a noise covariance matrix.""" # Find all covariance matrices + from scipy import sparse covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV) if len(covs) == 0: raise ValueError('No covariance matrices found') diff --git a/mne/cuda.py b/mne/cuda.py index 07abf3df231..6b59b8dfa91 100644 --- a/mne/cuda.py +++ b/mne/cuda.py @@ -4,7 +4,7 @@ import numpy as np -from .fixes import rfft, irfft +from .fixes import _import_fft from .utils import (sizeof_fmt, logger, get_config, warn, _explain_exception, verbose) @@ -154,6 +154,7 @@ def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, 
----- This function is designed to be used with fft_multiply_repeated(). """ + rfft, irfft = _import_fft(('rfft', 'irfft')) cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, h_fft=rfft(h, n=n_fft)) if n_jobs == 'cuda': @@ -246,6 +247,7 @@ def _setup_cuda_fft_resample(n_jobs, W, new_len): ----- This function is designed to be used with fft_resample(). """ + rfft, irfft = _import_fft(('rfft', 'irfft')) cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft) rfft_len_x = len(W) // 2 + 1 # fold the window onto inself (should be symmetric) and truncate diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py index 8414c483a2a..a4d5c47a6e5 100644 --- a/mne/decoding/csp.py +++ b/mne/decoding/csp.py @@ -10,7 +10,6 @@ import copy as cp import numpy as np -from scipy import linalg from .base import BaseEstimator from .mixin import TransformerMixin @@ -160,6 +159,7 @@ def fit(self, X, y): self : instance of CSP Returns the modified instance. """ + from scipy import linalg self._check_Xy(X, y) self._classes = np.unique(y) @@ -532,6 +532,7 @@ def _epoch_cov(self, x_class): return cov, weight def _decompose_covs(self, covs, sample_weights): + from scipy import linalg n_classes = len(covs) if n_classes == 2: eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0)) @@ -761,6 +762,7 @@ def fit(self, X, y): self : instance of SPoC Returns the modified instance. """ + from scipy import linalg self._check_Xy(X, y) if len(np.unique(y)) < 2: diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index b880e176193..c6728d94d40 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -7,7 +7,6 @@ import numbers import numpy as np -from scipy import linalg from .base import get_coef, BaseEstimator, _check_estimator from .time_delaying_ridge import TimeDelayingRidge @@ -170,6 +169,7 @@ def fit(self, X, y): self : instance The instance so you can chain operations. """ + from scipy import linalg if self.scoring not in _SCORERS.keys(): raise ValueError('scoring must be one of %s, got' '%s ' % (sorted(_SCORERS.keys()), self.scoring)) diff --git a/mne/decoding/ssd.py b/mne/decoding/ssd.py index e62f4703552..2b744916cdd 100644 --- a/mne/decoding/ssd.py +++ b/mne/decoding/ssd.py @@ -3,7 +3,7 @@ # License: BSD (3-clause) import numpy as np -from scipy.linalg import eigh + from ..filter import filter_data from ..cov import _regularized_covariance from . import TransformerMixin, BaseEstimator @@ -153,6 +153,7 @@ def fit(self, X, y=None): self : instance of SSD Returns the modified instance. 
""" + from scipy.linalg import eigh self._check_X(X) X_aux = X[..., self.picks_, :] diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index 418eeed69ec..43a5b0dc818 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -6,9 +6,10 @@ import pytest import numpy as np +from numpy import einsum +from numpy.fft import rfft, irfft from numpy.testing import assert_array_equal, assert_allclose, assert_equal -from mne.fixes import einsum, rfft, irfft from mne.utils import requires_sklearn, run_tests_if_main from mne.decoding import ReceptiveField, TimeDelayingRidge from mne.decoding.receptive_field import (_delay_time_series, _SCORERS, diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py index 243c50585cc..591a9920cb3 100644 --- a/mne/decoding/time_delaying_ridge.py +++ b/mne/decoding/time_delaying_ridge.py @@ -6,7 +6,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from .base import BaseEstimator from ..cuda import _setup_cuda_fft_multiply_repeated @@ -146,6 +145,7 @@ def _toeplitz_dot(a, b): def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct', normed=False): """Compute regularization parameter from neighbors.""" + from scipy import linalg from scipy.sparse.csgraph import laplacian known_types = ('ridge', 'laplacian') if isinstance(reg_type, str): @@ -201,6 +201,7 @@ def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct', def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in): """Fit the model using correlation matrices.""" # do the regularized solving + from scipy import linalg n_ch_out = x_y.shape[1] assert x_y.shape[0] % n_ch_x == 0 n_delays = x_y.shape[0] // n_ch_x diff --git a/mne/dipole.py b/mne/dipole.py index 6361ed79472..f40cb5fd3e4 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -7,11 +7,11 @@ # License: Simplified BSD from copy import deepcopy +import functools from functools import partial import re import numpy as np -from scipy import linalg from .cov import read_cov, compute_whitener from .io.constants import FIFF @@ -33,7 +33,7 @@ from .parallel import parallel_func from .utils import (logger, verbose, _time_mask, warn, _check_fname, check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin, - _svd_lwork, _repeated_svd, ddot, dgemv, dgemm) + _svd_lwork, _repeated_svd, _get_blas_funcs) @fill_doc @@ -713,6 +713,7 @@ def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1): B_orig = B.copy() # Apply projection and whiten (cov has projections already) + _, _, dgemm = _get_ddot_dgemv_dgemm() B = dgemm(1., B, whitener.T) # column normalization doesn't affect our fitting, so skip for now @@ -758,8 +759,14 @@ def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None, return 1. - gof +@functools.lru_cache(None) +def _get_ddot_dgemv_dgemm(): + return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm')) + + def _dipole_gof(uu, sing, vv, B, B2): """Calculate the goodness of fit from the forward SVD.""" + ddot, dgemv, _ = _get_ddot_dgemv_dgemm() ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) 
> 0.2 else 2 one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B) Bm2 = ddot(one, one) # np.sum(one * one) @@ -769,6 +776,7 @@ def _dipole_gof(uu, sing, vv, B, B2): def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None): """Fit the dipole moment once the location is known.""" + from scipy import linalg if 'fwd' in fwd_data: # should be a single precomputed "guess" (i.e., fixed position) assert rd is None @@ -933,6 +941,7 @@ def _fit_confidence(rd, Q, ori, whitener, fwd_data): # # And then the confidence interval is the diagonal of C, scaled by 1.96 # (for 95% confidence). + from scipy import linalg direction = np.empty((3, 3)) # The coordinate system has the x axis aligned with the dipole orientation, direction[0] = ori @@ -1157,6 +1166,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, ----- .. versionadded:: 0.9.0 """ + from scipy import linalg # This could eventually be adapted to work with other inputs, these # are what is needed: @@ -1212,7 +1222,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, kind = 'rad' else: # MEG-only # Use the minimum distance to the MEG sensors as the radius then - R = np.dot(linalg.inv(info['dev_head_t']['trans']), + R = np.dot(np.linalg.inv(info['dev_head_t']['trans']), np.hstack([r0, [1.]]))[:3] # r0 -> device R = R - [info['chs'][pick]['loc'][:3] for pick in pick_types(info, meg=True, exclude=[])] @@ -1350,7 +1360,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards( fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs) # decompose ahead of time - guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False) + guess_fwd_svd = [linalg.svd(fwd, full_matrices=False) for fwd in np.array_split(guess_fwd, len(guess_src['rr']))] guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd, diff --git a/mne/externals/h5io/_h5io.py b/mne/externals/h5io/_h5io.py index ce9724b5ae6..8c7afbdbf22 100644 --- a/mne/externals/h5io/_h5io.py +++ b/mne/externals/h5io/_h5io.py @@ -11,10 +11,6 @@ from os import path as op import numpy as np -try: - from scipy import sparse -except ImportError: - sparse = None # Adapted from six PY3 = sys.version_info[0] == 3 @@ -25,6 +21,14 @@ tab_str = '----' +def _import_sparse(): + try: + from scipy import sparse + except ImportError: + sparse = None + return sparse + + ############################################################################## # WRITING @@ -126,6 +130,7 @@ def write_hdf5(fname, data, overwrite=False, compression=4, def _triage_write(key, value, root, comp_kw, where, cleanup_data, slash='error', title=None, use_json=False): + sparse = _import_sparse() if key != title and '/' in key: if slash == 'error': raise ValueError('Found a key with "/", ' @@ -274,6 +279,7 @@ def _triage_read(node, slash='ignore'): if slash not in ['ignore', 'replace']: raise ValueError("slash must be one of 'replace', 'ignore'") h5py = _check_h5py() + sparse = _import_sparse() type_str = node.attrs['TITLE'] if isinstance(type_str, bytes): type_str = type_str.decode() @@ -382,7 +388,7 @@ def object_diff(a, b, pre=''): diffs : str A string representation of the differences. 
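The memoized BLAS lookup added to ``dipole.py`` above boils down to the following idiom (a sketch: ``get_blas_funcs`` is SciPy's, while the helper name is invented and only stands in for the patch's ``_get_blas_funcs`` wrapper):

    import functools

    import numpy as np


    @functools.lru_cache(None)
    def _blas_ddot_dgemv_dgemm():
        """Resolve double-precision BLAS routines once, then memoize."""
        from scipy.linalg import get_blas_funcs
        return get_blas_funcs(('dot', 'gemv', 'gemm'),
                              (np.empty(0, np.float64),))


    ddot, dgemv, dgemm = _blas_ddot_dgemv_dgemm()
    C = dgemm(1., np.eye(3), np.eye(3))  # alpha * (a @ b)
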
""" - + sparse = _import_sparse() try: from pandas import DataFrame, Series except ImportError: diff --git a/mne/filter.py b/mne/filter.py index 5477604ba67..9bb411ef9e3 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -7,10 +7,10 @@ import numpy as np from .annotations import _annotations_starts_stops +from .fixes import _import_fft from .io.pick import _picks_to_idx from .cuda import (_setup_cuda_fft_multiply_repeated, _fft_multiply_repeated, _setup_cuda_fft_resample, _fft_resample, _smart_pad) -from .fixes import irfft, ifftshift, fftfreq from .parallel import parallel_func, check_n_jobs from .time_frequency.multitaper import _mt_spectra, _compute_mt_params from .utils import (logger, verbose, sum_squared, warn, _pl, @@ -1428,6 +1428,7 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', n_jobs=1, up=up/down and down=1. """ from scipy.signal import get_window + ifftshift, fftfreq = _import_fft(('ifftshift', 'fftfreq')) # check explicitly for backwards compatibility if not isinstance(axis, int): err = ("The axis parameter needs to be an integer (got %s). " @@ -2255,6 +2256,7 @@ def design_mne_c_filter(sfreq, l_freq=None, h_freq=40., 4197 frequencies are directly constructed, with zeroes in the stop-band and ones in the passband, with squared cosine ramps in between. """ + irfft = _import_fft('irfft') n_freqs = (4096 + 2 * 2048) // 2 + 1 freq_resp = np.ones(n_freqs) l_freq = 0 if l_freq is None else float(l_freq) diff --git a/mne/fixes.py b/mne/fixes.py index d7abc0d69a8..593dbf3c327 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -12,17 +12,15 @@ # Lars Buitinck # License: BSD -import inspect from distutils.version import LooseVersion +import functools +import inspect from math import log import os from pathlib import Path import warnings import numpy as np -import scipy -from scipy import linalg -from scipy.linalg import LinAlgError ############################################################################### @@ -49,6 +47,7 @@ def _safe_svd(A, **kwargs): # https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501 # For SciPy 0.18 and up, we can work around it by using # lapack_driver='gesvd' instead. + from scipy import linalg if kwargs.get('overwrite_a', False): raise ValueError('Cannot set overwrite_a=True with this function') try: @@ -63,6 +62,11 @@ def _safe_svd(A, **kwargs): raise +def _csc_matrix_cast(x): + from scipy.sparse import csc_matrix + return csc_matrix(x) + + ############################################################################### # Backporting nibabel's read_geometry @@ -144,10 +148,22 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): ############################################################################### # Triaging FFT functions to get fast pocketfft (SciPy 1.4) -try: - from scipy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift -except ImportError: - from numpy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift +@functools.lru_cache(None) +def _import_fft(name): + single = False + if not isinstance(name, tuple): + name = (name,) + single = True + try: + from scipy.fft import rfft # noqa analysis:ignore + except ImportError: + from numpy import fft # noqa + else: + from scipy import fft # noqa + out = [getattr(fft, n) for n in name] + if single: + out = out[0] + return out ############################################################################### @@ -564,6 +580,7 @@ def _set_covariance(self, covariance): is computed. 
""" + from scipy import linalg # covariance = check_array(covariance) # set covariance self.covariance_ = covariance @@ -582,6 +599,7 @@ def get_precision(self): The precision matrix associated to the current covariance object. """ + from scipy import linalg if self.store_precision: precision = self.precision_ else: @@ -677,6 +695,7 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True, `self` and `comp_cov` covariance estimators. """ + from scipy import linalg # compute the error error = comp_cov - self.covariance_ # compute the error norm @@ -753,6 +772,7 @@ def log_likelihood(emp_cov, precision): def _logdet(A): """Compute the log det of a positive semidefinite matrix.""" + from scipy import linalg vals = linalg.eigvalsh(A) # avoid negative (numerical errors) or zero (semi-definite matrix) values tol = vals.max() * vals.size * np.finfo(np.float64).eps diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py index d4b3b938fb2..3f01ace235f 100644 --- a/mne/forward/_field_interpolation.py +++ b/mne/forward/_field_interpolation.py @@ -9,7 +9,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from ..io.constants import FWD, FIFF from ..bem import _check_origin @@ -106,6 +105,7 @@ def _compute_mapping_matrix(fmd, info): def _pinv_trunc(x, miss): """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" + from scipy import linalg u, s, v = linalg.svd(x, full_matrices=False) # Eigenvalue truncation diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 3f89b3f0045..e3d8e7c8eed 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -12,7 +12,6 @@ import re import numpy as np -from scipy import sparse import shutil import os @@ -168,6 +167,7 @@ def _block_diag(A, n): bd : sparse matrix The block diagonal matrix """ + from scipy import sparse if sparse.issparse(A): # then make block sparse raise NotImplementedError('sparse reversal not implemented yet') ma, na = A.shape @@ -583,6 +583,7 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, fwd : Forward The modified forward solution. """ + from scipy import sparse fwd = fwd.copy() if copy else fwd if force_fixed is True: diff --git a/mne/gui/_kit2fiff_gui.py b/mne/gui/_kit2fiff_gui.py index d04d53134ca..008aa2ebac1 100644 --- a/mne/gui/_kit2fiff_gui.py +++ b/mne/gui/_kit2fiff_gui.py @@ -8,10 +8,9 @@ import os import queue import sys +from threading import Thread import numpy as np -from scipy.linalg import inv -from threading import Thread from mayavi.core.ui.mayavi_scene import MayaviScene from mayavi.tools.mlab_scene_model import MlabSceneModel @@ -204,7 +203,7 @@ def _get_fid_fname(self): @cached_property def _get_head_dev_trans(self): - return inv(self.dev_head_trans) + return np.linalg.inv(self.dev_head_trans) @cached_property def _get_hsp(self): diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 326d1996ed0..1b7f257b134 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -3,10 +3,8 @@ # License: Simplified BSD import numpy as np -from scipy import linalg from ..forward import is_fixed_orient - from ..minimum_norm.inverse import _check_reference, _log_exp_var from ..utils import logger, verbose, warn from .mxne_inverse import (_check_ori, _make_sparse_stc, _prepare_gain, @@ -47,6 +45,7 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, active_set : array, shape=(n_active,) Indices of active sources. 
""" + from scipy import linalg G = G.copy() M = M.copy() @@ -59,10 +58,10 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, n_sensors, n_times = M.shape # apply normalization so the numerical values are sane - M_normalize_constant = linalg.norm(np.dot(M, M.T), ord='fro') + M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord='fro') M /= np.sqrt(M_normalize_constant) alpha /= M_normalize_constant - G_normalize_constant = linalg.norm(G, ord=np.inf) + G_normalize_constant = np.linalg.norm(G, ord=np.inf) G /= G_normalize_constant if n_sources % group_size != 0: @@ -97,7 +96,7 @@ def denom_fun(x): CM = np.dot(G * gammas[np.newaxis, :], G.T) CM.flat[::n_sensors + 1] += alpha # Invert CM keeping symmetry - U, S, V = linalg.svd(CM, full_matrices=False) + U, S, _ = linalg.svd(CM, full_matrices=False) S = S[np.newaxis, :] del CM CMinv = np.dot(U / (S + eps), U.T) diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py index b84b18982c1..54da0e9937d 100755 --- a/mne/inverse_sparse/mxne_debiasing.py +++ b/mne/inverse_sparse/mxne_debiasing.py @@ -5,7 +5,6 @@ from math import sqrt import numpy as np -from scipy import linalg from ..utils import check_random_state, logger, verbose, fill_doc @@ -38,13 +37,13 @@ def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0): AS_size = C.shape[0] rng = check_random_state(random_state) B = rng.randn(AS_size, AS_size) - B /= linalg.norm(B, 'fro') + B /= np.linalg.norm(B, 'fro') ATA = np.dot(A.T, A) CCT = np.dot(C, C.T) L0 = np.inf for _ in range(max_iter): Y = np.dot(np.dot(ATA, B), CCT) - L = linalg.norm(Y, 'fro') + L = np.linalg.norm(Y, 'fro') if abs(L - L0) < tol: break @@ -121,14 +120,14 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): dt = (t0 - 1.0) / t Y = D + dt * (D - D0) - Ddiff = linalg.norm(D - D0, np.inf) + Ddiff = np.linalg.norm(D - D0, np.inf) if Ddiff < tol: logger.info("Debiasing converged after %d iterations " "max(|D - D0| = %e < %e)" % (i, Ddiff, tol)) break else: - Ddiff = linalg.norm(D - D0, np.inf) + Ddiff = np.linalg.norm(D - D0, np.inf) logger.info("Debiasing did not converge after %d iterations! " "max(|D - D0| = %e >= %e)" % (max_iter, Ddiff, tol)) return D diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index 9b884bf7057..e6320b6b107 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -4,7 +4,6 @@ # License: Simplified BSD import numpy as np -from scipy import linalg from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc from ..minimum_norm.inverse import (combine_xyz, _prepare_forward, @@ -372,6 +371,7 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging, Volume 35 (10), pp. 2218-2228, 2016. """ + from scipy import linalg if not (0. <= alpha < 100.): raise ValueError('alpha must be in [0, 100). 
' 'Got alpha = %s' % alpha) diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index 13773dee89e..9c6d63bfd6b 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -3,16 +3,21 @@ # Mathurin Massias # License: Simplified BSD +import functools from math import sqrt import numpy as np -from scipy import linalg from .mxne_debiasing import compute_bias -from ..utils import logger, verbose, sum_squared, warn, dgemm +from ..utils import logger, verbose, sum_squared, warn, _get_blas_funcs from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft +@functools.lru_cache(None) +def _get_dgemm(): + return _get_blas_funcs(np.float64, 'gemm') + + def groups_norm2(A, n_orient): """Compute squared L2 norms of groups inplace.""" n_positions = A.shape[0] // n_orient @@ -403,6 +408,7 @@ def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions, alpha * (Lipschitz constants). """ X_j_new = np.zeros_like(X[0:n_orient, :], order='C') + dgemm = _get_dgemm() for j, G_j_c in enumerate(list_G_j_c): idx = slice(j * n_orient, (j + 1) * n_orient) @@ -534,11 +540,11 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) else: logger.info("Using proximal iterations") l21_solver = _mixed_norm_solver_prox - lc = 1.01 * linalg.norm(G, ord=2) ** 2 + lc = 1.01 * np.linalg.norm(G, ord=2) ** 2 if active_set_size is not None: E = list() @@ -558,7 +564,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, elif solver == 'cd': lc_tmp = None else: - lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2 + lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2 X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp, maxit=maxit, tol=tol, init=X_init, n_orient=n_orient, dgap_freq=dgap_freq) @@ -707,8 +713,8 @@ def gprime(w): # Reapply weights to have correct unit X *= weights[_active_set][:, np.newaxis] weights = gprime(X) - p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X), - 'fro') ** 2. + alpha * np.sum(g(X)) + p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), + 'fro') ** 2. + alpha * np.sum(g(X)) E.append(p_obj) # Check convergence @@ -718,7 +724,7 @@ def gprime(w): break else: active_set = np.zeros_like(active_set) - p_obj = 0.5 * linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2. 
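# The _get_dgemm helper introduced above applies the same caching idea
# to BLAS: resolve the routine once, on first use, instead of paying
# for the lookup at import time. A self-contained sketch of the
# pattern, mirroring the _get_blas_funcs utility this patch adds in
# mne/utils/linalg.py:

import functools

import numpy as np


@functools.lru_cache(None)
def _get_blas_funcs(dtype, names):
    # The first call imports scipy.linalg and resolves the routine;
    # later calls with the same (dtype, names) key hit the cache.
    from scipy import linalg
    return linalg.get_blas_funcs(names, (np.empty(0, dtype),))


dgemm = _get_blas_funcs(np.float64, 'gemm')
C = dgemm(1.0, np.eye(2), 2 * np.eye(2))  # computes alpha * (a @ b)
assert np.allclose(C, 2 * np.eye(2))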
E.append(p_obj) break @@ -1186,7 +1192,7 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, R += np.dot(G_j, X_j) X_j_new += X_j - rows_norm = linalg.norm(X_j_new, 'fro') + rows_norm = np.linalg.norm(X_j_new, 'fro') if rows_norm <= alpha_space_lc[jj]: if was_active: Z[jj] = 0.0 @@ -1439,7 +1445,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) logger.info("Using block coordinate descent with active set approach") X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set( @@ -1538,7 +1544,7 @@ def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) # space and time penalties, and inverse of their derivatives: def g_space(Z): @@ -1587,7 +1593,7 @@ def g_time_prime_inv(Z): l21_penalty = np.sum(g_space(Z.copy())) l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum() - p_obj = (0.5 * linalg.norm(M - np.dot(G[:, active_set], X), + p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), 'fro') ** 2. + alpha_space * l21_penalty + alpha_time * l1_penalty) E.append(p_obj) @@ -1602,7 +1608,7 @@ def g_time_prime_inv(Z): print('Convergence reached after %d reweightings!' % k) break else: - p_obj = 0.5 * linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2. E.append(p_obj) logger.info('Iteration %d: as_size=%d, E=%f' % ( k + 1, active_set.sum() / n_orient, p_obj)) diff --git a/mne/io/compensator.py b/mne/io/compensator.py index fa13c68368a..d79f9275700 100644 --- a/mne/io/compensator.py +++ b/mne/io/compensator.py @@ -1,5 +1,4 @@ import numpy as np -from scipy import linalg from .constants import FIFF @@ -94,7 +93,7 @@ def make_compensator(info, from_, to, exclude_comp_chs=False): # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from if from_ != 0: C1 = _make_compensator(info, from_) - comp_from_0 = linalg.inv(np.eye(info['nchan']) - C1) + comp_from_0 = np.linalg.inv(np.eye(info['nchan']) - C1) if to != 0: C2 = _make_compensator(info, to) comp_0_to = np.eye(info['nchan']) - C2 diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index da2a6b3bc6c..4197c9c20eb 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -14,7 +14,6 @@ from os import SEEK_CUR, path as op import numpy as np -from scipy import linalg from ..pick import pick_types from ...utils import (verbose, logger, warn, fill_doc, _check_option, @@ -796,7 +795,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, y = sin(theta) * sin(phi) z = cos(theta) vec_z = np.array([x, y, z]) - vec_z /= linalg.norm(vec_z) + vec_z /= np.linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if vec_z[1] < vec_z[2]: if vec_z[0] < vec_z[1]: @@ -808,7 +807,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, else: vec_x[2] = 1.0 vec_x -= np.sum(vec_x * vec_z) * vec_z - vec_x /= linalg.norm(vec_x) + vec_x /= np.linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) # transform to Neuromag like coordinate space vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 5022d444194..236b3084a41 100644 --- a/mne/io/meas_info.py +++ 
b/mne/io/meas_info.py @@ -15,7 +15,6 @@ from textwrap import shorten import numpy as np -from scipy import linalg from .pick import (channel_type, pick_channels, pick_info, get_channel_type_constants) @@ -1405,7 +1404,7 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): info['dev_ctf_t'] = dev_ctf_t if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: from ..transforms import Transform - head_ctf_trans = linalg.inv(ctf_head_t['trans']) + head_ctf_trans = np.linalg.inv(ctf_head_t['trans']) dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans']) info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans) diff --git a/mne/io/open.py b/mne/io/open.py index 49bfd5afc1e..c9b1f1f1dd8 100644 --- a/mne/io/open.py +++ b/mne/io/open.py @@ -9,7 +9,6 @@ from gzip import GzipFile import numpy as np -from scipy import sparse from .tag import read_tag_info, read_tag, Tag, _call_dict_names from .tree import make_dir_tree, dir_tree_find @@ -244,6 +243,7 @@ def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']): def _show_tree(fid, tree, indent, level, read_limit, max_str, tag_id): """Show FIFF tree.""" + from scipy import sparse this_idt = indent * level next_idt = indent * (level + 1) # print block-level information diff --git a/mne/io/proc_history.py b/mne/io/proc_history.py index 13611c0feb9..6bbdecb5356 100644 --- a/mne/io/proc_history.py +++ b/mne/io/proc_history.py @@ -4,7 +4,6 @@ # License: Simplified BSD import numpy as np -from scipy.sparse import csc_matrix from .open import read_tag, fiff_open from .tree import dir_tree_find @@ -13,6 +12,7 @@ write_float_sparse, write_id) from .tag import find_tag from .constants import FIFF +from ..fixes import _csc_matrix_cast from ..utils import warn, _check_fname _proc_keys = ['parent_file_id', 'block_id', 'parent_block_id', @@ -153,7 +153,7 @@ def _write_proc_history(fid, info): FIFF.FIFF_CREATOR, FIFF.FIFF_DECOUPLER_MATRIX) _sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) -_sss_ctc_casters = (dict, np.array, str, csc_matrix) +_sss_ctc_casters = (dict, np.array, str, _csc_matrix_cast) _sss_cal_keys = ('cal_chans', 'cal_corrs') _sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) diff --git a/mne/io/proj.py b/mne/io/proj.py index 37075153578..c10b2876cac 100644 --- a/mne/io/proj.py +++ b/mne/io/proj.py @@ -10,7 +10,6 @@ from math import sqrt import numpy as np -from scipy import linalg from .tree import dir_tree_find from .tag import find_tag @@ -554,6 +553,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, warning will be raised next time projectors are constructed with the given inputs. If inplace=True, no meaningful data are returned. 
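The recurring one-line change in these hunks, replacing scipy.linalg calls with their np.linalg equivalents (inv, norm, det, eigh, pinv), is behavior-preserving for the uses here and keeps SciPy off the import path entirely. A quick check of the equivalence relied on, with SciPy imported only for the comparison:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
A = rng.randn(4, 4)

assert np.allclose(np.linalg.inv(A), linalg.inv(A))
assert np.isclose(np.linalg.norm(A, ord='fro'), linalg.norm(A, ord='fro'))
assert np.isclose(np.linalg.norm(A, ord=2), linalg.norm(A, ord=2))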
""" + from scipy import linalg nchan = len(ch_names) if nchan == 0: raise ValueError('No channel names specified') @@ -635,7 +635,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, return default_return # Reorthogonalize the vectors - U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False) + U, S, _ = linalg.svd(vecs[:, :nvec], full_matrices=False) # Throw away the linearly dependent guys nproj = np.sum((S / S[0]) > 1e-2) diff --git a/mne/io/reference.py b/mne/io/reference.py index f1733a1f719..e4477339fa1 100644 --- a/mne/io/reference.py +++ b/mne/io/reference.py @@ -6,7 +6,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from .constants import FIFF from .meas_info import _check_ch_keys @@ -52,6 +51,7 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type='auto'): """Apply a custom EEG referencing scheme.""" # Check to see that data is preloaded + from scipy import linalg _check_preload(inst, "Applying a reference") ch_type = _get_ch_type(inst, ch_type) diff --git a/mne/io/tag.py b/mne/io/tag.py index 5dea0eae7c8..de336831472 100644 --- a/mne/io/tag.py +++ b/mne/io/tag.py @@ -7,7 +7,6 @@ import struct import numpy as np -from scipy import sparse from .constants import (FIFF, _dig_kind_named, _dig_cardinal_named, _ch_kind_named, _ch_coil_type_named, _ch_unit_named, @@ -168,6 +167,7 @@ def _read_tag_header(fid): def _read_matrix(fid, tag, shape, rlims, matrix_coding): """Read a matrix (dense or sparse) tag.""" + from scipy import sparse matrix_coding = matrix_coding >> 16 # This should be easy to implement (see _frombuffer_rows) diff --git a/mne/io/write.py b/mne/io/write.py index 4cd33598601..baa611e5009 100644 --- a/mne/io/write.py +++ b/mne/io/write.py @@ -10,7 +10,6 @@ import uuid import numpy as np -from scipy import linalg, sparse from .constants import FIFF from ..utils import logger, _file_like @@ -368,7 +367,7 @@ def write_coord_trans(fid, trans): fid.write(np.array(move, dtype='>f4').tobytes()) # ...and its inverse - trans_inv = linalg.inv(trans['trans']) + trans_inv = np.linalg.inv(trans['trans']) rot = trans_inv[:3, :3] move = trans_inv[:3, 3] fid.write(np.array(rot, dtype='>f4').tobytes()) @@ -436,6 +435,7 @@ def write_float_sparse_ccs(fid, kind, mat): def write_float_sparse(fid, kind, mat, fmt='auto'): """Write a single-precision floating-point sparse matrix tag.""" + from scipy import sparse from .tag import _matrix_coding_CCS, _matrix_coding_RCS if fmt == 'auto': fmt = 'csr' if isinstance(mat, sparse.csr_matrix) else 'csc' diff --git a/mne/label.py b/mne/label.py index 6731a9e1a74..00a8386a71e 100644 --- a/mne/label.py +++ b/mne/label.py @@ -12,7 +12,6 @@ import re import numpy as np -from scipy import linalg, sparse from .parallel import parallel_func, check_n_jobs from .source_estimate import (SourceEstimate, VolSourceEstimate, @@ -1150,6 +1149,7 @@ def split_label(label, parts=2, subject=None, subjects_dir=None, projecting all label vertex coordinates onto this axis and dividing them at regular spatial intervals. """ + from scipy import linalg label, subject, subjects_dir = _prep_label_split(label, subject, subjects_dir) @@ -1260,6 +1260,7 @@ def label_sign_flip(label, src): flip : array Sign flip vector (contains 1 or -1). 
""" + from scipy import linalg if len(src) != 2: raise ValueError('Only source spaces with 2 hemisphers are accepted') @@ -2265,6 +2266,7 @@ def _check_values_labels(values, n_labels): def _labels_to_stc_surf(labels, values, tmin, tstep, subject): + from scipy import sparse subject = _check_labels_subject(labels, subject, 'subject') _check_values_labels(values, len(labels)) vertices = dict(lh=[], rh=[]) diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index 6dc8b1e0a37..ba28e625675 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -8,7 +8,6 @@ from copy import deepcopy from math import sqrt import numpy as np -from scipy import linalg from ._eloreta import _compute_eloreta from ..fixes import _safe_svd @@ -528,6 +527,7 @@ def prepare_inverse_operator(orig, nave, lambda2, method='dSPM', inv : instance of InverseOperator Prepared inverse operator. """ + from scipy import linalg if nave <= 0: raise ValueError('The number of averages should be positive') @@ -1454,7 +1454,7 @@ def _prepare_forward(forward, info, noise_cov, fixed, loose, rank, pca, # Adjusting Source Covariance matrix to make trace of G*R*G' equal # to number of sensors. logger.info('Adjusting source covariance matrix.') - trace_GRGT = linalg.norm(gain, ord='fro') ** 2 + trace_GRGT = np.linalg.norm(gain, ord='fro') ** 2 n_nzero = (noise_cov['eig'] > 0).sum() scale = np.sqrt(n_nzero / trace_GRGT) source_std *= scale diff --git a/mne/minimum_norm/resolution_matrix.py b/mne/minimum_norm/resolution_matrix.py index 6409692fd4b..fb788e3de22 100644 --- a/mne/minimum_norm/resolution_matrix.py +++ b/mne/minimum_norm/resolution_matrix.py @@ -7,8 +7,6 @@ import numpy as np -from scipy import linalg - from .. import pick_channels_forward, EvokedArray, SourceEstimate from ..io.constants import FIFF from ..utils import logger, verbose @@ -198,6 +196,7 @@ def _normalise_psf_ctf(funcs, norm): def _summarise_psf_ctf(funcs, mode, n_comp, return_pca_vars): """Summarise PSFs/CTFs across vertices.""" + from scipy import linalg s_var = None # only computed for return_pca_vars=True if mode == 'maxval': # pick PSF/CTF with maximum absolute value @@ -226,7 +225,7 @@ def _summarise_psf_ctf(funcs, mode, n_comp, return_pca_vars): elif mode == 'pca': # SVD across PSFs/CTFs # compute SVD of PSFs/CTFs across vertices - u, s, _ = linalg.svd(funcs, full_matrices=False, compute_uv=True) + u, s, _ = linalg.svd(funcs, full_matrices=False) funcs = u[:, :n_comp] # if explained variances for SVD components requested if return_pca_vars: diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py index 65bc9294674..060e7580503 100644 --- a/mne/minimum_norm/time_frequency.py +++ b/mne/minimum_norm/time_frequency.py @@ -4,7 +4,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from ..epochs import Epochs, make_fixed_length_events from ..evoked import EvokedArray @@ -28,6 +27,7 @@ def _prepare_source_params(inst, inverse_operator, label=None, prepared=False, method_params=None, use_cps=True, verbose=None): """Prepare inverse operator and params for spectral / TFR analysis.""" + from scipy import linalg inv = _check_or_prepare(inverse_operator, nave, lambda2, method, method_params, prepared) diff --git a/mne/morph.py b/mne/morph.py index 1f75415fb55..07ddfefaf3b 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -8,7 +8,6 @@ import warnings import copy import numpy as np -from scipy import sparse, linalg from .fixes import _get_img_fdata from .parallel import parallel_func @@ 
-270,6 +269,7 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', def _compute_sparse_morph(vertices_from, subject_from, subject_to, subjects_dir=None): """Get nearest vertices from one subject to another.""" + from scipy import sparse maps = read_morph_map(subject_to, subject_from, subjects_dir) cnt = 0 vertices = list() @@ -506,6 +506,7 @@ def compute_vol_morph_mat(self, *, verbose=None): return self def _morph_vols(self, vols, mesg, subselect=True): + from scipy import sparse from dipy.align.reslice import reslice interp = self.src_data['interpolator'].tocsc()[ :, np.concatenate(self._vol_vertices_from)] @@ -533,7 +534,7 @@ def _morph_vols(self, vols, mesg, subselect=True): src_shape = self.src_data['src_shape_full'][::-1] resamp_0 = _grid_interp( src_shape, self.pre_affine.codomain_shape, - linalg.inv(from_affine) @ self.pre_affine.codomain_grid2world) + np.linalg.inv(from_affine) @ self.pre_affine.codomain_grid2world) # reslice to match what was used during the morph # (brain.mgz and whatever was used to create the source space # will not necessarily have the same domain/zooms) @@ -541,7 +542,7 @@ def _morph_vols(self, vols, mesg, subselect=True): # pre_affine.transform(img_real) resamp_1 = _grid_interp( self.pre_affine.codomain_shape, self.pre_affine.domain_shape, - linalg.inv(self.pre_affine.codomain_grid2world) @ + np.linalg.inv(self.pre_affine.codomain_grid2world) @ self.pre_affine.affine @ self.pre_affine.domain_grid2world) resamp_0_1 = resamp_1 @ resamp_0 @@ -578,7 +579,7 @@ def _morph_vols(self, vols, mesg, subselect=True): if resamp_2 is None: resamp_2 = _grid_interp( img_real.shape, self.src_data['to_vox_map'][0], - linalg.inv(affine) @ + np.linalg.inv(affine) @ self.src_data['to_vox_map'][1]) # Equivalent to: # _resample_from_to( @@ -1098,6 +1099,7 @@ def _compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to, smooth=None, subjects_dir=None, warn=True, xhemi=False): """Compute morph matrix.""" + from scipy import sparse logger.info('Computing morph matrix...') subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) @@ -1134,6 +1136,7 @@ def _compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to, def _hemi_morph(tris, vertices_to, vertices_from, smooth, maps, warn): + from scipy import sparse if len(vertices_from) == 0: return sparse.csr_matrix((len(vertices_to), 0)) e = mesh_edges(tris) @@ -1230,6 +1233,7 @@ def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1, def _surf_nearest(vertices, adj_mat): + from scipy import sparse from scipy.sparse.csgraph import dijkstra if not check_version('scipy', '1.3'): raise ValueError('scipy >= 1.3 is required to use nearest smoothing, ' @@ -1258,6 +1262,7 @@ def _csr_row_norm(data, row_norm): def _surf_upsampling_mat(idx_from, e, smooth, warn=True): """Upsample data on a subject's surface given mesh edges.""" # we're in CSR format and it's to==from + from scipy import sparse assert isinstance(e, sparse.csr_matrix) n_tot = e.shape[0] assert e.shape == (n_tot, n_tot) diff --git a/mne/preprocessing/_csd.py b/mne/preprocessing/_csd.py index f951a162f15..a0b9bb12081 100644 --- a/mne/preprocessing/_csd.py +++ b/mne/preprocessing/_csd.py @@ -14,8 +14,6 @@ import numpy as np -from scipy import linalg - from .. 
import pick_types from ..utils import _validate_type, _ensure_int, _check_preload from ..io import BaseRaw @@ -29,7 +27,7 @@ def _prepare_G(G, lambda2): G.flat[::len(G) + 1] += lambda2 # compute the CSD - Gi = linalg.inv(G) + Gi = np.linalg.inv(G) TC = Gi.sum(0) sgi = np.sum(TC) # compute sum total diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index f44b7bdc659..f7b056152ef 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -17,7 +17,6 @@ import json import numpy as np -from scipy import linalg from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg, create_ecg_epochs) @@ -764,6 +763,7 @@ def _fit(self, data, fit_type): self.current_fit = fit_type def _update_mixing_matrix(self): + from scipy import linalg self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_) def _update_ica_names(self): @@ -1695,7 +1695,7 @@ def _pick_sources(self, data, include, exclude, n_pca_components): if self.noise_cov is None: # revert standardization data *= self.pre_whitener_ else: - data = np.dot(linalg.pinv(self.pre_whitener_, cond=1e-14), data) + data = np.linalg.pinv(self.pre_whitener_, rcond=1e-14) @ data return data @@ -2692,6 +2692,7 @@ def read_ica_eeglab(fname, *, verbose=None): ica : instance of ICA An ICA object based on the information contained in the input file. """ + from scipy import linalg eeg = _check_load_mat(fname, None) info, eeg_montage, _ = _get_info(eeg) info.set_montage(eeg_montage) diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index 6129af65d42..a9472b988c9 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -12,7 +12,6 @@ from os import path as op import numpy as np -from scipy import linalg from .. import __version__ from ..annotations import _annotations_starts_stops @@ -33,7 +32,7 @@ from ..io.pick import pick_types, pick_info from ..utils import (verbose, logger, _clean_names, warn, _time_mask, _pl, _check_option, _ensure_int, _validate_type, use_log_level) -from ..fixes import _get_args, _safe_svd, einsum, bincount +from ..fixes import _safe_svd, einsum, bincount from ..channels.channels import _get_T1T2_mag_inds, fix_mag_coil_types @@ -840,6 +839,7 @@ def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref, coil_scale, grad_picks, mag_picks, good_mask, mag_or_fine, bad_condition, t, mag_scale): """Get a decomposition matrix and pseudoinverse matrices.""" + from scipy import linalg # # Fine calibration processing (point-like magnetometers and calib. coeffs) # @@ -1545,9 +1545,7 @@ def _reset_meg_bads(info): if info['ch_names'].index(bad) not in meg_picks] -check_disable = dict() # not available on really old versions of SciPy -if 'check_finite' in _get_args(linalg.svd): - check_disable['check_finite'] = False +check_disable = dict(check_finite=False) def _orth_overwrite(A): @@ -1576,6 +1574,7 @@ def _overlap_projector(data_int, data_res, corr): # computation # we use np.linalg.norm instead of sp.linalg.norm here: ~2x faster! + from scipy import linalg n = np.linalg.norm(data_int) n = 1. 
if n == 0 else n # all-zero data should gracefully continue data_int = _orth_overwrite((data_int / n).T) diff --git a/mne/preprocessing/nirs/_beer_lambert_law.py b/mne/preprocessing/nirs/_beer_lambert_law.py index 69d52e82e82..c0ccd1f0657 100644 --- a/mne/preprocessing/nirs/_beer_lambert_law.py +++ b/mne/preprocessing/nirs/_beer_lambert_law.py @@ -7,7 +7,6 @@ import os.path as op import numpy as np -from scipy import linalg from ...io import BaseRaw from ...io.constants import FIFF @@ -31,6 +30,7 @@ def beer_lambert_law(raw, ppf=0.1): raw : instance of Raw The modified raw instance. """ + from scipy import linalg raw = raw.copy().load_data() _validate_type(raw, BaseRaw, 'raw') diff --git a/mne/preprocessing/nirs/nirs.py b/mne/preprocessing/nirs/nirs.py index 7d6bd05990c..7760e5422bc 100644 --- a/mne/preprocessing/nirs/nirs.py +++ b/mne/preprocessing/nirs/nirs.py @@ -6,7 +6,6 @@ import re import numpy as np -from scipy import linalg from ...io.pick import _picks_to_idx from ...utils import fill_doc @@ -28,7 +27,7 @@ def source_detector_distances(info, picks=None): Array containing distances in meters. Of shape equal to number of channels, or shape of picks if supplied. """ - dist = [linalg.norm(ch['loc'][3:6] - ch['loc'][6:9]) + dist = [np.linalg.norm(ch['loc'][3:6] - ch['loc'][6:9]) for ch in info['chs']] picks = _picks_to_idx(info, picks, exclude=[]) return np.array(dist, float)[picks] diff --git a/mne/preprocessing/otp.py b/mne/preprocessing/otp.py index 8acc37ac27a..3d8c32bf42c 100644 --- a/mne/preprocessing/otp.py +++ b/mne/preprocessing/otp.py @@ -7,7 +7,6 @@ from functools import partial import numpy as np -from scipy import linalg from .._ola import _COLA, _Storer from ..io.pick import _picks_to_idx @@ -19,7 +18,7 @@ def _svd_cov(cov, data): """Use a covariance matrix to compute the SVD faster.""" # This makes use of mathematical equivalences between PCA and SVD # on zero-mean data - s, u = linalg.eigh(cov) + s, u = np.linalg.eigh(cov) norm = np.ones((s.size,)) mask = s > np.finfo(float).eps * s[-1] # largest is last s = np.sqrt(s, out=s) diff --git a/mne/preprocessing/xdawn.py b/mne/preprocessing/xdawn.py index fe1cb5ef68c..17a50a47251 100644 --- a/mne/preprocessing/xdawn.py +++ b/mne/preprocessing/xdawn.py @@ -5,7 +5,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from .. import EvokedArray, Evoked from ..cov import Covariance, _regularized_covariance @@ -59,6 +58,7 @@ def _least_square_evoked(epochs_data, events, tmin, sfreq): toeplitz : array, shape (n_class * n_components, n_channels) An concatenated array of toeplitz matrix for each event type. """ + from scipy import linalg n_epochs, n_channels, n_times = epochs_data.shape tmax = tmin + n_times / float(sfreq) @@ -143,6 +143,7 @@ def _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None, evokeds : array, shape (n_class, n_components, n_times) The independent evoked responses per condition. 
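The _svd_cov helper touched above leans on a standard identity: for zero-mean data X, the eigendecomposition of X @ X.T yields the left singular vectors of X and the squares of its singular values, which is why eigh on the small covariance can stand in for a full SVD. A quick numerical confirmation:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(5, 100)
X -= X.mean(axis=1, keepdims=True)

s2, u = np.linalg.eigh(X @ X.T)  # eigenvalues in ascending order
u_svd, s_svd, _ = np.linalg.svd(X, full_matrices=False)
assert np.allclose(np.sort(s_svd ** 2), s2)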
""" + from scipy import linalg if not isinstance(epochs_data, np.ndarray) or epochs_data.ndim != 3: raise ValueError('epochs_data must be 3D ndarray') diff --git a/mne/proj.py b/mne/proj.py index fcf73bb6146..33aac527725 100644 --- a/mne/proj.py +++ b/mne/proj.py @@ -3,7 +3,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from .epochs import Epochs from .utils import check_fname, logger, verbose, _check_option @@ -76,6 +75,7 @@ def write_proj(fname, projs): @verbose def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, meg='separate', verbose=None): + from scipy import linalg grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads') mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads') eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False, @@ -120,8 +120,7 @@ def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix, continue data_ind = data[ind][:, ind] # data is the covariance matrix: U * S**2 * Ut - U, Sexp2, _ = linalg.svd(data_ind, full_matrices=False, - overwrite_a=True) + U, Sexp2, _ = linalg.svd(data_ind, full_matrices=False) U = U[:, :n] exp_var = Sexp2 / Sexp2.sum() exp_var = exp_var[:n] @@ -367,6 +366,7 @@ def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[], The sensitivity map as a SourceEstimate or VolSourceEstimate instance for visualization. """ + from scipy import linalg # check strings _check_option('ch_type', ch_type, ['eeg', 'grad', 'mag']) _check_option('mode', mode, ['free', 'fixed', 'ratio', 'radiality', diff --git a/mne/rank.py b/mne/rank.py index bf5d7e27fcf..04695603f04 100644 --- a/mne/rank.py +++ b/mne/rank.py @@ -5,7 +5,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from .defaults import _handle_default from .io.meas_info import _simplify_info @@ -48,6 +47,7 @@ def estimate_rank(data, tol='auto', return_singular=False, norm=True, If return_singular is True, the singular values that were thresholded to determine the rank are also returned. """ + from scipy import linalg if norm: data = data.copy() # operate on a copy norms = _compute_row_norms(data) diff --git a/mne/simulation/metrics.py b/mne/simulation/metrics.py index b7e68fcd72d..dd228d6a517 100644 --- a/mne/simulation/metrics.py +++ b/mne/simulation/metrics.py @@ -4,7 +4,6 @@ # License: BSD (3-clause) import numpy as np -from scipy.linalg import norm from ..utils import _check_option @@ -61,5 +60,5 @@ def source_estimate_quantification(stc1, stc2, metric='rms'): # Calculate correlation coefficient between matrix elements elif metric == 'cosine': score = 1. - (np.dot(data1.flatten(), data2.flatten()) / - (norm(data1) * norm(data2))) + (np.linalg.norm(data1) * np.linalg.norm(data2))) return score diff --git a/mne/source_estimate.py b/mne/source_estimate.py index abdae45ca00..25a6f658fcb 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -11,8 +11,6 @@ from types import GeneratorType import numpy as np -from scipy import linalg, sparse -from scipy.sparse import coo_matrix, block_diag as sparse_block_diag from .baseline import rescale from .cov import Covariance @@ -2643,6 +2641,7 @@ def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False, vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. 
""" + from scipy import sparse if remap_vertices: logger.info('Reassigning vertex indices.') tris = np.searchsorted(np.unique(tris), tris) @@ -2679,6 +2678,7 @@ def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None): vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ + from scipy.sparse import block_diag as sparse_block_diag if src[0]['dist'] is None: raise RuntimeError('src must have distances included, consider using ' 'setup_source_space with add_dist=True') @@ -2787,6 +2787,7 @@ def spatial_inter_hemi_adjacency(src, dist, verbose=None): existing intra-hemispheric adjacency matrix, e.g. computed using geodesic distances. """ + from scipy import sparse from scipy.spatial.distance import cdist src = _ensure_src(src, kind='surface') adj = cdist(src[0]['rr'][src[0]['vertno']], @@ -2801,6 +2802,7 @@ def spatial_inter_hemi_adjacency(src, dist, verbose=None): @verbose def _get_adjacency_from_edges(edges, n_times, verbose=None): """Given edges sparse matrix, create adjacency matrix.""" + from scipy.sparse import coo_matrix n_vertices = edges.shape[0] logger.info("-- number of adjacent vertices : %d" % n_vertices) nnz = edges.col.size @@ -2832,11 +2834,12 @@ def _get_ico_tris(grade, verbose=None, return_surf=False): def _pca_flip(flip, data): + from scipy import linalg U, s, V = linalg.svd(data, full_matrices=False) # determine sign-flip sign = np.sign(np.dot(U[:, 0], flip)) # use average power in label for scaling - scale = linalg.norm(s) / np.sqrt(len(data)) + scale = np.linalg.norm(s) / np.sqrt(len(data)) return sign * scale * V[0] @@ -2878,6 +2881,7 @@ def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse): # of vol src space. # If stc=None (i.e. no activation time courses provided) and mode='mean', # only computes vertex indices and label_flip will be list of None. 
+ from scipy import sparse from .label import label_sign_flip, Label, BiHemiLabel # if source estimate provided in stc, get vertices from source space and @@ -3069,6 +3073,7 @@ def _gen_extract_label_time_course(stcs, labels, src, *, mode='mean', allow_empty=False, mri_resolution=True, verbose=None): # loop through source estimates and extract time series + from scipy import sparse if src is None and mode in ['mean', 'max']: kind = 'surface' else: diff --git a/mne/source_space.py b/mne/source_space.py index 293a883b7aa..7ba5cafdf47 100644 --- a/mne/source_space.py +++ b/mne/source_space.py @@ -13,7 +13,6 @@ import os.path as op import numpy as np -from scipy import sparse, linalg from .io.constants import FIFF from .io.meas_info import create_info, Info @@ -1102,6 +1101,7 @@ def write_source_spaces(fname, src, overwrite=False, verbose=None): def _write_one_source_space(fid, this, verbose=None): """Write one source space.""" + from scipy import sparse if this['type'] == 'surf': src_type = FIFF.FIFFV_MNE_SPACE_SURFACE elif this['type'] == 'vol': @@ -2296,6 +2296,7 @@ def _src_vol_dims(s): def _add_interpolator(sp): """Compute a sparse matrix to interpolate the data into an MRI volume.""" # extract transformation information from mri + from scipy import sparse mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0]) # @@ -2348,6 +2349,7 @@ def _add_interpolator(sp): def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None): """Compute a grid-to-grid linear or nearest interpolation given.""" + from scipy import sparse from_shape = np.array(from_shape, int) to_shape = np.array(to_shape, int) trans = np.array(trans, np.float64) # to -> from @@ -2649,6 +2651,7 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None): the source space to disk, as the computed distances will automatically be stored along with the source space data for future use. """ + from scipy.sparse import csr_matrix from scipy.sparse.csgraph import dijkstra n_jobs = check_n_jobs(n_jobs) src = _ensure_src(src) @@ -2706,7 +2709,7 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None): i, j = np.meshgrid(s['vertno'], s['vertno']) i = i.ravel()[idx] j = j.ravel()[idx] - s['dist'] = sparse.csr_matrix( + s['dist'] = csr_matrix( (d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32) s['dist_limit'] = np.array([dist_limit], np.float32) @@ -3194,7 +3197,7 @@ def _get_src_nn(s, use_cps=True, vertices=None): # Project out the surface normal and compute SVD nn[vp] = np.sum( s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0) - nn /= linalg.norm(nn, axis=-1, keepdims=True) + nn /= np.linalg.norm(nn, axis=-1, keepdims=True) else: nn = s['nn'][vertices, :] return nn diff --git a/mne/stats/_adjacency.py b/mne/stats/_adjacency.py index 4929c2f9253..c81344a8aef 100644 --- a/mne/stats/_adjacency.py +++ b/mne/stats/_adjacency.py @@ -5,7 +5,6 @@ # License: Simplified BSD import numpy as np -from scipy import sparse from ..utils import _validate_type, _check_option from ..utils.check import int_like @@ -30,6 +29,7 @@ def combine_adjacency(*structure): adjacency : scipy.sparse.coo_matrix, shape (n_features, n_features) The adjacency matrix. 
""" + from scipy import sparse structure = list(structure) for di, dim in enumerate(structure): name = f'structure[{di}]' diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index 85cbc9385fc..78887d9edcb 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -11,7 +11,6 @@ # License: Simplified BSD import numpy as np -from scipy import sparse from .parametric import f_oneway, ttest_1samp_no_p from ..parallel import parallel_func, check_n_jobs @@ -283,6 +282,7 @@ def _get_clusters_st(x_in, neighbors, max_step=1): def _get_components(x_in, adjacency, return_list=True): """Get connected components from a mask and a adjacency matrix.""" + from scipy import sparse if adjacency is False: components = np.arange(len(x_in)) else: @@ -502,6 +502,7 @@ def _find_clusters_1dir_parts(x, x_in, adjacency, max_step, partitions, def _find_clusters_1dir(x, x_in, adjacency, max_step, t_power, ndimage): """Actually call the clustering algorithm.""" + from scipy import sparse if adjacency is None: labels, n_labels = ndimage.label(x_in) @@ -592,6 +593,7 @@ def _pval_from_histogram(T, H0, tail): def _setup_adjacency(adjacency, n_tests, n_times): + from scipy import sparse if not sparse.issparse(adjacency): raise ValueError("If adjacency matrix is given, it must be a " "SciPy sparse matrix.") @@ -1379,6 +1381,7 @@ def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True): @verbose def _get_partitions_from_adjacency(adjacency, n_times, verbose=None): """Specify disjoint subsets (e.g., hemispheres) based on adjacency.""" + from scipy import sparse if isinstance(adjacency, list): test = np.ones(len(adjacency)) test_adj = np.zeros((len(adjacency), len(adjacency)), dtype='bool') diff --git a/mne/stats/regression.py b/mne/stats/regression.py index 80f198b46af..64635af97e3 100644 --- a/mne/stats/regression.py +++ b/mne/stats/regression.py @@ -10,7 +10,6 @@ from collections import namedtuple import numpy as np -from scipy import linalg, sparse from ..source_estimate import SourceEstimate from ..epochs import BaseEpochs @@ -100,7 +99,7 @@ def linear_regression(inst, design_matrix, names=None): def _fit_lm(data, design_matrix, names): """Aux function.""" - from scipy import stats + from scipy import stats, linalg n_samples = len(data) n_features = np.product(data.shape[1:]) if design_matrix.ndim != 2: @@ -242,6 +241,7 @@ def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1, waveforms: II. Non-linear effects, overlap correction, and practical considerations. Psychophysiology, 52(2), 169-189. """ + from scipy import linalg if isinstance(solver, str): if solver not in {"cholesky"}: raise ValueError("No such solver: {}".format(solver)) @@ -312,6 +312,7 @@ def _prepare_rerp_data(raw, events, picks=None, decim=1): def _prepare_rerp_preds(n_samples, sfreq, events, event_id=None, tmin=-.1, tmax=1, covariates=None): """Build predictor matrix and metadata (e.g. 
condition time windows).""" + from scipy import sparse conds = list(event_id) if covariates is not None: conds += list(covariates) diff --git a/mne/surface.py b/mne/surface.py index 2c1aef53b75..021f8b525e0 100644 --- a/mne/surface.py +++ b/mne/surface.py @@ -18,7 +18,6 @@ from struct import pack import numpy as np -from scipy.sparse import coo_matrix, csr_matrix, eye as speye from .io.constants import FIFF from .io.open import fiff_open @@ -279,6 +278,7 @@ def _triangle_neighbors(tris, npts): # for ti, tri in enumerate(tris): # for t in tri: # neighbor_tri[t].append(ti) + from scipy.sparse import coo_matrix rows = tris.ravel() cols = np.repeat(np.arange(len(tris)), 3) data = np.ones(len(cols)) @@ -1483,6 +1483,7 @@ def _make_morph_map(subject_from, subject_to, subjects_dir, xhemi): def _make_morph_map_hemi(subject_from, subject_to, subjects_dir, reg_from, reg_to): """Construct morph map for one hemisphere.""" + from scipy.sparse import csr_matrix, eye as speye # add speedy short-circuit for self-maps if subject_from == subject_to and reg_from == reg_to: fname = op.join(subjects_dir, subject_from, 'surf', reg_from) @@ -1656,6 +1657,7 @@ def mesh_edges(tris): edges : sparse matrix The adjacency matrix. """ + from scipy.sparse import coo_matrix if np.max(tris) > len(np.unique(tris)): raise ValueError( 'Cannot compute adjacency on a selection of triangles.') @@ -1690,6 +1692,7 @@ def mesh_dist(tris, vert): dist_matrix : scipy.sparse.csr_matrix Sparse matrix with distances between adjacent vertices. """ + from scipy.sparse import csr_matrix edges = mesh_edges(tris).tocoo() # Euclidean distances between neighboring vertices diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index fe18fcf47b3..20b0286768d 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -16,6 +16,7 @@ from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_allclose, assert_equal, assert_array_less) import numpy as np +from numpy.fft import rfft, rfftfreq import matplotlib.pyplot as plt import scipy.signal @@ -28,7 +29,6 @@ from mne.datasets import testing from mne.chpi import read_head_pos, head_pos_to_trans_rot_t from mne.event import merge_events -from mne.fixes import rfft, rfftfreq from mne.io import RawArray, read_raw_fif from mne.io.constants import FIFF from mne.io.proj import _has_eeg_average_ref_proj diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index bb46ec51d02..78d54732cd1 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -8,7 +8,7 @@ from scipy.signal import resample as sp_resample, butter, freqz, sosfreqz from mne import create_info -from mne.fixes import fft, fftfreq +from numpy.fft import fft, fftfreq from mne.io import RawArray, read_raw_fif from mne.io.pick import _DATA_CH_TYPES_SPLIT from mne.filter import (filter_data, resample, _resample_stim_channels, diff --git a/mne/tests/test_import_nesting.py b/mne/tests/test_import_nesting.py index 407ee55f4e4..18cce60ef81 100644 --- a/mne/tests/test_import_nesting.py +++ b/mne/tests/test_import_nesting.py @@ -12,10 +12,9 @@ out = set() -# check scipy +# check scipy (Numba imports it to check the version) ok_scipy_submodules = set(['scipy', 'numpy', # these appear in old scipy - 'fftpack', 'lib', 'linalg', 'fft', - 'misc', 'sparse', 'version']) + 'version']) scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys() if x.startswith('scipy.') and '__' not in x and not x.split('.')[1].startswith('_') @@ -29,7 +28,8 @@ for key in ('sklearn', 'pandas', 'mayavi', 
'pyvista', 'matplotlib', 'dipy', 'nibabel', 'cupy', 'picard', 'pyvistaqt'): if x.startswith(key): - out |= {key} + x = '.'.join(x.split('.')[:2]) + out |= {x} if len(out) > 0: print('\\nFound un-nested import(s) for %s' % (sorted(out),), end='') exit(len(out)) diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index ece8b971feb..bc5804008c9 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -9,6 +9,7 @@ import re import numpy as np +from numpy.fft import fft from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal, assert_array_less) import pytest @@ -32,7 +33,7 @@ write_source_spaces) from mne.datasets import testing from mne.externals.h5io import write_hdf5 -from mne.fixes import fft, _get_img_fdata, nullcontext +from mne.fixes import _get_img_fdata, nullcontext from mne.io import read_info from mne.io.constants import FIFF from mne.source_estimate import grade_to_tris, _get_vol_mask diff --git a/mne/time_frequency/_stft.py b/mne/time_frequency/_stft.py index f72839a9731..6d3a6808451 100644 --- a/mne/time_frequency/_stft.py +++ b/mne/time_frequency/_stft.py @@ -1,7 +1,7 @@ from math import ceil import numpy as np -from ..fixes import rfft, irfft, rfftfreq +from ..fixes import _import_fft from ..utils import logger, verbose @@ -34,6 +34,7 @@ def stft(x, wsize, tstep=None, verbose=None): istft stftfreq """ + rfft = _import_fft('rfft') if not np.isrealobj(x): raise ValueError("x is not a real valued array") @@ -118,6 +119,7 @@ def istft(X, tstep=None, Tx=None): stft """ # Errors and warnings + irfft = _import_fft('irfft') X = np.asarray(X) if X.ndim < 2: raise ValueError(f'X must have ndim >= 2, got {X.ndim}') @@ -195,6 +197,7 @@ def stftfreq(wsize, sfreq=None): # noqa: D401 stft istft """ + rfftfreq = _import_fft('rfftfreq') freqs = rfftfreq(wsize) if sfreq is not None: freqs *= float(sfreq) diff --git a/mne/time_frequency/_stockwell.py b/mne/time_frequency/_stockwell.py index 2017998d556..c39d895cda5 100644 --- a/mne/time_frequency/_stockwell.py +++ b/mne/time_frequency/_stockwell.py @@ -4,11 +4,11 @@ # License : BSD 3-clause from copy import deepcopy -import math + import numpy as np -from scipy import fftpack # XXX explore cuda optimization at some point. +from ..fixes import _import_fft from ..io.pick import _pick_data_channels, pick_info from ..utils import verbose, warn, fill_doc, _validate_type from ..parallel import parallel_func, check_n_jobs @@ -25,7 +25,7 @@ def _is_power_of_two(n): if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft): # Compute next power of 2 - n_fft = 2 ** int(math.ceil(math.log(n_times, 2))) + n_fft = 2 ** int(np.ceil(np.log2(n_times))) elif n_fft < n_times: raise ValueError("n_fft cannot be smaller than signal size. " "Got %s < %s." % (n_fft, n_times)) @@ -42,7 +42,8 @@ def _is_power_of_two(n): def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width): """Precompute stockwell Gaussian windows (in the freq domain).""" - tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp + fft, fftfreq = _import_fft(('fft', 'fftfreq')) + tw = fftfreq(n_samp, 1. / sfreq) / n_samp tw = np.r_[tw[:1], tw[1:][::-1]] k = width # 1 for classical stowckwell transform @@ -55,35 +56,37 @@ def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width): window = ((f / (np.sqrt(2. * np.pi) * k)) * np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) 
* tw ** 2.)) window /= window.sum() # normalisation - windows[i_f] = fftpack.fft(window) + windows[i_f] = fft(window) return windows def _st(x, start_f, windows): """Compute ST based on Ali Moukadem MATLAB code (used in tests).""" + fft, ifft = _import_fft(('fft', 'ifft')) n_samp = x.shape[-1] ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128) # do the work - Fx = fftpack.fft(x) + Fx = fft(x) XF = np.concatenate([Fx, Fx], axis=-1) for i_f, window in enumerate(windows): f = start_f + i_f - ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window) + ST[..., i_f, :] = ifft(XF[..., f:f + n_samp] * window) return ST def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W): """Aux function.""" + fft, ifft = _import_fft(('fft', 'ifft')) n_samp = x.shape[-1] n_out = (n_samp - zero_pad) n_out = n_out // decim + bool(n_out % decim) psd = np.empty((len(W), n_out)) itc = np.empty_like(psd) if compute_itc else None - X = fftpack.fft(x) + X = fft(x) XX = np.concatenate([X, X], axis=-1) for i_f, window in enumerate(W): f = start_f + i_f - ST = fftpack.ifft(XX[:, f:f + n_samp] * window) + ST = ifft(XX[:, f:f + n_samp] * window) if zero_pad > 0: TFR = ST[:, :-zero_pad:decim] else: @@ -155,6 +158,7 @@ def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None, ---------- .. footbibliography:: """ + fftfreq = _import_fft('fftfreq') _validate_type(data, np.ndarray, 'data') if data.ndim != 3: raise ValueError( @@ -164,7 +168,7 @@ def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None, n_out = data.shape[2] // decim + bool(data.shape[-1] % decim) data, n_fft_, zero_pad = _check_input_st(data, n_fft) - freqs = fftpack.fftfreq(n_fft_, 1. / sfreq) + freqs = fftfreq(n_fft_, 1. / sfreq) if fmin is None: fmin = freqs[freqs > 0][0] if fmax is None: diff --git a/mne/time_frequency/ar.py b/mne/time_frequency/ar.py index 649e8d1d08b..1f4cc9a3a85 100644 --- a/mne/time_frequency/ar.py +++ b/mne/time_frequency/ar.py @@ -4,7 +4,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from ..defaults import _handle_default from ..io.pick import _picks_to_idx, _picks_by_type, pick_info @@ -16,6 +15,7 @@ def _yule_walker(X, order=1): Operates in-place. 
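The switch from math.ceil(math.log(n, 2)) to np.ceil(np.log2(n)) in the Stockwell helper above is more than a way to drop the math import: log2 is computed directly rather than as log(n) / log(2), so it is exact at powers of two, where the two-argument form can round up past the true value and make the ceil overshoot. An illustration; the overshoot is platform dependent:

import math

import numpy as np

n = 2 ** 29
print(math.log(n, 2))  # 29.000000000000004 on many platforms
print(np.log2(n))      # 29.0 exactly
print(2 ** int(math.ceil(math.log(n, 2))))  # may give 2 ** 30
print(2 ** int(np.ceil(np.log2(n))))        # 2 ** 29, as intended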
""" + from scipy import linalg assert X.ndim == 2 denom = X.shape[-1] - np.arange(order + 1) r = np.zeros(order + 1, np.float64) diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py index a5a2cd172d4..da10a6a6c7c 100644 --- a/mne/time_frequency/csd.py +++ b/mne/time_frequency/csd.py @@ -10,7 +10,7 @@ import numpy as np from .tfr import _cwt_array, morlet, _get_nfft -from ..fixes import rfftfreq +from ..fixes import _import_fft from ..io.pick import pick_channels, _picks_to_idx from ..utils import (logger, verbose, warn, copy_function_doc_to_method_doc, ProgressBar) @@ -699,6 +699,7 @@ def csd_array_fourier(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None, csd_morlet csd_multitaper """ + rfftfreq = _import_fft('rfftfreq') X, times, tmin, tmax, fmin, fmax = _prepare_csd_array( X, sfreq, t0, tmin, tmax, fmin, fmax) @@ -846,6 +847,7 @@ def csd_array_multitaper(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None, csd_morlet csd_multitaper """ + rfftfreq = _import_fft('rfftfreq') X, times, tmin, tmax, fmin, fmax = _prepare_csd_array( X, sfreq, t0, tmin, tmax, fmin, fmax) diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index f818bddadd9..b05bb59dea5 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -6,7 +6,7 @@ import operator import numpy as np -from ..fixes import rfft, irfft, rfftfreq +from ..fixes import _import_fft from ..parallel import parallel_func from ..utils import sum_squared, warn, verbose, logger, _check_option @@ -62,6 +62,7 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None, from scipy import interpolate from scipy.signal.windows import dpss as sp_dpss from ..filter import next_fast_len + rfft, irfft = _import_fft(('rfft', 'irfft')) # This np.int32 business works around a weird Windows bug, see # gh-5039 and https://github.com/scipy/scipy/pull/8608 Kmax = np.int32(operator.index(Kmax)) @@ -299,6 +300,7 @@ def _mt_spectra(x, dpss, sfreq, n_fft=None): freqs : array The frequency points in Hz of the spectra """ + rfft, rfftfreq = _import_fft(('rfft', 'rfftfreq')) if n_fft is None: n_fft = x.shape[-1] @@ -410,6 +412,7 @@ def psd_array_multitaper(x, sfreq, fmin=0, fmax=np.inf, bandwidth=None, ----- .. 
versionadded:: 0.14.0 """ + rfftfreq = _import_fft('rfftfreq') _check_option('normalization', normalization, ['length', 'full']) # Reshape data so its 2-D for parallelization diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 8b549321d47..2d539a0b156 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -11,15 +11,13 @@ from copy import deepcopy from functools import partial -from math import sqrt import numpy as np -from scipy import linalg from .multitaper import dpss_windows from ..baseline import rescale -from ..fixes import fft, ifft +from ..fixes import _import_fft from ..filter import next_fast_len from ..parallel import parallel_func from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname, @@ -96,7 +94,7 @@ def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False): real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2) oscillation -= real_offset W = oscillation * gaussian_enveloppe - W /= sqrt(0.5) * linalg.norm(W.ravel()) + W /= np.sqrt(0.5) * np.linalg.norm(W.ravel()) Ws.append(W) return Ws @@ -162,7 +160,7 @@ def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False): if zero_mean: # to make it zero mean real_offset = Wk.mean() Wk -= real_offset - Wk /= sqrt(0.5) * linalg.norm(Wk.ravel()) + Wk /= np.sqrt(0.5) * np.linalg.norm(Wk.ravel()) Wm.append(Wk) @@ -219,6 +217,7 @@ def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True): out : array, shape (n_signals, n_freqs, n_time_decim) The time-frequency transform of the signals. """ + fft, ifft = _import_fft(('fft', 'ifft')) _check_option('mode', mode, ['same', 'valid', 'full']) decim = _check_decim(decim) X = np.asarray(X) diff --git a/mne/transforms.py b/mne/transforms.py index 07e128157be..e88d9db0983 100644 --- a/mne/transforms.py +++ b/mne/transforms.py @@ -12,7 +12,6 @@ import numpy as np from copy import deepcopy -from scipy import linalg from .fixes import einsum, jit, mean from .io.constants import FIFF @@ -330,9 +329,9 @@ def rotation3d_align_z_axis(target_z_axis): # assert that r is a rotation matrix r^t * r = I and det(r) = 1 assert(np.any((r.dot(r.T) - np.identity(3)) < 1E-12)) - assert((linalg.det(r) - 1.0) < 1E-12) + assert((np.linalg.det(r) - 1.0) < 1E-12) # assert that r maps [0 0 1] on the device z axis (target_z_axis) - assert(linalg.norm(target_z_axis - r.dot([0, 0, 1])) < 1e-12) + assert(np.linalg.norm(target_z_axis - r.dot([0, 0, 1])) < 1e-12) return r @@ -587,7 +586,7 @@ def invert_transform(trans): inv_trans : dict Inverse transform. 
""" - return Transform(trans['to'], trans['from'], linalg.inv(trans['trans'])) + return Transform(trans['to'], trans['from'], np.linalg.inv(trans['trans'])) def transform_surface_to(surf, dest, trans, copy=False): @@ -660,12 +659,12 @@ def get_ras_to_neuromag_trans(nasion, lpa, rpa): "arrays of length 3.") right = rpa - lpa - right_unit = right / linalg.norm(right) + right_unit = right / np.linalg.norm(right) origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit anterior = nasion - origin - anterior_unit = anterior / linalg.norm(anterior) + anterior_unit = anterior / np.linalg.norm(anterior) superior_unit = np.cross(right_unit, anterior_unit) @@ -912,6 +911,7 @@ class _TPSWarp(object): """ def fit(self, source, destination, reg=1e-3): + from scipy import linalg from scipy.spatial.distance import cdist assert source.shape[1] == destination.shape[1] == 3 assert source.shape[0] == destination.shape[0] @@ -1040,6 +1040,7 @@ def fit(self, source, destination, order=4, reg=1e-5, center=True, inst : instance of SphericalSurfaceWarp The warping object (for chaining). """ + from scipy import linalg from .bem import _fit_sphere from .source_space import _check_spacing match_rr = _check_spacing(match, verbose=False)[2]['rr'] @@ -1383,6 +1384,7 @@ def _fit_matched_points(p, x, weights=None, scale=False): def _average_quats(quats, weights=None): """Average unit quaternions properly.""" + from scipy import linalg assert quats.ndim == 2 and quats.shape[1] in (3, 4) if weights is None: weights = np.ones(quats.shape[0]) diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index 6d254307f16..b5f60df1f52 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -64,7 +64,7 @@ _check_dt, _ReuseCycle, _arange_div) from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata, _prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin) -from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym, - dgesdd, dgemm, zgemm, dgemv, ddot, LinAlgError, eigh) +from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym, eigh, + _get_blas_funcs) from .dataframe import (_set_pandas_dtype, _scale_dataframe_data, _convert_times, _build_data_frame) diff --git a/mne/utils/_logging.py b/mne/utils/_logging.py index a02be5ad063..a0e37b79edf 100644 --- a/mne/utils/_logging.py +++ b/mne/utils/_logging.py @@ -13,6 +13,7 @@ import os.path as op import warnings +from .docs import fill_doc from ..externals.decorator import FunctionMaker @@ -79,6 +80,7 @@ def verbose(function): Examples -------- You can use the ``verbose`` argument to set the verbose level on the fly:: + >>> import mne >>> cov = mne.compute_raw_covariance(raw, verbose='WARNING') # doctest: +SKIP >>> cov = mne.compute_raw_covariance(raw, verbose='INFO') # doctest: +SKIP @@ -88,7 +90,6 @@ def verbose(function): """ # noqa: E501 # See https://decorator.readthedocs.io/en/latest/tests.documentation.html # #dealing-with-third-party-decorators - from .docs import fill_doc try: fill_doc(function) except TypeError: # nothing to add diff --git a/mne/utils/_testing.py b/mne/utils/_testing.py index 60cbbbfd619..3376b0d493f 100644 --- a/mne/utils/_testing.py +++ b/mne/utils/_testing.py @@ -19,7 +19,6 @@ import numpy as np from numpy.testing import assert_array_equal, assert_allclose -from scipy import linalg from ._logging import warn, ClosingStringIO from .numerics import object_diff @@ -449,6 +448,7 @@ def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500., def assert_snr(actual, desired, tol): """Assert 
actual and desired arrays are within some SNR tolerance.""" + from scipy import linalg with np.errstate(divide='ignore'): # allow infinite snr = (linalg.norm(desired, ord='fro') / linalg.norm(desired - actual, ord='fro')) diff --git a/mne/utils/docs.py b/mne/utils/docs.py index ed3381d4777..c7a683f9f8a 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -12,10 +12,8 @@ import warnings import webbrowser -from .config import get_config from ..defaults import HEAD_SIZE_DEFAULT -from ..externals.doccer import filldoc, unindent_dict -from .check import _check_option +from ..externals.doccer import indentcount_lines ############################################################################## @@ -2086,9 +2084,54 @@ If True (default False), accept the license terms of this dataset. """ -# Finalize -docdict = unindent_dict(docdict) -fill_doc = filldoc(docdict, unindent_params=False) +docdict_indented = {} + + +def fill_doc(f): + """Fill a docstring with docdict entries. + + Parameters + ---------- + f : callable + The function to fill the docstring of. Will be modified in place. + + Returns + ------- + f : callable + The function, potentially with an updated ``__doc__``. + """ + docstring = f.__doc__ + if not docstring: + return f + lines = docstring.splitlines() + # Find the minimum indent of the main docstring, after first line + if len(lines) < 2: + icount = 0 + else: + icount = indentcount_lines(lines[1:]) + # Insert this indent to dictionary docstrings + try: + indented = docdict_indented[icount] + except KeyError: + indent = ' ' * icount + docdict_indented[icount] = indented = {} + for name, dstr in docdict.items(): + lines = dstr.splitlines() + try: + newlines = [lines[0]] + for line in lines[1:]: + newlines.append(indent + line) + indented[name] = '\n'.join(newlines) + except IndexError: + indented[name] = dstr + try: + f.__doc__ = docstring % indented + except (TypeError, ValueError, KeyError) as exp: + funcname = f.__name__ + funcname = docstring.split('\n')[0] if funcname is None else funcname + raise RuntimeError('Error documenting %s:\n%s' + % (funcname, str(exp))) + return f ############################################################################## @@ -2395,6 +2438,8 @@ def open_docs(kind=None, version=None): The default can be changed by setting the configuration value MNE_DOCS_VERSION. 
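The rewritten ``fill_doc`` above replaces the ``doccer.filldoc`` machinery: it measures the target docstring's body indentation with ``indentcount_lines``, re-indents every ``docdict`` entry to match (caching one indented copy of the dictionary per indent level in ``docdict_indented``), then applies ordinary ``%``-substitution. A stripped-down sketch of that substitution step, assuming a one-entry ``docdict``::

    docdict = {'verbose': ('verbose : bool, str, int, or None\n'
                           '    Control verbosity of the logging output.')}

    def fill_doc_demo(f):
        indent = ' ' * 4  # what indentcount_lines reports for this target
        indented = {name: ('\n' + indent).join(entry.splitlines())
                    for name, entry in docdict.items()}
        f.__doc__ = f.__doc__ % indented
        return f

    @fill_doc_demo
    def compute(data, verbose=None):
        """Compute something.

        Parameters
        ----------
        %(verbose)s
        """

After decoration, ``compute.__doc__`` holds the full ``verbose`` description with its continuation line indented to match the surrounding section, which is what keeps the rendered docs aligned for substituted entries.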
""" + from .check import _check_option + from .config import get_config if kind is None: kind = get_config('MNE_DOCS_KIND', 'api') help_dict = dict(api='python_reference.html', tutorials='tutorials.html', diff --git a/mne/utils/fetching.py b/mne/utils/fetching.py index 836abc2bce1..e1f7b9b86b1 100644 --- a/mne/utils/fetching.py +++ b/mne/utils/fetching.py @@ -7,8 +7,6 @@ import os import shutil import time -from urllib import parse, request -from urllib.error import HTTPError, URLError from .progressbar import ProgressBar from .numerics import hashfunc @@ -21,6 +19,8 @@ def _get_http(url, temp_file_name, initial_size, timeout, verbose_bool): """Safely (resume a) download to a file from http(s).""" + from urllib import request + from urllib.error import HTTPError, URLError # Actually do the reading response = None extra = '' @@ -94,6 +94,7 @@ def _fetch_file(url, file_name, print_destination=True, resume=True, """ # Adapted from NISL: # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py + from urllib import parse if hash_ is not None and (not isinstance(hash_, str) or len(hash_) != 32) and hash_type == 'md5': raise ValueError('Bad hash value given, should be a 32-character ' @@ -135,6 +136,7 @@ def _fetch_file(url, file_name, print_destination=True, resume=True, def _url_to_local_path(url, path): """Mirror a url path in a local destination (keeping folder structure).""" + from urllib import parse, request destination = parse.urlparse(url).path # First char should be '/', and it needs to be discarded if len(destination) < 2 or destination[0] != '/': diff --git a/mne/utils/linalg.py b/mne/utils/linalg.py index 18447cd2c3a..8cce6a22c6a 100644 --- a/mne/utils/linalg.py +++ b/mne/utils/linalg.py @@ -22,49 +22,47 @@ # # License: BSD (3-clause) +import functools + import numpy as np -from scipy import linalg -from scipy.linalg import LinAlgError -from scipy._lib._util import _asarray_validated -_d = np.empty(0, np.float64) -_z = np.empty(0, np.complex128) -dgemm = linalg.get_blas_funcs('gemm', (_d,)) -zgemm = linalg.get_blas_funcs('gemm', (_z,)) -dgemv = linalg.get_blas_funcs('gemv', (_d,)) -ddot = linalg.get_blas_funcs('dot', (_d,)) -_I = np.cast['F'](1j) + +# For efficiency, names should be str or tuple of str, dtype a builtin +# NumPy dtype + +@functools.lru_cache(None) +def _get_blas_funcs(dtype, names): + from scipy import linalg + return linalg.get_blas_funcs(names, (np.empty(0, dtype),)) + + +@functools.lru_cache(None) +def _get_lapack_funcs(dtype, names): + from scipy import linalg + assert dtype in (np.float64, np.complex128) + x = np.empty(0, dtype) + return linalg.get_lapack_funcs(names, (x,)) ############################################################################### # linalg.svd and linalg.pinv2 -dgesdd, dgesdd_lwork = linalg.get_lapack_funcs(('gesdd', 'gesdd_lwork'), (_d,)) -zgesdd, zgesdd_lwork = linalg.get_lapack_funcs(('gesdd', 'gesdd_lwork'), (_z,)) -dgesvd, dgesvd_lwork = linalg.get_lapack_funcs(('gesvd', 'gesvd_lwork'), (_d,)) -zgesvd, zgesvd_lwork = linalg.get_lapack_funcs(('gesvd', 'gesvd_lwork'), (_z,)) - def _svd_lwork(shape, dtype=np.float64): """Set up SVD calculations on identical-shape float64/complex128 arrays.""" - if dtype == np.float64: - gesdd_lwork, gesvd_lwork = dgesdd_lwork, dgesvd_lwork - else: - assert dtype == np.complex128 - gesdd_lwork, gesvd_lwork = zgesdd_lwork, zgesvd_lwork + from scipy import linalg + gesdd_lwork, gesvd_lwork = _get_lapack_funcs( + dtype, ('gesdd_lwork', 'gesvd_lwork')) sdd_lwork = linalg.decomp_svd._compute_lwork( 
gesdd_lwork, *shape, compute_uv=True, full_matrices=False) svd_lwork = linalg.decomp_svd._compute_lwork( gesvd_lwork, *shape, compute_uv=True, full_matrices=False) - return (sdd_lwork, svd_lwork) + return sdd_lwork, svd_lwork def _repeated_svd(x, lwork, overwrite_a=False): """Mimic scipy.linalg.svd, avoid lwork and get_lapack_funcs overhead.""" - if x.dtype == np.float64: - gesdd, gesvd = dgesdd, zgesdd - else: - assert x.dtype == np.complex128 - gesdd, gesvd = zgesdd, zgesvd + gesdd, gesvd = _get_lapack_funcs( + x.dtype, ('gesdd', 'gesvd')) # this has to use overwrite_a=False in case we need to fall back to gesvd u, s, v, info = gesdd(x, compute_uv=True, lwork=lwork[0], full_matrices=False, overwrite_a=False) @@ -73,7 +71,7 @@ def _repeated_svd(x, lwork, overwrite_a=False): u, s, v, info = gesvd(x, compute_uv=True, lwork=lwork[1], full_matrices=False, overwrite_a=overwrite_a) if info > 0: - raise LinAlgError("SVD did not converge") + raise np.linalg.LinAlgError("SVD did not converge") if info < 0: raise ValueError('illegal value in %d-th argument of internal gesdd' % -info) @@ -83,8 +81,17 @@ def _repeated_svd(x, lwork, overwrite_a=False): ############################################################################### # linalg.eigh -dsyevd, = linalg.get_lapack_funcs(('syevd',), (_d,)) -zheevd, = linalg.get_lapack_funcs(('heevd',), (_z,)) +@functools.lru_cache(None) +def _get_evd(dtype): + from scipy import linalg + x = np.empty(0, dtype) + if dtype == np.float64: + driver = 'syevd' + else: + assert dtype == np.complex128 + driver = 'heevd' + evr, = linalg.get_lapack_funcs((driver,), (x,)) + return evr, driver def eigh(a, overwrite_a=False, check_finite=True): @@ -108,15 +115,13 @@ def eigh(a, overwrite_a=False, check_finite=True): The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is the column ``v[:, i]``. """ + from scipy.linalg import LinAlgError + from scipy._lib._util import _asarray_validated # We use SYEVD, see https://github.com/scipy/scipy/issues/9212 if check_finite: a = _asarray_validated(a, check_finite=check_finite) - if a.dtype == np.float64: - evr, driver = dsyevd, 'syevd' - else: - assert a.dtype == np.complex128 - evr, driver = zheevd, 'heevd' - w, v, info = evr(a, lower=1, overwrite_a=overwrite_a) + evd, driver = _get_evd(a.dtype) + w, v, info = evd(a, lower=1, overwrite_a=overwrite_a) if info == 0: return w, v if info < 0: diff --git a/mne/utils/numerics.py b/mne/utils/numerics.py index 1d49eb7b53c..7c24daf44be 100644 --- a/mne/utils/numerics.py +++ b/mne/utils/numerics.py @@ -19,7 +19,6 @@ from datetime import datetime, timedelta, timezone import numpy as np -from scipy import sparse from ._logging import logger, warn, verbose from .check import check_random_state, _ensure_int, _validate_type @@ -670,6 +669,7 @@ def object_size(x, memo=None): size : int The estimated size in bytes of the object. """ + from scipy import sparse # Note: this will not process object arrays properly (since those only) # hold references if memo is None: @@ -739,6 +739,7 @@ def object_diff(a, b, pre=''): diffs : str A string representation of the differences. 
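The mne/utils/linalg.py changes above turn eager module-level ``get_blas_funcs``/``get_lapack_funcs`` calls into memoized helpers, so SciPy is imported, and each BLAS/LAPACK routine resolved, only on first use, while repeat calls hit the ``functools.lru_cache``. A self-contained sketch of the same pattern::

    import functools

    import numpy as np

    @functools.lru_cache(None)  # memoize per (dtype, names) pair
    def _get_lapack_funcs_demo(dtype, names):
        from scipy import linalg  # deferred until the first real call
        return linalg.get_lapack_funcs(names, (np.empty(0, dtype),))

    # First call imports scipy.linalg and resolves the routine; later
    # calls with the same arguments are cache lookups:
    dsyevd, = _get_lapack_funcs_demo(np.float64, ('syevd',))
    zheevd, = _get_lapack_funcs_demo(np.complex128, ('heevd',))

The cache is also why the new comment above says ``names`` should be a str or tuple of str: a list argument would be unhashable and ``lru_cache`` would raise.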
""" + from scipy import sparse out = '' if type(a) != type(b): # Deal with NamedInt and NamedFloat diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index d21cf821d74..6ad705b9eb5 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -18,7 +18,6 @@ from functools import partial import numpy as np -from scipy import linalg from ..defaults import DEFAULTS from ..fixes import einsum, _crop_colorbar, _get_img_fdata, _get_args @@ -2042,7 +2041,7 @@ def _glass_brain_crosshairs(params, x, y, z): def _cut_coords_to_ijk(cut_coords, img): - ijk = apply_trans(linalg.inv(img.affine), cut_coords) + ijk = apply_trans(np.linalg.inv(img.affine), cut_coords) ijk = np.clip(np.round(ijk).astype(int), 0, np.array(img.shape[:3]) - 1) return ijk @@ -3033,7 +3032,7 @@ def plot_sensors_connectivity(info, con, picks=None): con_nodes = list() con_val = list() for i, j in zip(ii, jj): - if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: + if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 88fbe9e1ff2..517982457f5 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -18,7 +18,6 @@ import warnings import numpy as np -from scipy import sparse from collections import OrderedDict from .colormap import calculate_lut @@ -1173,6 +1172,7 @@ def _configure_vertex_time_course(self): def _configure_picking(self): # get data for each hemi + from scipy import sparse for idx, hemi in enumerate(['vol', 'lh', 'rh']): hemi_data = self._data.get(hemi) if hemi_data is not None: @@ -2813,6 +2813,7 @@ def set_data_smoothing(self, n_steps): n_steps : int Number of smoothing steps. """ + from scipy import sparse from ...morph import _hemi_morph for hemi in ['lh', 'rh']: hemi_data = self._data.get(hemi) diff --git a/mne/viz/misc.py b/mne/viz/misc.py index 17bde9e0392..a9e9b4b8f51 100644 --- a/mne/viz/misc.py +++ b/mne/viz/misc.py @@ -21,7 +21,6 @@ from collections import defaultdict import numpy as np -from scipy import linalg from ..defaults import DEFAULTS from ..fixes import _get_img_fdata @@ -117,9 +116,10 @@ def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True, .. versionchanged:: 0.19 Approximate ranks for each channel type are shown with red dashed lines. 
""" - from ..cov import Covariance import matplotlib.pyplot as plt from matplotlib.colors import Normalize + from scipy import linalg + from ..cov import Covariance info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude) del cov, exclude From 971c290a0dd2fc564607c6eeff042bde371e3608 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Mon, 8 Feb 2021 11:07:35 -0500 Subject: [PATCH 115/387] MRG, MAINT: NumpyDoc validation in sphinx build (#8823) * WIP: NumpyDoc validation in sphinx build * FIX: Many fixes * FIX: Set * FIX: URL --- doc/conf.py | 18 +++++++++- mne/annotations.py | 8 ++++- mne/bem.py | 15 +++------ mne/coreg.py | 7 ++-- mne/datasets/eegbci/eegbci.py | 36 ++++++++++---------- mne/datasets/kiloword/kiloword.py | 8 +++-- mne/datasets/sleep_physionet/age.py | 12 ++++--- mne/datasets/sleep_physionet/temazepam.py | 13 +++---- mne/decoding/base.py | 1 - mne/decoding/search_light.py | 5 +-- mne/dipole.py | 1 - mne/epochs.py | 11 +++--- mne/event.py | 16 ++++++--- mne/fixes.py | 41 ++++++++++++----------- mne/io/base.py | 1 - mne/io/meas_info.py | 12 +++++-- mne/label.py | 21 +++++++++--- mne/preprocessing/_regress.py | 2 +- mne/preprocessing/nirs/_tddr.py | 5 +-- mne/report.py | 8 ++++- mne/simulation/source.py | 4 +-- mne/utils/mixin.py | 1 - requirements_doc.txt | 2 +- 23 files changed, 152 insertions(+), 96 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 79f06b10f5d..128ced06991 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -26,9 +26,10 @@ from numpydoc import docscrape import matplotlib import mne -from mne.viz import Brain # noqa +from mne.tests.test_docstring_parameters import error_ignores from mne.utils import (linkcode_resolve, # noqa, analysis:ignore _assert_no_instances, sizeof_fmt) +from mne.viz import Brain # noqa if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'): raise ImportError('Must have at least version 0.2 of sphinx-gallery, got ' @@ -689,3 +690,18 @@ def reset_warnings(gallery_conf, fname): 'mayavi.mlab.pipeline.surface', 'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame', } +numpydoc_validate = True +numpydoc_validation_checks = {'all'} | set(error_ignores) +numpydoc_validation_exclude = { # set of regex + # dict subclasses + r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys', + r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values', + # list subclasses + r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove', + r'\.sort', + # we currently don't document these properly (probably okay) + r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__', + r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__', + # copied from sklearn + r'mne\.utils\.deprecated', +} diff --git a/mne/annotations.py b/mne/annotations.py index e9388e2801d..3e4bdcd4cd2 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -227,7 +227,13 @@ def __repr__(self): return '<' + shorten(s, width=77, placeholder=' ...') + '>' def __len__(self): - """Return the number of annotations.""" + """Return the number of annotations. + + Returns + ------- + n_annot : int + The number of annotations. + """ return len(self.duration) def __add__(self, other): diff --git a/mne/bem.py b/mne/bem.py index 7ba81816a40..96880d6e2c3 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -846,11 +846,6 @@ def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): Can be "m" (default) or "mm". .. versionadded:: 0.12 - move_origin : bool - If True, allow the origin to vary. Otherwise, fix it at (0, 0, 0). 
- - .. versionadded:: 0.20 - %(verbose)s Returns @@ -1062,7 +1057,7 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, gcaatlas : bool Specify the --brain_atlas option for mri_watershed. preflood : int - Change the preflood height + Change the preflood height. show : bool Show surfaces to visually inspect all three BEM surfaces (recommended). @@ -1826,6 +1821,10 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, .. versionadded:: 0.18 %(verbose)s + See Also + -------- + convert_flash_mris + Notes ----- This program assumes that FreeSurfer is installed and sourced properly. @@ -1833,10 +1832,6 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, This function extracts the BEM surfaces (outer skull, inner skull, and outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30 degrees, in mgz format. - - See Also - -------- - convert_flash_mris """ from .viz.misc import plot_bem diff --git a/mne/coreg.py b/mne/coreg.py index e5d9a23380f..620903ceeaa 100644 --- a/mne/coreg.py +++ b/mne/coreg.py @@ -1226,11 +1226,8 @@ def get_mni_fiducials(subject, subjects_dir=None, verbose=None): Parameters ---------- - subject : str - Name of the mri subject - subjects_dir : None | str - Override the SUBJECTS_DIR environment variable - (sys.environ['SUBJECTS_DIR']) + %(subject)s + %(subjects_dir)s %(verbose)s Returns diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py index 8b355158b57..2b8eb4751d3 100644 --- a/mne/datasets/eegbci/eegbci.py +++ b/mne/datasets/eegbci/eegbci.py @@ -98,19 +98,7 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, subject : int The subject to use. Can be in the range of 1-109 (inclusive). runs : int | list of int - The runs to use. The runs correspond to: - - ========= =================================== - run task - ========= =================================== - 1 Baseline, eyes open - 2 Baseline, eyes closed - 3, 7, 11 Motor execution: left vs right hand - 4, 8, 12 Motor imagery: left vs right hand - 5, 9, 13 Motor execution: hands vs feet - 6, 10, 14 Motor imagery: hands vs feet - ========= =================================== - + The runs to use. See Notes for details. path : None | str Location of where to look for the EEGBCI data storing location. If None, the environment variable or config parameter @@ -123,6 +111,8 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. + base_url : str + The URL root for the data. 
%(verbose)s Returns @@ -132,11 +122,23 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, Notes ----- - For example, one could do: + The run numbers correspond to: + + ========= =================================== + run task + ========= =================================== + 1 Baseline, eyes open + 2 Baseline, eyes closed + 3, 7, 11 Motor execution: left vs right hand + 4, 8, 12 Motor imagery: left vs right hand + 5, 9, 13 Motor execution: hands vs feet + 6, 10, 14 Motor imagery: hands vs feet + ========= =================================== + + For example, one could do:: >>> from mne.datasets import eegbci - >>> eegbci.load_data(1, [4, 10, 14],\ - os.getenv('HOME') + '/datasets') # doctest:+SKIP + >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the @@ -153,7 +155,7 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, PhysioBank, PhysioToolkit, and PhysioNet: Components of a New Research Resource for Complex Physiologic Signals. Circulation 101(23):e215-e220 - """ + """ # noqa: E501 if not hasattr(runs, '__iter__'): runs = [runs] diff --git a/mne/datasets/kiloword/kiloword.py b/mne/datasets/kiloword/kiloword.py index f72310a1ea3..0623816b1bf 100644 --- a/mne/datasets/kiloword/kiloword.py +++ b/mne/datasets/kiloword/kiloword.py @@ -7,8 +7,7 @@ @verbose def data_path(path=None, force_update=False, update_path=True, download=True, verbose=None): - """ - Get path to local copy of the kiloword dataset. + """Get path to local copy of the kiloword dataset. This is the dataset from [1]_. @@ -27,6 +26,11 @@ def data_path(path=None, force_update=False, update_path=True, download=True, update_path : bool | None If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python config to the given path. If None, the user is prompted. + download : bool + If False and the kiloword dataset has not been downloaded yet, + it will not be downloaded and the path will be returned as + '' (empty string). This is mostly used for debugging purposes + and can be safely ignored by most users. %(verbose)s Returns diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py index 0700f8c5a3b..2c96e72d737 100644 --- a/mne/datasets/sleep_physionet/age.py +++ b/mne/datasets/sleep_physionet/age.py @@ -16,7 +16,7 @@ @verbose -def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, +def fetch_data(subjects, recording=(1, 2), path=None, force_update=False, update_path=None, base_url=BASE_URL, on_missing='raise', verbose=None): # noqa: D301 """Get paths to local copies of PhysioNet Polysomnography dataset files. @@ -52,6 +52,8 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. + base_url : str + The URL root. on_missing : 'raise' | 'warn' | 'ignore' What to do if one or several recordings are not available. Valid keys are 'raise' | 'warn' | 'ignore'. Default is 'error'. If on_missing @@ -64,6 +66,10 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, paths : list List of local data paths of the given type. 
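One behavioral subtlety in the age.py hunk above: the default changed from ``recording=[1, 2]`` to ``recording=(1, 2)``. Beyond style, this removes the classic mutable-default hazard, since a default list is created once and shared by every call. An illustration of the failure mode the tuple rules out (demo code, not from MNE)::

    def fetch_bad(recording=[1, 2]):
        recording.append(99)   # mutates the one shared default object
        return recording

    fetch_bad()  # [1, 2, 99]
    fetch_bad()  # [1, 2, 99, 99]  <- state leaked across calls

    def fetch_good(recording=(1, 2)):
        recording = list(recording) + [99]  # copy; default stays (1, 2)
        return recording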
+ See Also + -------- + mne.datasets.sleep_physionet.temazepam.fetch_data + Notes ----- For example, one could do: @@ -83,10 +89,6 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, PhysioBank, PhysioToolkit, and PhysioNet: Components of a New Research Resource for Complex Physiologic Signals. Circulation 101(23):e215-e220 - - See Also - -------- - :func:`mne.datasets.sleep_physionet.temazepam.fetch_data` """ # noqa: E501 records = np.loadtxt(AGE_SLEEP_RECORDS, skiprows=1, diff --git a/mne/datasets/sleep_physionet/temazepam.py b/mne/datasets/sleep_physionet/temazepam.py index 9eec4ba5ef3..49cda98a240 100644 --- a/mne/datasets/sleep_physionet/temazepam.py +++ b/mne/datasets/sleep_physionet/temazepam.py @@ -16,8 +16,7 @@ @verbose -def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], - path=None, force_update=False, +def fetch_data(subjects, *, path=None, force_update=False, update_path=None, base_url=BASE_URL, verbose=None): """Get paths to local copies of PhysioNet Polysomnography dataset files. @@ -45,6 +44,8 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. + base_url : str + The base URL to download from. %(verbose)s Returns @@ -52,6 +53,10 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], paths : list List of local data paths of the given type. + See Also + -------- + mne.datasets.sleep_physionet.age.fetch_data + Notes ----- For example, one could do: @@ -71,10 +76,6 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], PhysioBank, PhysioToolkit, and PhysioNet: Components of a New Research Resource for Complex Physiologic Signals. Circulation 101(23):e215-e220 - - See Also - -------- - :func:`mne.datasets.sleep_physionet.age.fetch_data` """ records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS, skiprows=1, diff --git a/mne/decoding/base.py b/mne/decoding/base.py index 2b665a6d0d0..b2183ae76bc 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -144,7 +144,6 @@ def fit_transform(self, X, y): ------- y_pred : array, shape (n_samples,) The predicted targets. - """ return self.fit(X, y).transform(X) diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py index 14f4387a571..8876aeb5057 100644 --- a/mne/decoding/search_light.py +++ b/mne/decoding/search_light.py @@ -114,8 +114,9 @@ def fit_transform(self, X, y, **fit_params): X : array, shape (n_samples, nd_features, n_tasks) The training input samples. For each task, a clone estimator is fitted independently. The feature dimension can be - multidimensional e.g. - X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + multidimensional, e.g.:: + + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) y : array, shape (n_samples,) | (n_samples, n_targets) The target values. **fit_params : dict of string -> object diff --git a/mne/dipole.py b/mne/dipole.py index f40cb5fd3e4..3eaf12030be 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -328,7 +328,6 @@ def __len__(self): >>> len(dipoles) # doctest: +SKIP 10 - """ return self.pos.shape[0] diff --git a/mne/epochs.py b/mne/epochs.py index 8ca334ef23c..014b2064da6 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -2400,9 +2400,14 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True): copy : bool Whether to return a new instance or modify in place. + Returns + ------- + epochs : instance of Epochs + The modified epochs. 
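In the temazepam.py hunk above, the ``recording`` parameter is removed and a bare ``*`` is added, making every remaining option keyword-only. A likely motivation: old code that passed ``recording`` positionally now fails immediately instead of silently binding that value to ``path``. Sketched with a hypothetical ``fetch_demo`` (not the real function)::

    def fetch_demo(subjects, *, path=None, force_update=False):
        return subjects, path, force_update

    fetch_demo([0, 1], path='~/mne_data')  # fine: keyword use unchanged
    fetch_demo([0, 1], '~/mne_data')       # TypeError: positional not allowed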
+ Notes ----- - This For example (if epochs.event_id was {'Left': 1, 'Right': 2}: + This For example (if epochs.event_id was ``{'Left': 1, 'Right': 2}``:: combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12}) @@ -3187,7 +3192,7 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, origin='auto', weight_all=True, int_order=8, ext_order=3, destination=None, ignore_ref=False, return_mapping=False, mag_scale=100., verbose=None): - u"""Average data using Maxwell filtering, transforming using head positions. + """Average data using Maxwell filtering, transforming using head positions. Parameters ---------- @@ -3206,7 +3211,6 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, receive uniform weight per epoch. %(maxwell_int)s %(maxwell_ext)s - %(maxwell_reg)s %(maxwell_dest)s %(maxwell_ref)s return_mapping : bool @@ -3244,7 +3248,6 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic multichannel data: The signal space separation method," Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005. - .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements of children in MEG: Quantification, effects on source estimation, and compensation. NeuroImage 40:541–550, 2008. diff --git a/mne/event.py b/mne/event.py index ee2a2d84b6f..3d94809a924 100644 --- a/mne/event.py +++ b/mne/event.py @@ -98,7 +98,7 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, tmax : float The upper limit border in seconds from the target event. new_id : int - new_id for the new event + New ID for the new event. fill_na : int | None Fill event to be inserted if target is not available within the time window specified. If None, the 'null' events will be dropped. @@ -106,9 +106,9 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, Returns ------- new_events : ndarray - The new defined events + The new defined events. lag : ndarray - time lag between reference and target in milliseconds. + Time lag between reference and target in milliseconds. """ if new_id is None: new_id = reference_id @@ -823,7 +823,7 @@ def shift_time_events(events, ids, tshift, sfreq): Parameters ---------- events : array, shape=(n_events, 3) - The events + The events. ids : ndarray of int | None The ids of events to shift. tshift : float @@ -1143,7 +1143,13 @@ def __getitem__(self, item): return cats[0] if len(cats) == 1 else cats def __len__(self): - """Return number of averaging categories marked active in DACQ.""" + """Return number of averaging categories marked active in DACQ. + + Returns + ------- + n_cat : int + The number of categories. + """ return len(self.categories) def _events_from_acq_pars(self): diff --git a/mne/fixes.py b/mne/fixes.py index 593dbf3c327..d289d6c2625 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -317,7 +317,7 @@ def is_regressor(estimator): class BaseEstimator(object): - """Base class for all estimators in scikit-learn + """Base class for all estimators in scikit-learn. Notes ----- @@ -358,13 +358,13 @@ def get_params(self, deep=True): Parameters ---------- - deep : boolean, optional + deep : bool, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- - params : mapping of string to any + params : dict Parameter names mapped to their values. 
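The ``Returns`` sections added to the ``__len__`` methods in this commit, along with the See Also placement and parameter-description fixes throughout, are the kinds of issues the new ``numpydoc_validation_checks`` setting in this commit's doc/conf.py hunk now enforces during the Sphinx build. Assuming numpydoc >= 1.1, the same validator can also be run by hand on a fully qualified name, e.g.::

    from numpydoc.validate import validate

    report = validate('mne.event.AcqParserFIF.__len__')
    for code, msg in report['errors']:
        print(code, msg)  # e.g. RT01 if the Returns section were missing

which is handy for checking a docstring fix without rebuilding the docs.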
""" out = dict() @@ -392,13 +392,21 @@ def get_params(self, deep=True): def set_params(self, **params): """Set the parameters of this estimator. + The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``__`` so that it's possible to update each component of a nested object. + + Parameters + ---------- + **params : dict + Parameters. + Returns ------- - self + inst : instance + The object. """ if not params: # Simple optimisation to gain speed (inspect is slow) @@ -607,23 +615,21 @@ def get_precision(self): return precision def fit(self, X, y=None): - """Fits the Maximum Likelihood Estimator covariance model - according to the given training data and parameters. + """Fit the Maximum Likelihood Estimator covariance model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. - - y : not used, present for API consistence purpose. + y : ndarray | None + Not used, present for API consistency. Returns ------- self : object Returns self. - - """ + """ # noqa: E501 # X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) @@ -636,8 +642,9 @@ def fit(self, X, y=None): return self def score(self, X_test, y=None): - """Computes the log-likelihood of a Gaussian data set with - `self.covariance_` as an estimator of its covariance matrix. + """Compute the log-likelihood of a Gaussian dataset. + + Uses ``self.covariance_`` as an estimator of its covariance matrix. Parameters ---------- @@ -646,15 +653,14 @@ def score(self, X_test, y=None): the number of samples and n_features is the number of features. X_test is assumed to be drawn from the same distribution than the data used in fit (including centering). - - y : not used, present for API consistence purpose. + y : ndarray | None + Not used, present for API consistency. Returns ------- res : float The likelihood of the data set with `self.covariance_` as an estimator of its covariance matrix. - """ # compute empirical covariance of the test set test_cov = empirical_covariance( @@ -667,23 +673,19 @@ def score(self, X_test, y=None): def error_norm(self, comp_cov, norm='frobenius', scaling=True, squared=True): """Computes the Mean Squared Error between two covariance estimators. - (In the sense of the Frobenius norm). Parameters ---------- comp_cov : array-like, shape = [n_features, n_features] The covariance to compare with. - norm : str The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error ``(comp_cov - self.covariance_)``. - scaling : bool If True (default), the squared error norm is divided by n_features. If False, the squared error norm is not rescaled. - squared : bool Whether to compute the squared error norm or the error norm. If True (default), the squared error norm is returned. @@ -693,7 +695,6 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True, ------- The Mean Squared Error (in the sense of the Frobenius norm) between `self` and `comp_cov` covariance estimators. 
- """ from scipy import linalg # compute the error diff --git a/mne/io/base.py b/mne/io/base.py index ebef6bbaff4..476bb33d189 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1507,7 +1507,6 @@ def __len__(self): >>> len(raw) # doctest: +SKIP 1000 - """ return self.n_times diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 236b3084a41..2431183eeea 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -201,8 +201,16 @@ class Info(dict, MontageMixin): modified by various MNE-Python functions or methods (which have safeguards to ensure all fields remain in sync). - This class should not be instantiated directly. To create a measurement - information structure, use :func:`mne.create_info`. + .. warning:: This class should not be instantiated directly. To create a + measurement information structure, use + :func:`mne.create_info`. + + Parameters + ---------- + *args : list + Arguments. + **kwargs : dict + Keyword arguments. Attributes ---------- diff --git a/mne/label.py b/mne/label.py index 00a8386a71e..d316503069c 100644 --- a/mne/label.py +++ b/mne/label.py @@ -269,7 +269,13 @@ def __repr__(self): # noqa: D105 return "