diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index a693bc6724c..8c0e6f51696 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -65,6 +65,8 @@ Bugs
 - Pass ``rank`` everyhwere in forward preparation for source imaging. This bug affected sparse solvers when using maxfilter data, by `Alex Gramfort`_ (:gh:`8368`)
+- Fix bug in :func:`mne.viz.plot_alignment` where ECoG and sEEG channels were not plotted and fNIRS channels were always plotted in the head coordinate frame by `Eric Larson`_
+
 API changes
 ~~~~~~~~~~~
diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py
index af72f305e04..800723236b5 100644
--- a/mne/viz/_3d.py
+++ b/mne/viz/_3d.py
@@ -887,7 +887,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
     car_loc = list()
     eeg_loc = list()
     eegp_loc = list()
-    other_loc = {key: list() for key in other_keys}
+    other_loc = dict()
     if len(eeg) > 0:
         eeg_loc = np.array([info['chs'][k]['loc'][:3] for k in eeg_picks])
         if len(eeg_loc) > 0:
@@ -947,27 +947,32 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
     del dig
     for key, picks in other_picks.items():
         if other_bools[key] and len(picks):
+            title = DEFAULTS["titles"][key] if key != 'fnirs' else 'fNIRS'
+            if key != 'fnirs' or 'channels' in fnirs:
+                other_loc[key] = np.array([info['chs'][pick]['loc'][:3]
+                                           for pick in picks])
+                logger.info(
+                    f'Plotting {len(other_loc[key])} {title}'
+                    f' location{_pl(other_loc[key])}')
             if key == 'fnirs':
-                if 'channels' in fnirs:
-                    other_loc[key] = np.array([info['chs'][pick]['loc'][:3]
-                                               for pick in picks])
                 if 'sources' in fnirs:
                     other_loc['source'] = np.array(
                         [info['chs'][pick]['loc'][3:6]
                          for pick in picks])
                     logger.info('Plotting %d %s source%s'
                                 % (len(other_loc['source']),
-                                   key, _pl(other_loc['source'])))
+                                   title, _pl(other_loc['source'])))
                 if 'detectors' in fnirs:
                     other_loc['detector'] = np.array(
                         [info['chs'][pick]['loc'][6:9]
                          for pick in picks])
                     logger.info('Plotting %d %s detector%s'
                                 % (len(other_loc['detector']),
-                                   key, _pl(other_loc['detector'])))
-                other_keys = sorted(other_loc.keys())
-            logger.info('Plotting %d %s location%s'
-                        % (len(other_loc[key]), key, _pl(other_loc[key])))
+                                   title, _pl(other_loc['detector'])))
+    for v in other_loc.values():
+        v[:] = apply_trans(head_trans, v)
+    other_keys = sorted(other_loc)  # re-sort and only keep non-empty
+    del other_bools
 
     # initialize figure
     renderer = _get_renderer(fig, bgcolor=(0.5, 0.5, 0.5), size=(800, 800))
@@ -1072,10 +1077,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None,
                       fwd_nn[:, ori, 2], color=color, mode='arrow',
                       scale=1.5e-3)
     if 'pairs' in fnirs and len(fnirs_picks) > 0:
-        fnirs_loc = np.array([info['chs'][k]['loc'][3:9] for k in fnirs_picks])
-        logger.info('Plotting %d fnirs pairs' % (fnirs_loc.shape[0]))
-        renderer.tube(origin=fnirs_loc[:, :3],
-                      destination=fnirs_loc[:, 3:])
+        origin = apply_trans(head_trans, np.array(
+            [info['chs'][k]['loc'][3:6] for k in fnirs_picks]))
+        destination = apply_trans(head_trans, np.array(
+            [info['chs'][k]['loc'][6:9] for k in fnirs_picks]))
+        logger.info(f'Plotting {origin.shape[0]} fNIRS pair{_pl(origin)}')
+        renderer.tube(origin=origin, destination=destination)
 
     renderer.set_camera(azimuth=90, elevation=90, distance=0.6,
                         focalpoint=(0., 0., 0.))
diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py
index c0237ed2145..35224d518ed 100644
--- a/mne/viz/tests/test_3d.py
+++ b/mne/viz/tests/test_3d.py
@@ -227,11 +227,16 @@ def test_plot_alignment(tmpdir, renderer):
     evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
                                             'EEG 002': 'seeg'})
     with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
-        plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
-                       trans=trans_fname, subjects_dir=subjects_dir,
-                       surfaces=['white', 'outer_skin', 'outer_skull'],
-                       meg=['helmet', 'sensors'],
-                       eeg=['original', 'projected'], ecog=True, seeg=True)
+        with catch_logging() as log:
+            plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
+                           trans=trans_fname, subjects_dir=subjects_dir,
+                           surfaces=['white', 'outer_skin', 'outer_skull'],
+                           meg=['helmet', 'sensors'],
+                           eeg=['original', 'projected'], ecog=True, seeg=True,
+                           verbose=True)
+        log = log.getvalue()
+        assert '1 ECoG location' in log
+        assert '1 sEEG location' in log
     renderer.backend._close_all()
 
     sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
@@ -334,60 +339,24 @@ def test_plot_alignment(tmpdir, renderer):
                    trans=trans_fname, fwd=fwd,
                    surfaces='white', coord_frame='head')
 
-    # fNIRS
+    # fNIRS (default is pairs)
     info = read_raw_nirx(nirx_fname).info
     with catch_logging() as log:
         plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
     log = log.getvalue()
-    assert '26 fnirs pairs' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs='channels')
-    log = log.getvalue()
-    assert '26 fnirs locations' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs='pairs')
-    log = log.getvalue()
-    assert '26 fnirs pairs' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs='sources')
-    log = log.getvalue()
-    assert '26 fnirs sources' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs='detectors')
-    log = log.getvalue()
-    assert '26 fnirs detectors' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs=['channels', 'pairs'])
-    log = log.getvalue()
-    assert '26 fnirs pairs' in log
-    assert '26 fnirs locations' in log
-
-    with catch_logging() as log:
-        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs=['pairs', 'sources', 'detectors'])
-    log = log.getvalue()
-    assert '26 fnirs pairs' in log
-    assert '26 fnirs sources' in log
-    assert '26 fnirs detectors' in log
+    assert '26 fNIRS pairs' in log
+    assert '26 fNIRS locations' not in log
+    assert '26 fNIRS sources' not in log
+    assert '26 fNIRS detectors' not in log
 
     with catch_logging() as log:
         plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
-                       fnirs=['channels', 'pairs', 'sources', 'detectors'])
+                       fnirs=['channels', 'sources', 'detectors'])
     log = log.getvalue()
-    assert '26 fnirs pairs' in log
-    assert '26 fnirs locations' in log
-    assert '26 fnirs sources' in log
-    assert '26 fnirs detectors' in log
+    assert '26 fNIRS pairs' not in log
+    assert '26 fNIRS locations' in log
+    assert '26 fNIRS sources' in log
+    assert '26 fNIRS detectors' in log
 
     renderer.backend._close_all()
diff --git a/tutorials/misc/plot_ecog.py b/tutorials/misc/plot_ecog.py
index f7fe937eb62..31231fcc3dd 100644
--- a/tutorials/misc/plot_ecog.py
+++ b/tutorials/misc/plot_ecog.py
@@ -33,8 +33,8 @@
 
 ###############################################################################
 # Let's load some ECoG electrode locations and names, and turn them into
-# a :class:`mne.channels.DigMontage` class.
-# First, use pandas to read in the .tsv file.
+# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the
+# ``.tsv`` file.
 # In this tutorial, the electrode coordinates are assumed to be in meters
 
 elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',
diff --git a/tutorials/preprocessing/plot_70_fnirs_processing.py b/tutorials/preprocessing/plot_70_fnirs_processing.py
index 448635a475b..9cb3fc39785 100644
--- a/tutorials/preprocessing/plot_70_fnirs_processing.py
+++ b/tutorials/preprocessing/plot_70_fnirs_processing.py
@@ -14,7 +14,6 @@
 Here we will work with the :ref:`fNIRS motor data `.
 
 """
-# sphinx_gallery_thumbnail_number = 1
 
 import os
 import numpy as np
@@ -44,13 +43,13 @@
 
 fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white')
 fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True,
-                             subject='fsaverage',
+                             subject='fsaverage', coord_frame='mri',
                              trans='fsaverage', surfaces=['brain'],
                              fnirs=['channels', 'pairs',
                                     'sources', 'detectors'],
                              subjects_dir=subjects_dir, fig=fig)
-mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6)
-
+mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=60, distance=0.4,
+                    focalpoint=(0., -0.01, 0.02))
 
 ###############################################################################
 # Selecting channels appropriate for detecting neural responses
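A quick way to exercise the behaviour the updated tests assert (source-detector pairs drawn by default, and ``fnirs=['channels', 'sources', 'detectors']`` switching to per-optode locations that now follow ``coord_frame``) is a short script like the sketch below. This is only an illustrative sketch, not part of the patch: ``raw_path`` and ``subjects_dir`` are placeholders for a NIRx recording folder and a FreeSurfer subjects directory containing ``fsaverage``, and it assumes an MNE build that includes these changes.

    import mne

    # Placeholders (not from the patch): point these at a NIRx recording
    # folder and a FreeSurfer subjects directory that contains 'fsaverage'.
    raw_path = 'path/to/nirx_recording'
    subjects_dir = 'path/to/freesurfer/subjects'
    info = mne.io.read_raw_nirx(raw_path, verbose=False).info

    # Default fnirs='pairs': only source-detector tubes are drawn; with
    # verbose=True the log should mention 'fNIRS pairs' but no 'fNIRS
    # locations'.
    mne.viz.plot_alignment(info, subject='fsaverage', trans='fsaverage',
                           subjects_dir=subjects_dir, surfaces=(),
                           coord_frame='mri', verbose=True)

    # Explicit selection: channel midpoints plus separate source and detector
    # locations, now transformed consistently into the requested coord_frame.
    mne.viz.plot_alignment(info, subject='fsaverage', trans='fsaverage',
                           subjects_dir=subjects_dir, surfaces=(),
                           coord_frame='mri',
                           fnirs=['channels', 'sources', 'detectors'],
                           verbose=True)

The verbose log messages touched by this patch ('Plotting N fNIRS pairs', 'Plotting N fNIRS locations', and the ECoG/sEEG location lines) make it easy to confirm from the console which elements were actually rendered.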