diff --git a/examples/inverse/plot_source_space_snr.py b/examples/inverse/plot_source_space_snr.py new file mode 100644 index 00000000000..4191665d597 --- /dev/null +++ b/examples/inverse/plot_source_space_snr.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +""" +=============================== +Computing source space SNR +=============================== + +This example shows how to compute and plot source space SNR as in [1]_. +""" +# Author: Padma Sundaram +# Kaisu Lankinen +# +# License: BSD (3-clause) + +# sphinx_gallery_thumbnail_number = 2 + +import mne +from mne.datasets import sample +from mne.minimum_norm import make_inverse_operator, apply_inverse +import numpy as np +import matplotlib.pyplot as plt + +print(__doc__) + +data_path = sample.data_path() +subjects_dir = data_path + '/subjects' + +# Read data +fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' +evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory', + baseline=(None, 0)) +fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' +fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif' +fwd = mne.read_forward_solution(fname_fwd) +cov = mne.read_cov(fname_cov) + +############################################################################### +# MEG-EEG +# ------- + +# Read inverse operator: +inv_op = make_inverse_operator(evoked.info, fwd, cov, fixed=True, verbose=True) + +# Calculate MNE: +snr = 3.0 +lambda2 = 1.0 / snr ** 2 +stc = apply_inverse(evoked, inv_op, lambda2, 'MNE', verbose=True) + +# Calculate SNR in source space: +snr_stc = stc.estimate_snr(evoked.info, fwd, cov) + +# Plot an average SNR across source points over time: +ave = np.mean(snr_stc.data, axis=0) + +fig, ax = plt.subplots() +ax.plot(evoked.times, ave) +ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG') +fig.tight_layout() + +# Find time point of maximum SNR: +maxidx = np.argmax(ave) + +# Plot SNR on source space at the time point of maximum SNR: +kwargs = 
dict(initial_time=evoked.times[maxidx], hemi='split', + views=['lat', 'med'], subjects_dir=subjects_dir, size=(600, 600), + clim=dict(kind='value', lims=(-100, -70, -40)), + transparent=True, colormap='viridis') +snr_stc.plot(**kwargs) + +############################################################################### +# EEG +# --- +# Next we do the same for EEG and plot the result on the cortex: + +evoked_eeg = evoked.copy().pick_types(eeg=True, meg=False) +inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True, + verbose=True) +stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, 'MNE', verbose=True) +snr_stc_eeg = stc_eeg.estimate_snr(evoked_eeg.info, fwd, cov) +snr_stc_eeg.plot(**kwargs) + +############################################################################### +# MEG +# --- +# Finally we do this for MEG: + +evoked_meg = evoked.copy().pick_types(eeg=False, meg=True) +inv_op_meg = make_inverse_operator(evoked_meg.info, fwd, cov, fixed=True, + verbose=True) +stc_meg = apply_inverse(evoked_meg, inv_op_meg, lambda2, 'MNE', verbose=True) +snr_stc_meg = stc_meg.estimate_snr(evoked_meg.info, fwd, cov) +snr_stc_meg.plot(**kwargs) + +############################################################################## +# References +# ---------- +# .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, D., +# Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). Mapping the +# Signal-To-Noise-Ratios of Cortical Sources in Magnetoencephalography +# and Electroencephalography. Human Brain Mapping, 30(4), 1077–1086. +# doi:10.1002/hbm.20571 diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 73eefd2e769..6b71268aacf 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -1505,6 +1505,77 @@ def extract_label_time_course(self, labels, src, mode='mean_flip', return label_tc + @verbose + def estimate_snr(self, info, fwd, cov, verbose=None): + r"""Compute time-varying SNR in the source space. 
+ + This function should only be used with source estimates with units + nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA). + + .. warning:: This function currently only works properly for fixed + orientation. + + Parameters + ---------- + info : instance of Info + The measurement info. + fwd : instance of Forward + The forward solution used to create the source estimate. + cov : instance of Covariance + The noise covariance used to estimate the resting cortical + activations. Should be an evoked covariance, not empty room. + %(verbose)s + + Returns + ------- + snr_stc : instance of SourceEstimate + The source estimate with the SNR computed. + + Notes + ----- + We define the SNR in decibels for each source location at each + time point as: + + .. math:: + + {\rm SNR} = 10\log_{10}[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}] + + where :math:`b_k` is the signal on sensor :math:`k` provided by the + forward model for a source with unit amplitude, :math:`a` is the + source amplitude, :math:`N` is the number of sensors, and + :math:`s_k^2` is the noise variance on sensor :math:`k`. + + References + ---------- + .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, + D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). + Mapping the Signal-To-Noise-Ratios of Cortical Sources in + Magnetoencephalography and Electroencephalography. + Human Brain Mapping, 30(4), 1077–1086. 
doi:10.1002/hbm.20571 + """ + from .forward import convert_forward_solution + from .minimum_norm.inverse import _prepare_forward + + if (self.data >= 0).all(): + warn_('This STC appears to be from free orientation, currently SNR' + ' function is valid only for fixed orientation') + + fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False) + + # G is gain matrix [ch x src], cov is noise covariance [ch x ch] + G, _, _, _, _, _, _, cov, _ = _prepare_forward( + fwd, info, cov, fixed=True, loose=0, rank=None, pca=False, + use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro', + allow_fixed_depth=False, limit=None) + G = G['sol']['data'] + n_channels = cov['dim'] # number of sensors/channels + b_k2 = (G * G).T + s_k2 = np.diag(cov['data']) + scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True) + snr_stc = self.copy() + snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling) + return snr_stc + def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs', vert_as_index=False, time_as_index=False): """Get location and latency of peak amplitude.