20 changes: 12 additions & 8 deletions examples/inverse/plot_dics_source_power.py
@@ -41,13 +41,9 @@

raw = mne.io.read_raw_fif(raw_fname)

# Set picks, use a single sensor type
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')

Member commented: Hooray!

# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks,
preload=True)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)

# Read forward operator and point to freesurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
@@ -70,16 +66,24 @@
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)

###############################################################################
# To compute the source power for a frequency band, rather than each frequency
# separately, we average the CSD objects across frequencies.
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()

###############################################################################
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power')
filters = make_dics(epochs.info, fwd, csd, noise_csd=csd_baseline,
pick_ori='max-power', real_filter=True)

###############################################################################
# Applying DICS spatial filters separately to the CSD computed using the
# baseline and the CSD computed during the ERS activity.
baseline_source_power, freqs = apply_dics_csd(csd_baseline.mean(), filters)
beta_source_power, freqs = apply_dics_csd(csd_ers.mean(), filters)
baseline_source_power, freqs = apply_dics_csd(csd_baseline, filters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)

###############################################################################
# Visualizing source power during ERS activity relative to the baseline power.
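The plotting code at the end of the example is collapsed in this view. As a rough, hedged sketch of what that final step typically looks like (the exact plotting arguments are assumptions, not taken from this diff; subject and subjects_dir are assumed to be defined earlier in the script):

# Hedged sketch: express ERS source power relative to baseline and plot it.
stc = beta_source_power / baseline_source_power
brain = stc.plot(hemi='both', subjects_dir=subjects_dir, subject=subject,
                 time_label='DICS source power in the beta band')
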
46 changes: 35 additions & 11 deletions mne/beamformer/_dics.py
@@ -8,9 +8,10 @@
# License: BSD (3-clause)
import numpy as np

from ..io.pick import pick_info
from ..utils import (logger, verbose, warn, _check_one_ch_type,
_check_channels_spatial_filter, _check_rank,
_check_option)
_check_option, _check_info_inv)
from ..forward import _subject_from_forward
from ..minimum_norm.inverse import combine_xyz, _check_reference
from ..source_estimate import _make_stc, _get_src_type
@@ -21,8 +22,8 @@


@verbose
def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,
rank=None, inversion='single', weight_norm=None,
def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None,
pick_ori=None, rank=None, inversion='single', weight_norm=None,
normalize_fwd=True, real_filter=False, reduce_rank=False,
verbose=None):
"""Compute a Dynamic Imaging of Coherent Sources (DICS) spatial filter.
@@ -49,6 +50,14 @@ def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,
reg : float
The regularization to apply to the cross-spectral density before
computing the inverse.
noise_csd : instance of CrossSpectralDensity | None
Noise cross-spectral density (CSD) matrices. If provided, whitening
will be done. The noise CSDs need to have been computed for the same
frequencies as the data CSDs. Providing noise CSDs is mandatory if you
mix sensor types, e.g. gradiometers with magnetometers or EEG with
MEG.

.. versionadded:: 0.20

Member commented on the noise_csd description above: The noise CSD matrices need to be computed

@wmvanvliet (Contributor, Author) replied, Nov 6, 2019: My reasoning with the grammar was that since you are supplying the CSDs as a parameter to this function, they have already been computed.

label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal' | 'max-power'
@@ -191,19 +200,27 @@ def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,
exp = None # turn off depth weighting entirely
combine_xyz = False

_check_one_ch_type('dics', info, forward)
_check_one_ch_type('dics', info, forward, csd, noise_csd)

# pick info, get gain matrix, etc.
_, info, proj, vertices, G, _, nn, orient_std = _prepare_beamformer_input(
info, forward, label, pick_ori,
combine_xyz=combine_xyz, exp=exp)
subject = _subject_from_forward(forward)
src_type = _get_src_type(forward['src'], vertices)
del forward
ch_names = list(info['ch_names'])

csd_picks = [csd.ch_names.index(ch) for ch in ch_names]

if noise_csd is not None:
# Use the same noise CSD for all frequencies
if len(noise_csd.frequencies) > 1:
noise_csd = noise_csd.mean()
noise_csd = noise_csd.get_data(as_cov=True)
if real_filter:
noise_csd['data'] = noise_csd['data'].real

_, _, proj, vertices, G, whitener, nn, orient_std = \
_prepare_beamformer_input(
info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank,
pca=False, combine_xyz=combine_xyz, exp=exp)

logger.info('Computing DICS spatial filters...')
Ws = []
for i, freq in enumerate(frequencies):
@@ -212,13 +229,15 @@ def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,
(freq, i + 1, n_freqs))

Cm = csd.get_data(index=i)

if real_filter:
Cm = Cm.real

# Ensure the CSD is in the same order as the leadfield
Cm = Cm[csd_picks, :][:, csd_picks]

# Whiten the CSD
Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T))

# compute spatial filter
W = _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori,
reduce_rank, rank=rank, inversion=inversion,
@@ -227,12 +246,13 @@ def make_dics(info, forward, csd, reg=0.05, label=None, pick_ori=None,

Ws = np.array(Ws)

src_type = _get_src_type(forward['src'], vertices)
filters = Beamformer(
kind='DICS', weights=Ws, csd=csd, ch_names=ch_names, proj=proj,
vertices=vertices, subject=subject, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
normalize_fwd=bool(normalize_fwd), src_type=src_type,
n_orient=n_orient if pick_ori is None else 1)
n_orient=n_orient if pick_ori is None else 1, whitener=whitener)

return filters

@@ -434,6 +454,7 @@ def apply_dics_csd(csd, filters, verbose=None):
vertices = filters['vertices']
n_orient = filters['n_orient']
subject = filters['subject']
whitener = filters['whitener']
n_sources = np.sum([len(v) for v in vertices])

# If CSD is summed over multiple frequencies, take the average frequency
@@ -455,6 +476,9 @@
Cm = Cm[csd_picks, :][:, csd_picks]
W = filters['weights'][i]

# Whiten the CSD
Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T))

source_power[:, i] = _compute_power(Cm, W, n_orient)

logger.info('[done]')
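To make the new whitening step concrete, here is a small self-contained NumPy sketch (illustrative only, not MNE code) of what whitening a data CSD with a whitener derived from a noise CSD amounts to:

import numpy as np

rng = np.random.default_rng(0)
n_ch = 4

# Toy Hermitian matrices standing in for the noise CSD and the data CSD.
A = rng.standard_normal((n_ch, n_ch)) + 1j * rng.standard_normal((n_ch, n_ch))
noise_csd = A @ A.conj().T
B = rng.standard_normal((n_ch, n_ch)) + 1j * rng.standard_normal((n_ch, n_ch))
data_csd = B @ B.conj().T

# Whitener built from the noise CSD: W = Lambda ** -0.5 @ U.conj().T, so that
# W @ noise_csd @ W.conj().T is (numerically) the identity matrix.
eigvals, eigvecs = np.linalg.eigh(noise_csd)
whitener = np.diag(1.0 / np.sqrt(eigvals)) @ eigvecs.conj().T

# The step added in make_dics() and apply_dics_csd(): whiten the data CSD
# before computing beamformer weights / source power.
data_csd_white = whitener @ data_csd @ whitener.conj().T

print(np.allclose(whitener @ noise_csd @ whitener.conj().T, np.eye(n_ch)))
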
8 changes: 6 additions & 2 deletions mne/beamformer/_lcmv.py
@@ -417,8 +417,12 @@ def apply_lcmv_cov(data_cov, filters, verbose=None):
data_cov = pick_channels_cov(data_cov, sel_names)

n_orient = filters['weights'].shape[0] // filters['nsource']
source_power = _compute_power(data_cov['data'], filters['weights'],
n_orient)

# Whiten the CSD
whitener = filters['whitener']
Cm = np.dot(whitener, np.dot(data_cov['data'], whitener.conj().T))

source_power = _compute_power(Cm, filters['weights'], n_orient)

# compatibility with 0.16, add src_type as None if not present:
filters, warn_text = _check_src_type(filters)
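For context, a hedged usage sketch of how apply_lcmv_cov fits into a pipeline once the filter carries a whitener; the epochs and fwd objects and the covariance time windows below are placeholders, not taken from this PR:

import mne
from mne.beamformer import make_lcmv, apply_lcmv_cov

# Hedged sketch: source power from a data covariance, using an LCMV filter
# whose whitener was derived from noise_cov.
noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.0)
data_cov = mne.compute_covariance(epochs, tmin=0.05, tmax=0.30)
filters = make_lcmv(epochs.info, fwd, data_cov, reg=0.05,
                    noise_cov=noise_cov, pick_ori='max-power')
stc_power = apply_lcmv_cov(data_cov, filters)  # one power value per source
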
8 changes: 4 additions & 4 deletions mne/beamformer/tests/test_dics.py
@@ -114,7 +114,7 @@ def test_make_dics(tmpdir, _load_forward):

fwd_free, fwd_surf, fwd_fixed, fwd_vol, label = _load_forward
epochs, _, csd, _ = _simulate_data(fwd_fixed)
with pytest.raises(RuntimeError, match='several sensor types'):
with pytest.raises(ValueError, match='several sensor types'):
make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
epochs.pick_types(meg='grad')

@@ -255,7 +255,7 @@ def test_apply_dics_csd(_load_forward):
source_ind = vertices.tolist().index(source_vertno)
reg = 1 # Lots of regularization for our toy dataset

with pytest.raises(RuntimeError, match='several sensor types'):
with pytest.raises(ValueError, match='several sensor types'):
make_dics(epochs.info, fwd_free, csd)
epochs.pick_types(meg='grad')

@@ -383,7 +383,7 @@ def test_apply_dics_timeseries(_load_forward):
source_ind = vertices.tolist().index(source_vertno)
reg = 5 # Lots of regularization for our toy dataset

with pytest.raises(RuntimeError, match='several sensor types'):
with pytest.raises(ValueError, match='several sensor types'):
make_dics(evoked.info, fwd_surf, csd)
evoked.pick_types(meg='grad')

@@ -485,7 +485,7 @@ def test_tf_dics(_load_forward):
frequencies = [10, 20]
freq_bins = [(8, 12), (18, 22)]

with pytest.raises(RuntimeError, match='several sensor types'):
with pytest.raises(ValueError, match='several sensor types'):
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
freq_bins=freq_bins, frequencies=frequencies,
decim=10, reg=reg, label=label)
19 changes: 11 additions & 8 deletions mne/cov.py
@@ -916,9 +916,9 @@ def _check_scalings_user(scalings):
def _eigvec_subspace(eig, eigvec, mask):
"""Compute the subspace from a subset of eigenvectors."""
# We do the same thing we do with projectors:
P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].T, eigvec[~mask])
P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask])
eig, eigvec = eigh(P)
eigvec = eigvec.T
eigvec = eigvec.conj().T
return eig, eigvec


@@ -1318,7 +1318,7 @@ def _get_ch_whitener(A, pca, ch_type, rank):
"""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = eigh(A, overwrite_a=True)
eigvec = eigvec.T
eigvec = eigvec.conj().T
mask = np.ones(len(eig), bool)
eig[:-rank] = 0.0
mask[:-rank] = False
@@ -1425,8 +1425,10 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None,
# time saving short-circuit
if proj_subspace and sum(rank.values()) == C.shape[0]:
return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool)
eig = np.zeros(n_chan)
eigvec = np.zeros((n_chan, n_chan))

dtype = np.complex if C.dtype == np.complex else np.float
eig = np.zeros(n_chan, dtype)
eigvec = np.zeros((n_chan, n_chan), dtype)
mask = np.zeros(n_chan, bool)
for ch_type, picks in _picks_by_type(info, meg_combined=True,
ref_meg=False, exclude='bads'):
@@ -1746,11 +1748,12 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None,
nzero = (eig > 0)
eig[~nzero] = 0. # get rid of numerical noise (negative) ones

W = np.zeros((n_chan, 1), dtype=np.float)
dtype = np.complex if noise_cov['eigvec'].dtype == np.complex else np.float
W = np.zeros((n_chan, 1), dtype)
W[nzero, 0] = 1.0 / np.sqrt(eig[nzero])
# Rows of eigvec are the eigenvectors
W = W * noise_cov['eigvec'] # C ** -0.5
C = np.sqrt(eig) * noise_cov['eigvec'].T # C ** 0.5
C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5
n_nzero = nzero.sum()
logger.info(' Created the whitener using a noise covariance matrix '
'with rank %d (%d small eigenvalues omitted)'
@@ -1761,7 +1764,7 @@
W = W[nzero]
C = C[:, nzero]
elif pca is False:
W = np.dot(noise_cov['eigvec'].T, W)
W = np.dot(noise_cov['eigvec'].conj().T, W)
C = np.dot(C, noise_cov['eigvec'])

# Triage return
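The .T to .conj().T changes above matter because CSD matrices are complex Hermitian: their eigenvector matrix is unitary rather than orthogonal, so the whitener has to use the conjugate transpose. A tiny illustrative NumPy check:

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
C = A @ A.conj().T  # complex Hermitian, like a CSD matrix

eig, eigvec = np.linalg.eigh(C)

# The eigenvector matrix is unitary: its inverse is the conjugate transpose,
# not the plain transpose.
print(np.allclose(eigvec @ eigvec.conj().T, np.eye(3)))  # True
print(np.allclose(eigvec @ eigvec.T, np.eye(3)))         # generally False

# So the whitener (C ** -0.5) must be built with the conjugate transpose:
W = np.diag(1.0 / np.sqrt(eig)) @ eigvec.conj().T
print(np.allclose(W @ C @ W.conj().T, np.eye(3)))        # True
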
23 changes: 19 additions & 4 deletions mne/time_frequency/csd.py
@@ -136,7 +136,10 @@ def __init__(self, data, ch_names, frequencies, n_fft, tmin=None,
self.frequencies = frequencies

self.n_fft = n_fft
self.projs = cp.deepcopy(projs)
if projs is None:
self.projs = []
else:
self.projs = cp.deepcopy(projs)

@property
def n_channels(self):
@@ -339,7 +342,7 @@ def pick_frequency(self, freq=None, index=None):

return self[index]

def get_data(self, frequency=None, index=None):
def get_data(self, frequency=None, index=None, as_cov=False):
"""Get the CSD matrix for a given frequency as NumPy array.

If there is only one matrix defined in the CSD object, calling this
@@ -355,10 +358,15 @@ def get_data(self, frequency=None, index=None):
index : int | None
Return the CSD matrix for the frequency or frequency-bin with the
given index.
as_cov : bool
Whether to return the data as a numpy array (`False`, the default),
or pack it in a :class:`mne.Covariance` object (`True`).

.. versionadded:: 0.20

Returns
-------
csd : ndarray, shape (n_channels, n_channels)
csd : ndarray, shape (n_channels, n_channels) | instance of Covariance
The CSD matrix corresponding to the requested frequency.

See Also
@@ -376,7 +384,14 @@ def get_data(self, frequency=None, index=None):
raise ValueError('Cannot specify both a frequency and index.')
index = self._get_frequency_index(frequency)

return _vector_to_sym_mat(self._data[:, index])
data = _vector_to_sym_mat(self._data[:, index])
if as_cov:
# Pack the data into a Covariance object
from ..cov import Covariance # to avoid circular import
return Covariance(data, self.ch_names, bads=[], projs=self.projs,
nfree=self.n_fft)
else:
return data

@copy_function_doc_to_method_doc(plot_csd)
def plot(self, info=None, mode='csd', colorbar=True, cmap='viridis',
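A hedged usage sketch of the new as_cov option; the epochs object and the frequency choices below are assumptions, not taken from this diff:

from mne.time_frequency import csd_morlet

# Pack a (frequency-averaged) CSD into a Covariance object, mirroring what
# make_dics() now does internally with noise_csd.
csd = csd_morlet(epochs, frequencies=[10, 12, 14], tmin=-1, tmax=1.5, decim=20)
noise_cov_like = csd.mean().get_data(as_cov=True)  # instance of Covariance
csd_10hz = csd.get_data(frequency=10)              # plain ndarray, as before
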
23 changes: 13 additions & 10 deletions mne/utils/check.py
@@ -448,29 +448,32 @@ def _check_rank(rank):
def _check_one_ch_type(method, info, forward, data_cov=None, noise_cov=None):
"""Check number of sensor types and presence of noise covariance matrix."""
from ..cov import make_ad_hoc_cov, Covariance
from ..time_frequency.csd import CrossSpectralDensity
from ..io.pick import pick_info
from ..channels.channels import _contains_ch_type
picks = _check_info_inv(info, forward, data_cov=data_cov,
noise_cov=noise_cov)
info_pick = pick_info(info, picks)
if isinstance(data_cov, CrossSpectralDensity):
_validate_type(noise_cov, [None, CrossSpectralDensity], 'noise_cov')
# FIXME
picks = list(range(len(data_cov.ch_names)))
info_pick = info
else:
_validate_type(noise_cov, [None, Covariance], 'noise_cov')
picks = _check_info_inv(info, forward, data_cov=data_cov,
noise_cov=noise_cov)
info_pick = pick_info(info, picks)
ch_types =\
[_contains_ch_type(info_pick, tt) for tt in ('mag', 'grad', 'eeg')]
if sum(ch_types) > 1:
if method == 'lcmv' and noise_cov is None:
if noise_cov is None:
raise ValueError('Source reconstruction with several sensor types'
' requires a noise covariance matrix to be '
'able to apply whitening.')
if method == 'dics':
raise RuntimeError(
'The use of several sensor types with the DICS beamformer is '
'not supported yet.')
if noise_cov is None:
noise_cov = make_ad_hoc_cov(info_pick, std=1.)
else:
noise_cov = noise_cov.copy()
if 'estimator' in noise_cov:
if isinstance(noise_cov, Covariance) and 'estimator' in noise_cov:
del noise_cov['estimator']
_validate_type(noise_cov, Covariance, 'noise_cov')
return noise_cov, picks


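To illustrate the behavioral change encoded in this check (a hedged sketch, not part of the diff): mixing sensor types with DICS no longer fails with a RuntimeError; it raises a ValueError unless a noise CSD is supplied for whitening. The epochs, fwd, csd and csd_baseline objects below are assumed to be defined as in the example script at the top of this PR.

from mne.beamformer import make_dics

# With both magnetometers and gradiometers present and no noise CSD, the new
# check raises:
#   ValueError: Source reconstruction with several sensor types requires a
#   noise covariance matrix to be able to apply whitening.
# make_dics(epochs.info, fwd, csd)

# Supplying a noise CSD enables whitening across sensor types:
filters = make_dics(epochs.info, fwd, csd, noise_csd=csd_baseline,
                    pick_ori='max-power', real_filter=True)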