diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index a59ba48ab01..054b0c65924 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -13,3 +13,4 @@ e39995d9be6fc831c7a4a59f09b7a7c0a41ae315 # 12588, percent formatting 1c5b39ff1d99bbcb2fc0e0071a989b3f3845ff30 # 12603, ruff UP028 b8b168088cb474f27833f5f9db9d60abe00dca83 # 12779, PR JSONs ee64eba6f345e895e3d5e7d2804fa6aa2dac2e6d # 12781, Header unification +362f9330925fb79a6adc19a42243672676dec63e # 12799, UP038 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 837e4eaa1e2..231488d2d47 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -15,13 +15,20 @@ Again, thanks for contributing! --> -#### Reference issue -Example: Fixes #1234. +#### Reference issue (if any) + + #### What does this implement/fix? -Explain your changes. + + #### Additional information -Any additional information you think is important. + + diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 571c3943ae7..81743789785 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -99,7 +99,7 @@ jobs: sed -i '/dipy/d' environment.yml sed -i 's/- mne$/- mne-base/' environment.yml if: matrix.os == 'ubuntu-latest' && startswith(matrix.kind, 'conda') && matrix.python == '3.12' - - uses: mamba-org/setup-micromamba@v1 + - uses: mamba-org/setup-micromamba@v2 with: environment-file: ${{ env.CONDA_ENV }} environment-name: mne @@ -131,4 +131,4 @@ jobs: - uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} - if: success() + if: always() diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b17205310b8..c600e31ded4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.2 + rev: v0.7.0 hooks: - id: ruff name: ruff lint mne @@ -51,12 +51,33 @@ repos: # sorting - repo: 
https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: file-contents-sorter files: ^doc/changes/names.inc|^.mailmap args: ["--ignore-case"] + - repo: https://github.com/pappasam/toml-sort + rev: v0.23.1 + hooks: + - id: toml-sort-fix + files: pyproject.toml + + # dependencies + - repo: local + hooks: + - id: dependency-sync + name: Sync dependency list between pyproject.toml and README.rst + language: python + entry: ./tools/hooks/sync_dependencies.py + files: pyproject.toml + additional_dependencies: ["mne"] + + +# these should *not* be run on CIs: +ci: + skip: [dependency-sync] # needs MNE to work, which exceeds the free tier space alloc. + # The following are too slow to run on local commits, so let's only run on CIs: # # - repo: https://github.com/pre-commit/mirrors-mypy diff --git a/README.rst b/README.rst index 745e74849c3..50e0daaa52c 100644 --- a/README.rst +++ b/README.rst @@ -43,9 +43,6 @@ only, use pip_ in a terminal: $ pip install --upgrade mne -The current MNE-Python release requires Python 3.9 or higher. MNE-Python 0.17 -was the last release to support Python 2.7. - For more complete instructions, including our standalone installers and more advanced installation methods, please refer to the `installation guide`_. @@ -73,42 +70,20 @@ Dependencies The minimum required dependencies to run MNE-Python are: +.. ↓↓↓ BEGIN CORE DEPS LIST. DO NOT EDIT! 
HANDLED BY PRE-COMMIT HOOK ↓↓↓ + - `Python `__ ≥ 3.9 -- `NumPy `__ ≥ 1.24 -- `SciPy `__ ≥ 1.10 +- `NumPy `__ ≥ 1.23 +- `SciPy `__ ≥ 1.9 - `Matplotlib `__ ≥ 3.6 - `Pooch `__ ≥ 1.5 - `tqdm `__ - `Jinja2 `__ - `decorator `__ -- `lazy_loader `__ - -For full functionality, some functions require: - -- `scikit-learn `__ ≥ 1.2 -- `Joblib `__ ≥ 1.2 (for parallelization) -- `mne-qt-browser `__ ≥ 0.5 (for fast raw data visualization) -- `Qt `__ ≥ 5.15 via one of the following bindings (for fast raw data visualization and interactive 3D visualization): - - - `PySide6 `__ ≥ 6.0 - - `PyQt6 `__ ≥ 6.0 - - `PyQt5 `__ ≥ 5.15 - -- `Numba `__ ≥ 0.56.4 -- `NiBabel `__ ≥ 3.2.1 -- `OpenMEEG `__ ≥ 2.5.6 -- `pandas `__ ≥ 1.5.2 -- `Picard `__ ≥ 0.3 -- `CuPy `__ ≥ 9.0.0 (for NVIDIA CUDA acceleration) -- `DIPY `__ ≥ 1.4.0 -- `imageio `__ ≥ 2.8.0 -- `PyVista `__ ≥ 0.37 (for 3D visualization) -- `PyVistaQt `__ ≥ 0.9 (for 3D visualization) -- `mffpy `__ ≥ 0.5.7 -- `h5py `__ -- `h5io `__ -- `pymatreader `__ +- `lazy-loader `__ ≥ 0.3 +- `packaging `__ +.. ↑↑↑ END CORE DEPS LIST. DO NOT EDIT! 
HANDLED BY PRE-COMMIT HOOK ↑↑↑ Contributing ^^^^^^^^^^^^ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3ced8ce46f9..1719dd95354 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -111,7 +111,7 @@ stages: - bash: | set -e python -m pip install --progress-bar off --upgrade pip - python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard qtpy nibabel sphinx-gallery PySide6 + python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard qtpy nibabel sphinx-gallery "PySide6!=6.8.0,!=6.8.0.1" python -m pip uninstall -yq mne python -m pip install --progress-bar off --upgrade -e .[test] displayName: 'Install dependencies with pip' @@ -201,7 +201,7 @@ stages: displayName: 'PyQt6' - bash: | set -eo pipefail - python -m pip install PySide6 + python -m pip install "PySide6!=6.8.0,!=6.8.0.1" mne sys_info -pd mne sys_info -pd | grep "qtpy .* (PySide6=.*)$" PYTEST_QT_API=PySide6 pytest ${TEST_OPTIONS} diff --git a/doc/Makefile b/doc/Makefile index ab8219473b0..c0badd0bd9b 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -76,3 +76,6 @@ view: @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/_build/html/sg_execution_times.html')" show: view + +serve: + python -m http.server -d _build/html diff --git a/doc/_static/js/set_installer_tab.js b/doc/_static/js/set_installer_tab.js index 5b6b737a565..3b4c043387a 100644 --- a/doc/_static/js/set_installer_tab.js +++ b/doc/_static/js/set_installer_tab.js @@ -12,21 +12,34 @@ function setTabs() { } if (navigator.userAgent.indexOf("Mac") !== -1) { // there's no good way to distinguish intel vs M1 in javascript so we - // just default to showing the first of the 2 macOS tabs - platform = "macos-intel"; + // just default to showing the most modern macOS installer 
+ platform = "macos-apple"; } - let all_tab_nodes = document.querySelectorAll( - '.platform-selector-tabset')[0].children; - let input_nodes = [...all_tab_nodes].filter( - child => child.nodeName === "INPUT"); + var platform_short = platform.split("-")[0]; + let tab_label_nodes = [...document.querySelectorAll('.sd-tab-label')]; - let correct_label = tab_label_nodes.filter( + + let install_tab_nodes = document.querySelectorAll( + '.install-selector-tabset')[0].children; + let install_input_nodes = [...install_tab_nodes].filter( + child => child.nodeName === "INPUT"); + let install_label = tab_label_nodes.filter( // label.id is drawn from :name: property in the rST, which must // be unique across the whole site (*sigh*) - label => label.id.startsWith(platform))[0]; - let input_id = correct_label.getAttribute('for'); - let correct_input = input_nodes.filter(node => node.id === input_id)[0]; - correct_input.checked = true; + label => label.id.startsWith(`install-${platform}`))[0]; + let install_id = install_label.getAttribute('for'); + let install_input = install_input_nodes.filter(node => node.id === install_id)[0]; + install_input.checked = true; + + let uninstall_tab_nodes = document.querySelectorAll( + '.uninstall-selector-tabset')[0].children; + let uninstall_input_nodes = [...uninstall_tab_nodes].filter( + child => child.nodeName === "INPUT"); + let uninstall_label = tab_label_nodes.filter( + label => label.id.startsWith(`uninstall-${platform_short}`))[0]; + let uninstall_id = uninstall_label.getAttribute('for'); + let uninstall_input = uninstall_input_nodes.filter(node => node.id === uninstall_id)[0]; + uninstall_input.checked = true; } documentReady(setTabs); diff --git a/doc/_static/js/update_installer_version.js b/doc/_static/js/update_installer_version.js index 7cb8bdede1e..ad18890caf7 100644 --- a/doc/_static/js/update_installer_version.js +++ b/doc/_static/js/update_installer_version.js @@ -54,7 +54,7 @@ async function warnVersion() { title.innerText = 
"Warning"; inner.innerText = warn; outer.append(title, inner); - document.querySelectorAll('.platform-selector-tabset')[0].before(outer); + document.querySelectorAll('.install-selector-tabset')[0].before(outer); } } diff --git a/doc/_static/style.css b/doc/_static/style.css index 4bc6e03708e..70735d9c3fb 100644 --- a/doc/_static/style.css +++ b/doc/_static/style.css @@ -13,6 +13,20 @@ html[data-theme="light"] { + /* pydata-sphinx-theme overrides */ + /* ↓↓↓ use default "info" colors for "primary" */ + --pst-color-primary: #276be9; + --pst-color-primary-bg: #dce7fc; + /* ↓↓↓ use default "primary" colors for "info" */ + --pst-color-info: var(--pst-teal-500); + --pst-color-info-bg: var(--pst-teal-200); + /* ↓↓↓ use "warning" colors for "secondary" */ + --pst-color-secondary: var(--pst-color-warning); + --pst-color-secondary-bg: var(--pst-color-warning-bg); + /* ↓↓↓ make sure new primary (link) color propogates to links on code */ + --pst-color-inline-code-links: var(--pst-color-link); + /* ↓↓↓ make sure new secondary (hover) color propogates to hovering on table rows */ + --pst-color-table-row-hover-bg: var(--pst-color-secondary-bg); /* topbar logo links */ --mne-color-github: #000; --mne-color-discourse: #d0232b; @@ -21,8 +35,6 @@ html[data-theme="light"] { --copybtn-opacity: 0.75; /* card header bg color */ --mne-color-card-header: rgba(0, 0, 0, 0.05); - /* section headings */ - --mne-color-heading: #003e80; /* sphinx-gallery overrides */ --sg-download-a-background-color: var(--pst-color-primary); --sg-download-a-background-image: unset; @@ -33,6 +45,20 @@ html[data-theme="light"] { --sg-download-a-hover-box-shadow-2: none; } html[data-theme="dark"] { + /* pydata-sphinx-theme overrides */ + /* ↓↓↓ use default "info" colors for "primary" */ + --pst-color-primary: #79a3f2; + --pst-color-primary-bg: #06245d; + /* ↓↓↓ use default "primary" colors for "info" */ + --pst-color-info: var(--pst-teal-400); + --pst-color-info-bg: var(--pst-teal-800); + /* ↓↓↓ use "warning" 
colors for "secondary" */ + --pst-color-secondary: var(--pst-color-warning); + --pst-color-secondary-bg: var(--pst-color-warning-bg); + /* ↓↓↓ make sure new primary (link) color propogates to links on code */ + --pst-color-inline-code-links: var(--pst-color-link); + /* ↓↓↓ make sure new secondary (hover) color propogates to hovering on table rows */ + --pst-color-table-row-hover-bg: var(--pst-color-secondary-bg); /* topbar logo links */ --mne-color-github: rgb(240, 246, 252); /* from their logo SVG */ --mne-color-discourse: #FFF9AE; /* from their logo SVG */ @@ -41,8 +67,6 @@ html[data-theme="dark"] { --copybtn-opacity: 0.25; /* card header bg color */ --mne-color-card-header: rgba(255, 255, 255, 0.2); - /* section headings */ - --mne-color-heading: #b8cbe0; /* sphinx-gallery overrides */ --sg-download-a-background-color: var(--pst-color-primary); --sg-download-a-background-image: unset; @@ -52,9 +76,6 @@ html[data-theme="dark"] { --sg-download-a-hover-box-shadow-1: none; --sg-download-a-hover-box-shadow-2: none; } -h1, h2, h3, h4, h5, h6 { - color: var(--mne-color-heading); -} /* ************************************************************ Sphinx fixes */ @@ -161,20 +182,45 @@ iframe.sg_report { top: 0; } -/* TODO: Either pydata-sphinx-theme (for using Bootstrap) or sphinx-gallery (for adding table formatting) should fix this */ -.table-striped-columns>:not(caption)>tr>:nth-child(2n),.table-striped>tbody>tr:nth-of-type(odd)>* { - --bs-table-accent-bg: var(--bs-table-striped-bg); +/* ******************************************************** HTML repr tables */ + +/* make table responsive to pydata-sphinx-theme's light/dark mode */ +.table > :not(caption) > * > * { color: var(--pst-color-text-base); } -.table-hover>tbody>tr:hover>* { - --bs-table-accent-bg: var(--bs-table-hover-bg); - color: var(--pst-color-text-base); +.mne-repr-table tbody tr:hover { + background-color: var(--pst-color-table-row-hover-bg); } -.rendered_html table { - color: 
var(--pst-color-text-base); +.mne-repr-section-toggle > button > svg > path { + fill: var(--pst-color-text-base); } - - +/* make the expand/collapse button look nicer */ +.mne-repr-section-toggle > button { + padding: 20%; +} +/* make section header rows more distinct (and harmonize with pydata-sphinx-theme table +style in the process). Color copied from pydata-sphinx-theme; 2px copied from bootstrap. +*/ +.mne-repr-table th { + border-bottom: 2px solid var(--pst-color-primary); +} +/* harmonize the channel names buttons with the rest of the table */ +.mne-ch-names-btn { + font-size: inherit; + padding: 0.25rem; + min-width: 1.5rem; + font-weight: bold; +} +/* +.mne-ch-names-btn:hover { + background-color: var(--pst-color-); + text-decoration: underline; +} +.mne-ch-names-btn:focus-visible { + outline: 0.1875rem solid var(--pst-color-accent); + outline-offset: 0.1875rem; +} +*/ /* ***************************************************** sphinx-design fixes */ p.btn a { color: unset; diff --git a/doc/changes/devel/12366.newfeature.rst b/doc/changes/devel/12366.newfeature.rst new file mode 100644 index 00000000000..979c7141504 --- /dev/null +++ b/doc/changes/devel/12366.newfeature.rst @@ -0,0 +1 @@ +Add support for `dict` type argument ``ref_channels`` to :func:`mne.set_eeg_reference`, to allow flexible re-referencing (e.g. ``raw.set_eeg_reference(ref_channels={'A1': ['A2', 'A3']})`` will set the new A1 data to be ``A1 - mean(A2, A3)``), by `Alex Lepauvre`_ and `Qian Chu`_ and `Daniel McCloy`_. \ No newline at end of file diff --git a/doc/changes/devel/12787.other.rst b/doc/changes/devel/12787.other.rst new file mode 100644 index 00000000000..1f53fdea066 --- /dev/null +++ b/doc/changes/devel/12787.other.rst @@ -0,0 +1 @@ +Use custom code in :func:`mne.sys_info` to get the amount of physical memory and a more informative CPU name instead of using the ``psutil`` package, by `Clemens Brunner`_. 
\ No newline at end of file diff --git a/doc/changes/devel/12792.newfeature.rst b/doc/changes/devel/12792.newfeature.rst index 8866b5c201a..81ef79c8a11 100644 --- a/doc/changes/devel/12792.newfeature.rst +++ b/doc/changes/devel/12792.newfeature.rst @@ -1 +1 @@ -Add reader for ANT Neuro files in the ``*.cnt`` format with :func:`~mne.io.read_raw_ant`, by `Mathieu Scheltienne`_ and `Eric Larson`_. +Add reader for ANT Neuro files in the ``*.cnt`` format with :func:`~mne.io.read_raw_ant`, by `Mathieu Scheltienne`_, `Eric Larson`_ and `Proloy Das`_. diff --git a/doc/changes/devel/12827.other.rst b/doc/changes/devel/12827.other.rst new file mode 100644 index 00000000000..3ccbaa0bff6 --- /dev/null +++ b/doc/changes/devel/12827.other.rst @@ -0,0 +1 @@ +Improve documentation clarity of ``fit_transform`` methods for :class:`mne.decoding.SSD`, :class:`mne.decoding.CSP`, and :class:`mne.decoding.SPoC` classes, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/devel/12829.apichange.rst b/doc/changes/devel/12829.apichange.rst new file mode 100644 index 00000000000..d0bd4c12a46 --- /dev/null +++ b/doc/changes/devel/12829.apichange.rst @@ -0,0 +1 @@ +Deprecate ``average`` parameter in ``plot_filters`` and ``plot_patterns`` methods of the :class:`mne.decoding.CSP` and :class:`mne.decoding.SPoC` classes, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/devel/12830.newfeature.rst b/doc/changes/devel/12830.newfeature.rst new file mode 100644 index 00000000000..4d51229392d --- /dev/null +++ b/doc/changes/devel/12830.newfeature.rst @@ -0,0 +1 @@ +:func:`mne.channels.read_custom_montage` may now read a newer version of the ``.elc`` ASA Electrode file format, by `Stefan Appelhoff`_. 
diff --git a/doc/changes/devel/12834.dependency.rst b/doc/changes/devel/12834.dependency.rst new file mode 100644 index 00000000000..ca19423df87 --- /dev/null +++ b/doc/changes/devel/12834.dependency.rst @@ -0,0 +1,2 @@ +Importing from ``mne.decoding`` now explicitly requires ``scikit-learn`` to be installed, +by `Eric Larson`_. diff --git a/doc/changes/devel/12842.bugfix.rst b/doc/changes/devel/12842.bugfix.rst new file mode 100644 index 00000000000..75f83683b8f --- /dev/null +++ b/doc/changes/devel/12842.bugfix.rst @@ -0,0 +1 @@ +Fix bug where :meth:`mne.Epochs.compute_tfr` could not be used with the multitaper method and complex or phase outputs, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/devel/12843.bugfix.rst b/doc/changes/devel/12843.bugfix.rst new file mode 100644 index 00000000000..6f3be428b3a --- /dev/null +++ b/doc/changes/devel/12843.bugfix.rst @@ -0,0 +1,3 @@ +Fixed a bug where split FIF files that were read and then appended to other +:class:`mne.io.Raw` instances had their ``BAD boundary`` annotations incorrectly offset +in samples by the number of split files, by `Eric Larson`_. diff --git a/doc/changes/devel/12843.other.rst b/doc/changes/devel/12843.other.rst new file mode 100644 index 00000000000..5271d6124de --- /dev/null +++ b/doc/changes/devel/12843.other.rst @@ -0,0 +1 @@ +Improve handling of filenames in ``raw.filenames`` by using :class:`~pathlib.Path` instead of :class:`str`, by `Mathieu Scheltienne`_. diff --git a/doc/changes/devel/12844.other.rst b/doc/changes/devel/12844.other.rst new file mode 100644 index 00000000000..ce959d8132a --- /dev/null +++ b/doc/changes/devel/12844.other.rst @@ -0,0 +1 @@ +Improve automatic figure scaling of :func:`mne.viz.plot_events`, and event_id and count overview legend when a high amount of unique events is supplied, by `Stefan Appelhoff`_. 
diff --git a/doc/changes/devel/12846.bugfix.rst b/doc/changes/devel/12846.bugfix.rst new file mode 100644 index 00000000000..ce18e8f5201 --- /dev/null +++ b/doc/changes/devel/12846.bugfix.rst @@ -0,0 +1,2 @@ +Enforce SI units for Eyetracking data (eyegaze data should be radians of visual angle, not pixels. Pupil size data should be meters). +Updated tutorials so demonstrate how to convert data to SI units before analyses, by `Scott Huberty`_. \ No newline at end of file diff --git a/doc/changes/devel/12853.bugfix.rst b/doc/changes/devel/12853.bugfix.rst new file mode 100644 index 00000000000..18c8afbb8ea --- /dev/null +++ b/doc/changes/devel/12853.bugfix.rst @@ -0,0 +1 @@ +Prevent the ``colorbar`` parameter being ignored in topomap plots such as :meth:`mne.time_frequency.Spectrum.plot_topomap`, by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/changes/devel/12862.other.rst b/doc/changes/devel/12862.other.rst new file mode 100644 index 00000000000..393beeb8a8c --- /dev/null +++ b/doc/changes/devel/12862.other.rst @@ -0,0 +1 @@ +:meth:`mne.preprocessing.ICA.find_bads_muscle` can now be run when passing an ``inst`` without sensor positions. However, it will just use the first of three criteria (slope) to find muscle-related ICA components, by `Stefan Appelhoff`_. diff --git a/doc/changes/devel/12871.newfeature.rst b/doc/changes/devel/12871.newfeature.rst new file mode 100644 index 00000000000..7c6f9e6c9df --- /dev/null +++ b/doc/changes/devel/12871.newfeature.rst @@ -0,0 +1,2 @@ +Added the ``title`` argument to :func:`mne.viz.create_3d_figure`, and +``color`` and ``position`` arguments to :func:`mne.viz.set_3d_title`, by `Eric Larson`_. 
diff --git a/doc/changes/devel/12875.bugfix.rst b/doc/changes/devel/12875.bugfix.rst new file mode 100644 index 00000000000..c4fa57e9100 --- /dev/null +++ b/doc/changes/devel/12875.bugfix.rst @@ -0,0 +1,2 @@ +Fix bug where invalid data types (e.g., ``np.ndarray``s) could be used in some +:class:`mne.io.Info` fields like ``info["subject_info"]["weight"]``, by `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/devel/12877.bugfix.rst b/doc/changes/devel/12877.bugfix.rst new file mode 100644 index 00000000000..2d9ecf2c489 --- /dev/null +++ b/doc/changes/devel/12877.bugfix.rst @@ -0,0 +1,4 @@ +When creating a :class:`~mne.time_frequency.SpectrumArray`, the array shape check now +compares against the total of both 'good' and 'bad' channels in the provided +:class:`~mne.Info` (previously only good channels were checked), by +`Mathieu Scheltienne`_. diff --git a/doc/changes/devel/12884.bugfix.rst b/doc/changes/devel/12884.bugfix.rst new file mode 100644 index 00000000000..6c5beda7241 --- /dev/null +++ b/doc/changes/devel/12884.bugfix.rst @@ -0,0 +1 @@ +Fix bug where :ref:`mne coreg` would always show MEG channels even if the "MEG Sensors" checkbox was disabled, by `Eric Larson`_. diff --git a/doc/changes/devel/12901.bugfix.rst b/doc/changes/devel/12901.bugfix.rst new file mode 100644 index 00000000000..d68f70f7141 --- /dev/null +++ b/doc/changes/devel/12901.bugfix.rst @@ -0,0 +1 @@ +:class:`mne.Report` HDF5 files are now written in ``mode='a'`` (append) to allow users to store other data in the HDF5 files, by `Eric Larson`_. 
diff --git a/doc/changes/devel/12901.newfeature.rst b/doc/changes/devel/12901.newfeature.rst new file mode 100644 index 00000000000..8d0137fce78 --- /dev/null +++ b/doc/changes/devel/12901.newfeature.rst @@ -0,0 +1,8 @@ +Improved reporting and plotting options: + +- :meth:`mne.Report.add_projs` can now plot with :func:`mne.viz.plot_projs_joint` rather than :func:`mne.viz.plot_projs_topomap` +- :class:`mne.Report` now has attributes ``img_max_width`` and ``img_max_res`` that can be used to control image scaling. +- :class:`mne.Report` now has an attribute ``collapse`` that allows collapsing sections and/or subsections by default. +- :func:`mne.viz.plot_head_positions` now has a ``totals=True`` option to show the total distance and angle of the head. + +Changes by `Eric Larson`_. diff --git a/doc/changes/devel/12909.bugfix.rst b/doc/changes/devel/12909.bugfix.rst new file mode 100644 index 00000000000..9e2f5672323 --- /dev/null +++ b/doc/changes/devel/12909.bugfix.rst @@ -0,0 +1 @@ +Fix bug in :func:`mne.io.read_raw_gdf` when NumPy >= 2 is used, by `Clemens Brunner`_. \ No newline at end of file diff --git a/doc/changes/devel/12912.newfeature.rst b/doc/changes/devel/12912.newfeature.rst new file mode 100644 index 00000000000..2a7343ebd2c --- /dev/null +++ b/doc/changes/devel/12912.newfeature.rst @@ -0,0 +1 @@ +Added the ``psd_args`` argument to :func:`mne.viz.plot_ica_sources` and :meth:`mne.preprocessing.ICA.plot_sources`, by `Richard Scholz`_. \ No newline at end of file diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 8dba79bcccf..2c5e8e1ee2c 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -10,7 +10,7 @@ .. _Alex Ciok: https://github.com/alexCiok .. _Alex Gramfort: https://alexandre.gramfort.net .. _Alex Kiefer: https://home.alexk101.dev -.. _Alex Lepauvre: https://github.com/AlexLepauvre +.. _Alex Lepauvre: https://github.com/AlexLepauvre .. _Alex Rockhill: https://github.com/alexrockhill/ .. 
_Alexander Rudiuk: https://github.com/ARudiuk .. _Alexandre Barachant: https://alexandre.barachant.org @@ -22,7 +22,7 @@ .. _Andrew Quinn: https://github.com/ajquinn .. _Aniket Pradhan: https://github.com/Aniket-Pradhan .. _Anna Padee: https://github.com/apadee/ -.. _Annalisa Pascarella: https://www.dima.unige.it/~pascarel/html/cv.html +.. _Annalisa Pascarella: https://www.iac.cnr.it/personale/annalisa-pascarella .. _Anne-Sophie Dubarry: https://github.com/annesodub .. _Antoine Gauthier: https://github.com/Okamille .. _Antti Rantala: https://github.com/Odingod @@ -206,7 +206,7 @@ .. _Nathalie Gayraud: https://github.com/ngayraud .. _Naveen Srinivasan: https://github.com/naveensrinivasan .. _Nick Foti: https://nfoti.github.io -.. _Nick Ward: https://www.ucl.ac.uk/ion/departments/sobell/Research/NWard +.. _Nick Ward: https://profiles.ucl.ac.uk/10827-nick-ward .. _Nicolas Barascud: https://github.com/nbara .. _Nicolas Fourcaud-Trocmé: https://www.crnl.fr/fr/user/316 .. _Niels Focke: https://neurologie.umg.eu/forschung/arbeitsgruppen/epilepsie-und-bildgebungsforschung @@ -261,7 +261,7 @@ .. _Samuel Louviot: https://github.com/Sam54000 .. _Samuel Powell: https://github.com/samuelpowell .. _Santeri Ruuskanen: https://github.com/ruuskas -.. _Sara Sommariva: https://www.dima.unige.it/~sommariva/ +.. _Sara Sommariva: https://github.com/sarasommariva .. _Sawradip Saha: https://sawradip.github.io/ .. _Scott Huberty: https://orcid.org/0000-0003-2637-031X .. 
_Sebastiaan Mathot: https://www.cogsci.nl/smathot diff --git a/doc/conf.py b/doc/conf.py index 32eedf80dba..f66dc2af5b3 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -166,7 +166,7 @@ "mne-gui-addons": ("https://mne.tools/mne-gui-addons", None), "picard": ("https://pierreablin.github.io/picard/", None), "eeglabio": ("https://eeglabio.readthedocs.io/en/latest", None), - "pybv": ("https://pybv.readthedocs.io/en/latest/", None), + "pybv": ("https://pybv.readthedocs.io/en/latest", None), } intersphinx_mapping.update( get_intersphinx_mapping( @@ -209,6 +209,8 @@ "ColorbarBase": "matplotlib.colorbar.ColorbarBase", # sklearn "LeaveOneOut": "sklearn.model_selection.LeaveOneOut", + "MetadataRequest": "sklearn.utils.metadata_routing.MetadataRequest", + "estimator": "sklearn.base.BaseEstimator", # joblib "joblib.Parallel": "joblib.Parallel", # nibabel @@ -397,6 +399,9 @@ "mapping", "to", "any", + "pandas", + "polars", + "default", # unlinkable "CoregistrationUI", "mne_qt_browser.figure.MNEQtBrowser", @@ -600,6 +605,28 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): """.format(name.split(".")[-1], name).split("\n") +def fix_sklearn_inherited_docstrings(app, what, name, obj, options, lines): + """Fix sklearn docstrings because they use autolink and we do not.""" + if ( + name.startswith("mne.decoding.") or name.startswith("mne.preprocessing.Xdawn") + ) and name.endswith( + ( + ".get_metadata_routing", + ".fit", + ".fit_transform", + ".set_output", + ".transform", + ) + ): + if ":Parameters:" in lines: + loc = lines.index(":Parameters:") + else: + loc = lines.index(":Returns:") + lines.insert(loc, "") + lines.insert(loc, ".. 
default-role:: autolink") + lines.insert(loc, "") + + # -- Other extension configuration ------------------------------------------- # Consider using http://magjac.com/graphviz-visual-editor for this @@ -621,6 +648,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): "https://doi.org/10.1093/", # academic.oup.com/sleep/ "https://doi.org/10.1098/", # royalsocietypublishing.org "https://doi.org/10.1101/", # www.biorxiv.org + "https://doi.org/10.1103", # journals.aps.org/rmp "https://doi.org/10.1111/", # onlinelibrary.wiley.com/doi/10.1111/psyp "https://doi.org/10.1126/", # www.science.org "https://doi.org/10.1137/", # epubs.siam.org @@ -654,6 +682,9 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # Too slow "https://speakerdeck.com/dengemann/", "https://www.dtu.dk/english/service/phonebook/person", + "https://www.gnu.org/software/make/", + "https://www.macports.org/", + "https://hastie.su.domains/CASI", # SSL problems sometimes "http://ilabs.washington.edu", "https://psychophysiology.cpmc.columbia.edu", @@ -1659,6 +1690,7 @@ def make_version(app, exception): def setup(app): """Set up the Sphinx app.""" app.connect("autodoc-process-docstring", append_attr_meth_examples) + app.connect("autodoc-process-docstring", fix_sklearn_inherited_docstrings) # High prio, will happen before SG app.connect("builder-inited", generate_credit_rst, priority=10) app.connect("builder-inited", report_scraper.set_dirs, priority=20) diff --git a/doc/development/roadmap.rst b/doc/development/roadmap.rst index defd4eac5cc..278ed3ce213 100644 --- a/doc/development/roadmap.rst +++ b/doc/development/roadmap.rst @@ -10,6 +10,141 @@ Code projects, while others require more extensive work. Open ---- +Type Annotations +^^^^^^^^^^^^^^^^ + +We would like to have type annotations for as much of our codebase as is practicable. 
+The main motivation for this is to improve the end-user experience when writing analysis +code that uses MNE-Python (i.e., code-completion suggestions, which rely on static +analysis / type hints). The main discussion of how to go about this is in :gh:`12243`. +Some piecemeal progress has been made (e.g., :gh:`12250`) but there isn't currently +anyone actively chipping away at this, hence its status as "open" rather than "in +progress". + +Docstring De-duplication +^^^^^^^^^^^^^^^^^^^^^^^^ + +For many years, MNE-Python has used a technique borrowed from SciPy (called +`doccer `__) +for improving the consistency of parameter names and descriptions that recur across our +API. For example, parameters for number of parallel jobs to use, for specifying random +seeds, or for controlling the appearance of a colorbar on a plot --- all of these appear +in multiple functions/methods in MNE-Python. The approach works by re-defining a +function's ``__doc__`` attribute at import time, filling in placeholders in the +docstring's parameter list with fully spelled-out equivalents (which are stored in a big +dictionary called the ``docdict``). There are two major downsides: + +1. Many docstrings can't be read (at least not in full) while browsing the source code. +2. Static code analyzers don't have access to the completed docstrings, so things like + hover-tooltips in IDEs are less useful than they would be if the docstrings were + complete in-place. + +A possible route forward: + +- Convert all docstrings to be fully spelled out in the source code. +- Instead of maintaining the ``docdict``, maintain a registry of sets of + function+parameter combinations that ought to be identical. +- Add a test that the entries in the registry are indeed identical, so that + inconsistencies cannot be introduced in existing code. +- Add a test that parses docstrings in any *newly added* functions and looks for + parameter names that maybe should be added to the registry of identical docstrings. 
+- To allow for parameter descriptions that should be *nearly* identical (e.g., the same + except one refers to :class:`~mne.io.Raw` objects and the other refers to + :class:`~mne.Epochs` objects), consider using regular expressions to check the + "identity" of the parameter descriptions. + +The main discussion is in :gh:`8218`; a wider discussion among maintainers of other +packages in the Scientific Python Ecosystem is +`here `__. + +Containerization +^^^^^^^^^^^^^^^^ + +Users sometimes encounter difficulty getting a working MNE-Python environment on shared +resources (such as compute clusters), due to various problems (old versions of package +managers or graphics libraries, lack of sufficient permissions, etc). Providing a +robust and up-to-date containerized distribution of MNE-Python would alleviate some of +these issues. Initial efforts can be seen in the +`MNE-Docker repository `__; these efforts +should be revived, brought up-to-date as necessary, and integrated into our normal +release process so that the images do not become stale. + +Education +^^^^^^^^^ + +Live workshops/tutorials/trainings on MNE-Python have historically been organized +*ad-hoc* rather than centrally. Instructors for these workshops are often approached +directly by the organization or group desiring to host the training, and there is often +no way for users outside that group to attend (or even learn about the opportunity). At +a minimum, we would like to have a process for keeping track of educational events that +feature MNE-Python or other tools in the MNE suite. Ideally, we would go further and +initiate a recurring series of tutorials that could be advertised widely. Such events +might even provide a small revenue stream for MNE-Python, to support things like +continuous integration costs. 
+ + +In progress +----------- + +Diversity, Equity, and Inclusion (DEI) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +MNE-Python is committed to recruiting and retaining a diverse pool of +contributors, see :gh:`8221`. + +First-class OPM support +^^^^^^^^^^^^^^^^^^^^^^^ +MNE-Python has support for reading some OPM data formats such as FIF and FIL/QuSpin. +Support should be added for other manufacturers, and standard preprocessing routines +should be added to deal with coregistration adjustment and OPM-specific artifacts. +See for example :gh:`11275`, :gh:`11276`, :gh:`11579`, :gh:`12179`. + +Deep source modeling +^^^^^^^^^^^^^^^^^^^^ +Existing source modeling and inverse routines are not explicitly designed to +deal with deep sources. Advanced algorithms exist from MGH for enhancing +deep source localization, and these should be implemented and vetted in +MNE-Python. See :gh:`6784`. + +Time-frequency classes +^^^^^^^^^^^^^^^^^^^^^^ +Historically our codebase had classes related to :term:`TFRs ` that +were incomplete. New classes are being built from the ground up: + +- new classes :class:`~mne.time_frequency.Spectrum` and + :class:`~mne.time_frequency.EpochsSpectrum` (implemented in :gh:`10184`, with + follow-up tweaks and bugfixes in :gh:`11178`, :gh:`11259`, :gh:`11280`, :gh:`11345`, + :gh:`11418`, :gh:`11563`, :gh:`11680`, :gh:`11682`, :gh:`11778`, :gh:`11921`, + :gh:`11978`, :gh:`12747`), and corresponding array-based constructors + :class:`~mne.time_frequency.SpectrumArray` and + :class:`~mne.time_frequency.EpochsSpectrumArray` (:gh:`11803`). + +- new class :class:`~mne.time_frequency.RawTFR` and updated classes + :class:`~mne.time_frequency.EpochsTFR` and :class:`~mne.time_frequency.AverageTFR`, + and corresponding array-based constructors :class:`~mne.time_frequency.RawTFRArray`, + :class:`~mne.time_frequency.EpochsTFRArray` and + :class:`~mne.time_frequency.AverageTFRArray` (implemented in :gh:`11282`, with + follow-ups in :gh:`12514`, :gh:`12842`). 
+ +- new/updated classes for source-space frequency and time-frequency data are not yet + implemented. + +Other related issues: :gh:`6290`, :gh:`7671`, :gh:`8026`, :gh:`8724`, :gh:`9045`, +and PRs: :gh:`6609`, :gh:`6629`, :gh:`6672`, :gh:`6673`, :gh:`8397`, :gh:`8892`. + +Modernization of realtime processing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +LSL has become the de facto standard for streaming data from EEG/MEG systems. +We should deprecate `MNE-Realtime`_ in favor of the newly minted `MNE-LSL`_. +We should then fully support MNE-LSL using modern coding best practices such as CI +integration. + +Core components of commonly used real-time processing pipelines should be implemented in +MNE-LSL, including but not limited to realtime IIR filtering, artifact rejection, +montage and reference setting, and online averaging. Integration with standard +MNE-Python plotting routines (evoked joint plots, topomaps, etc.) should be +supported with continuous updating. + Clustering statistics API ^^^^^^^^^^^^^^^^^^^^^^^^^ The current clustering statistics code has limited functionality. It should be @@ -49,63 +184,24 @@ Clear tutorials will be needed to: Regression tests will be written to ensure equivalent outputs when compared to FieldTrip for cases that FieldTrip also supports. -More details are in :gh:`4859`. - -Modernization of realtime processing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -LSL has become the de facto standard for streaming data from EEG/MEG systems. -We should deprecate `MNE-Realtime`_ in favor of the newly minted `MNE-LSL`_. -We should then fully support MNE-LSL using modern coding best practices such as CI -integration. +More details are in :gh:`4859`; progress in :gh:`12663`. -Core components of commonly used real-time processing pipelines should be implemented in -MNE-LSL, including but not limited to realtime IIR filtering, artifact rejection, -montage and reference setting, and online averaging. 
Integration with standard -MNE-Python plotting routines (evoked joint plots, topomaps, etc.) should be -supported with continuous updating. -In progress ------------ - -Diversity, Equity, and Inclusion (DEI) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python is committed to recruiting and retaining a diverse pool of -contributors, see :gh:`8221`. +.. _documentation-updates: -First-class OPM support -^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python has support for reading some OPM data formats such as FIF and FIL/QuSpin. -Support should be added for other manufacturers, and standard preprocessing routines -should be added to deal with coregistration adjustment and OPM-specific artifacts. -See for example :gh:`11275`, :gh:`11276`, :gh:`11579`, :gh:`12179`. +Documentation updates +^^^^^^^^^^^^^^^^^^^^^ +Our documentation has many minor issues, which can be found under the tag +:gh:`labels/DOC`. -Deep source modeling -^^^^^^^^^^^^^^^^^^^^ -Existing source modeling and inverse routines are not explicitly designed to -deal with deep sources. Advanced algorithms exist from MGH for enhancing -deep source localization, and these should be implemented and vetted in -MNE-Python. See :gh:`6784`. -Time-frequency classes -^^^^^^^^^^^^^^^^^^^^^^ -Our current codebase implements classes related to :term:`TFRs ` that -remain incomplete. We should implement new classes from the ground up -that can hold frequency data (``Spectrum``), cross-spectral data -(``CrossSpectrum``), multitaper estimates (``MultitaperSpectrum``), and -time-varying estimates (``Spectrogram``). These should work for -continuous, epoched, and averaged sensor data, as well as source-space brain -data. - -See related issues :gh:`6290`, :gh:`7671`, :gh:`8026`, :gh:`8724`, :gh:`9045`, -and PRs :gh:`6609`, :gh:`6629`, :gh:`6672`, :gh:`6673`, :gh:`8397`, and -:gh:`8892`. 
+Completed +--------- 3D visualization ^^^^^^^^^^^^^^^^ -Historically we have used Mayavi for 3D visualization, but have faced -limitations and challenges with it. We should work to use some other backend -(e.g., PyVista) to get major improvements, such as: +Historically we used Mayavi for 3D visualization, but faced limitations and challenges +with it. We switched to PyVista to get major improvements, such as: 1. *Proper notebook support (through ``ipyvtklink``)* (complete; updated to use ``trame``) 2. *Better interactivity with surface plots* (complete) @@ -113,22 +209,10 @@ limitations and challenges with it. We should work to use some other backend :ref:`time-frequency-viz`) 4. Integration of multiple functions as done in ``mne_analyze``, e.g., simultaneous source estimate viewing, field map - viewing, head surface display, etc. These are all currently available in - separate functions, but we should be able to combine them in a single plot - as well. - -The meta-issue for tracking to-do lists for surface plotting is :gh:`7162`. - -.. _documentation-updates: - -Documentation updates -^^^^^^^^^^^^^^^^^^^^^ -Our documentation has many minor issues, which can be found under the tag -:gh:`labels/DOC`. + viewing, head surface display, etc. These were all available in + separate functions, but can now be combined in a single plot. - -Completed ---------- +The meta-issue tracking to-do lists for surface plotting was :gh:`7162`. Improved sEEG/ECoG/DBS support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/install/installers.rst b/doc/install/installers.rst index 9f7932e911d..1aa352edcfb 100644 --- a/doc/install/installers.rst +++ b/doc/install/installers.rst @@ -7,12 +7,15 @@ MNE-Python installers are the easiest way to install MNE-Python and all dependencies. They also provide many additional Python packages and tools. Got any questions? Let us know on the `MNE Forum`_! +Platform-specific installers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + .. 
tab-set:: - :class: platform-selector-tabset + :class: install-selector-tabset .. tab-item:: Linux :class-content: text-center - :name: linux-installers + :name: install-linux .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.8.0/MNE-Python-1.8.0_0-Linux.sh :ref-type: ref @@ -33,7 +36,7 @@ Python packages and tools. Got any questions? Let us know on the `MNE Forum`_! .. tab-item:: macOS (Intel) :class-content: text-center - :name: macos-intel-installers + :name: install-macos-intel .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.8.0/MNE-Python-1.8.0_0-macOS_Intel.pkg :ref-type: ref @@ -49,7 +52,7 @@ Python packages and tools. Got any questions? Let us know on the `MNE Forum`_! .. tab-item:: macOS (Apple Silicon) :class-content: text-center - :name: macos-apple-installers + :name: install-macos-apple .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.8.0/MNE-Python-1.8.0_0-macOS_M1.pkg :ref-type: ref @@ -65,7 +68,7 @@ Python packages and tools. Got any questions? Let us know on the `MNE Forum`_! .. tab-item:: Windows :class-content: text-center - :name: windows-installers + :name: install-windows .. button-link:: https://github.com/mne-tools/mne-installers/releases/download/v1.8.0/MNE-Python-1.8.0_0-Windows.exe :ref-type: ref @@ -107,3 +110,54 @@ bundles to the ``Applications`` folder on macOS. applications to start, especially on the very first run – which may take particularly long on Apple Silicon-based computers. Subsequent runs should usually be much faster. + +Uninstallation +^^^^^^^^^^^^^^ + +To remove the MNE-Python distribution provided by our installers above: + +1. Remove relevant lines from your shell initialization scripts if you + added them at installation time. To do this, you can run from the MNE Prompt: + + .. 
code-block:: bash + + $ conda init --reverse + + Or you can manually edit shell initialization scripts, e.g., ``~/.bashrc`` or + ``~/.bash_profile``. + +2. Follow the instructions below to remove the MNE-Python conda installation for your platform: + + .. tab-set:: + :class: uninstall-selector-tabset + + .. tab-item:: Linux + :name: uninstall-linux + + In a BASH terminal you can do: + + .. code-block:: bash + + $ which python + /home/username/mne-python/1.8.0_0/bin/python + $ rm -Rf /home/$USER/mne-python + $ rm /home/$USER/.local/share/applications/mne-python-*.desktop + + .. tab-item:: macOS + :name: uninstall-macos + + You can simply `drag the MNE-Python folder to the trash in the Finder `__. + + Alternatively, you can do something like: + + .. code-block:: bash + + $ which python + /Users/username/Applications/MNE-Python/1.8.0_0/.mne-python/bin/python + $ rm -Rf /Users/$USER/Applications/MNE-Python # if user-specific + $ rm -Rf /Applications/MNE-Python # if system-wide + + .. tab-item:: Windows + :name: uninstall-windows + + To uninstall MNE-Python, you can remove the application using the `Windows Control Panel `__. 
diff --git a/doc/references.bib b/doc/references.bib index 12e9302cd89..a129d2f46a2 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -444,7 +444,7 @@ @book{EfronHastie2016 series = {Institute of {{Mathematical Statistics}} Monographs}, shorttitle = {Computer Age Statistical Inference}, title = {Computer Age Statistical Inference: Algorithms, Evidence, and Data Science}, - url = {https://web.stanford.edu/~hastie/CASI/}, + url = {https://hastie.su.domains/CASI/}, year = {2016} } diff --git a/doc/sphinxext/prs/12366.json b/doc/sphinxext/prs/12366.json new file mode 100644 index 00000000000..b336718a89c --- /dev/null +++ b/doc/sphinxext/prs/12366.json @@ -0,0 +1,47 @@ +{ + "merge_commit_sha": "d917be12de5739654f3c690fe176382948e261d2", + "authors": [ + { + "n": "Qian Chu", + "e": null + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Alex Lepauvre", + "e": "alexander.lepauvre@ad.aesthetics.mpg.de" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12366.newfeature.rst": { + "a": 1, + "d": 0 + }, + "doc/changes/names.inc": { + "a": 1, + "d": 1 + }, + "mne/_fiff/reference.py": { + "a": 103, + "d": 14 + }, + "mne/_fiff/tests/test_reference.py": { + "a": 154, + "d": 0 + }, + "mne/utils/docs.py": { + "a": 18, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12787.json b/doc/sphinxext/prs/12787.json new file mode 100644 index 00000000000..045dd24450f --- /dev/null +++ b/doc/sphinxext/prs/12787.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "828953e0ba584c149bcd2d6898d3599af1b30f40", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "doc/changes/devel/12787.other.rst": { + "a": 1, + "d": 0 + }, + "mne/utils/config.py": { + "a": 55, + "d": 5 + }, + "mne/utils/tests/test_config.py": { + "a": 1, + "d": 1 + }, + 
"pyproject.toml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12788.json b/doc/sphinxext/prs/12788.json new file mode 100644 index 00000000000..787c8383b76 --- /dev/null +++ b/doc/sphinxext/prs/12788.json @@ -0,0 +1,91 @@ +{ + "merge_commit_sha": "dcb05a22cff16539e5753496668fa0a0b14b6c9a", + "authors": [ + { + "n": "Daniel McCloy", + "e": null + } + ], + "changes": { + "doc/_static/style.css": { + "a": 35, + "d": 10 + }, + "mne/html_templates/repr/_acquisition.html.jinja": { + "a": 13, + "d": 23 + }, + "mne/html_templates/repr/_channels.html.jinja": { + "a": 7, + "d": 17 + }, + "mne/html_templates/repr/_filters.html.jinja": { + "a": 6, + "d": 16 + }, + "mne/html_templates/repr/_general.html.jinja": { + "a": 8, + "d": 18 + }, + "mne/html_templates/repr/epochs.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/evoked.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/forward.html.jinja": { + "a": 3, + "d": 3 + }, + "mne/html_templates/repr/ica.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/info.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/inverse_operator.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/raw.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/spectrum.html.jinja": { + "a": 1, + "d": 1 + }, + "mne/html_templates/repr/static/_section_header_row.html.jinja": { + "a": 12, + "d": 0 + }, + "mne/html_templates/repr/static/repr.css": { + "a": 95, + "d": 93 + }, + "mne/html_templates/repr/static/repr.js": { + "a": 17, + "d": 29 + }, + "mne/html_templates/repr/tfr.html.jinja": { + "a": 1, + "d": 1 + }, + "tutorials/intro/70_report.py": { + "a": 2, + "d": 2 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 1, + "d": 1 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12822.json b/doc/sphinxext/prs/12822.json new file mode 
100644 index 00000000000..c141dd3a44a --- /dev/null +++ b/doc/sphinxext/prs/12822.json @@ -0,0 +1,87 @@ +{ + "merge_commit_sha": "346af6fec258e3bb0d30791dfacbe86cdb354f2b", + "authors": [ + { + "n": "github-actions[bot]", + "e": "41898282+github-actions[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/sphinxext/prs/12792.json": { + "a": 75, + "d": 0 + }, + "doc/sphinxext/prs/12796.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12797.json": { + "a": 311, + "d": 0 + }, + "doc/sphinxext/prs/12798.json": { + "a": 99, + "d": 0 + }, + "doc/sphinxext/prs/12799.json": { + "a": 251, + "d": 0 + }, + "doc/sphinxext/prs/12800.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12801.json": { + "a": 23, + "d": 0 + }, + "doc/sphinxext/prs/12802.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12803.json": { + "a": 27, + "d": 0 + }, + "doc/sphinxext/prs/12804.json": { + "a": 31, + "d": 0 + }, + "doc/sphinxext/prs/12805.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/12806.json": { + "a": 47, + "d": 0 + }, + "doc/sphinxext/prs/12810.json": { + "a": 35, + "d": 0 + }, + "doc/sphinxext/prs/12811.json": { + "a": 51, + "d": 0 + }, + "doc/sphinxext/prs/12812.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12813.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12814.json": { + "a": 15, + "d": 0 + }, + "doc/sphinxext/prs/12815.json": { + "a": 19, + "d": 0 + }, + "doc/sphinxext/prs/12820.json": { + "a": 159, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12823.json b/doc/sphinxext/prs/12823.json new file mode 100644 index 00000000000..fb637d0e6cd --- /dev/null +++ b/doc/sphinxext/prs/12823.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "e221c6cb71ec42f7eebe1c5ea81089d78c91d4a8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".git-blame-ignore-revs": { + "a": 1, + "d": 0 + }, + "examples/inverse/morph_volume_stc.py": { + "a": 1, + "d": 0 + }, + 
"mne/morph.py": { + "a": 7, + "d": 1 + }, + "mne/preprocessing/ieeg/_volume.py": { + "a": 3, + "d": 1 + }, + "mne/tests/test_morph.py": { + "a": 5, + "d": 1 + }, + "mne/transforms.py": { + "a": 22, + "d": 10 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12825.json b/doc/sphinxext/prs/12825.json new file mode 100644 index 00000000000..83fe12fffab --- /dev/null +++ b/doc/sphinxext/prs/12825.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "3f1c7803f73862dec3e4e85e36e916f2a6d642c9", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12826.json b/doc/sphinxext/prs/12826.json new file mode 100644 index 00000000000..1ed61498660 --- /dev/null +++ b/doc/sphinxext/prs/12826.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "013d6c9f630342e5af7633641162c7b85417782a", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "mne/preprocessing/hfc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12827.json b/doc/sphinxext/prs/12827.json new file mode 100644 index 00000000000..80311bb192c --- /dev/null +++ b/doc/sphinxext/prs/12827.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "f3a3ca4430e1d4b9c539e7949c946f4f83bdb43f", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12827.other.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 52, + "d": 3 + }, + "mne/decoding/ssd.py": { + "a": 25, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12829.json b/doc/sphinxext/prs/12829.json new file mode 100644 index 00000000000..78d7d0433f4 --- /dev/null +++ b/doc/sphinxext/prs/12829.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "c993ae5d65b3c161876da407e293e6784c6e8ad9", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12829.apichange.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/csp.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12830.json b/doc/sphinxext/prs/12830.json new file mode 100644 index 00000000000..7e46a12cc4e --- /dev/null +++ b/doc/sphinxext/prs/12830.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "3ecdd6b2102b00f97d7ad98ca2483a0f79e90b30", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12830.newfeature.rst": { + "a": 1, + "d": 0 + }, + "mne/channels/_standard_montage_utils.py": { + "a": 19, + "d": 4 + }, + "mne/channels/tests/test_montage.py": { + "a": 24, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12832.json b/doc/sphinxext/prs/12832.json new file mode 100644 index 00000000000..7b877da0580 --- /dev/null +++ b/doc/sphinxext/prs/12832.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "c46733a4812f5d19370699c7b18cb1c1c4ded9fa", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + 
"changes": { + "tools/install_pre_requirements.sh": { + "a": 3, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12833.json b/doc/sphinxext/prs/12833.json new file mode 100644 index 00000000000..e1f4124bf3c --- /dev/null +++ b/doc/sphinxext/prs/12833.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "2f299276fecedcb8b3626ea40fc4f1aecff51bce", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".github/PULL_REQUEST_TEMPLATE.md": { + "a": 11, + "d": 4 + }, + "tools/github_actions_test.sh": { + "a": 3, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12834.json b/doc/sphinxext/prs/12834.json new file mode 100644 index 00000000000..cf5154627ed --- /dev/null +++ b/doc/sphinxext/prs/12834.json @@ -0,0 +1,135 @@ +{ + "merge_commit_sha": "5425ef42e41b6a427f3365e13ea57ecf9c0c12b0", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12834.dependency.rst": { + "a": 2, + "d": 0 + }, + "doc/conf.py": { + "a": 28, + "d": 0 + }, + "examples/visualization/evoked_whitening.py": { + "a": 1, + "d": 0 + }, + "mne/cov.py": { + "a": 3, + "d": 3 + }, + "mne/decoding/__init__.pyi": { + "a": 7, + "d": 2 + }, + "mne/decoding/base.py": { + "a": 24, + "d": 34 + }, + "mne/decoding/csp.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/ems.py": { + "a": 2, + "d": 2 + }, + "mne/decoding/mixin.py": { + "a": 0, + "d": 89 + }, + "mne/decoding/receptive_field.py": { + "a": 5, + "d": 13 + }, + "mne/decoding/search_light.py": { + "a": 7, + "d": 21 + }, + "mne/decoding/ssd.py": { + "a": 9, + "d": 9 + }, + "mne/decoding/tests/test_base.py": { + "a": 32, + "d": 42 + }, + "mne/decoding/tests/test_csp.py": { + "a": 7, + "d": 9 + }, + "mne/decoding/tests/test_ems.py": { + "a": 4, + "d": 4 + }, + "mne/decoding/tests/test_receptive_field.py": { + 
"a": 30, + "d": 37 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 24, + "d": 33 + }, + "mne/decoding/tests/test_ssd.py": { + "a": 6, + "d": 4 + }, + "mne/decoding/tests/test_time_frequency.py": { + "a": 4, + "d": 3 + }, + "mne/decoding/tests/test_transformer.py": { + "a": 6, + "d": 10 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 0, + "d": 3 + }, + "mne/decoding/time_frequency.py": { + "a": 1, + "d": 2 + }, + "mne/decoding/transformer.py": { + "a": 1, + "d": 2 + }, + "mne/fixes.py": { + "a": 48, + "d": 228 + }, + "mne/preprocessing/tests/test_xdawn.py": { + "a": 6, + "d": 2 + }, + "mne/tests/test_docstring_parameters.py": { + "a": 12, + "d": 3 + }, + "pyproject.toml": { + "a": 4, + "d": 0 + }, + "tools/install_pre_requirements.sh": { + "a": 1, + "d": 3 + }, + "tools/vulture_allowlist.py": { + "a": 4, + "d": 0 + }, + "tutorials/forward/90_compute_covariance.py": { + "a": 4, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12837.json b/doc/sphinxext/prs/12837.json new file mode 100644 index 00000000000..196fdf71511 --- /dev/null +++ b/doc/sphinxext/prs/12837.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "9a2f887146f080357a83104c4af2c95e2176fe88", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 6, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12838.json b/doc/sphinxext/prs/12838.json new file mode 100644 index 00000000000..3e4860bd08f --- /dev/null +++ b/doc/sphinxext/prs/12838.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "5cd52993c0027a958cfbc4f6810dff5d180301e5", + "authors": [ + { + "n": "Clemens Brunner", + "e": null + } + ], + "changes": { + "mne/utils/config.py": { + "a": 11, + "d": 3 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/12839.json b/doc/sphinxext/prs/12839.json new file mode 100644 index 00000000000..f1f8d57e8aa --- /dev/null +++ b/doc/sphinxext/prs/12839.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "ebab915ea723155ee0fed788bcc92c2ff16a4f7f", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/channels/_standard_montage_utils.py": { + "a": 9, + "d": 1 + }, + "mne/channels/montage.py": { + "a": 5, + "d": 1 + }, + "mne/channels/tests/test_montage.py": { + "a": 55, + "d": 1 + }, + "tutorials/inverse/70_eeg_mri_coords.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12841.json b/doc/sphinxext/prs/12841.json new file mode 100644 index 00000000000..0165fbe7092 --- /dev/null +++ b/doc/sphinxext/prs/12841.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "528e04658c5ea7a5e55bcb5889ebb4c6d6847284", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + } + ], + "changes": { + "doc/changes/devel/12792.newfeature.rst": { + "a": 1, + "d": 1 + }, + "mne/datasets/config.py": { + "a": 2, + "d": 2 + }, + "mne/io/ant/ant.py": { + "a": 2, + "d": 2 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 218, + "d": 33 + }, + "pyproject.toml": { + "a": 1, + "d": 1 + }, + "tools/vulture_allowlist.py": { + "a": 1, + "d": 0 + }, + "tutorials/io/20_reading_eeg_data.py": { + "a": 28, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12842.json b/doc/sphinxext/prs/12842.json new file mode 100644 index 00000000000..7647f81bc5c --- /dev/null +++ b/doc/sphinxext/prs/12842.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "e999e853b263c8b48a6fcfeb60c82b445e20d88b", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + ".github/workflows/tests.yml": { + "a": 1, + "d": 1 + }, + "doc/changes/devel/12842.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/decoding/receptive_field.py": { + "a": 5, + "d": 4 + }, + "mne/decoding/tests/test_search_light.py": { + "a": 1, + "d": 0 + }, + "mne/decoding/time_delaying_ridge.py": { + "a": 13, + "d": 9 + }, + "mne/time_frequency/tests/test_tfr.py": { + "a": 7, + "d": 0 + }, + "mne/time_frequency/tfr.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12843.json b/doc/sphinxext/prs/12843.json new file mode 100644 index 00000000000..896123e0088 --- /dev/null +++ b/doc/sphinxext/prs/12843.json @@ -0,0 +1,195 @@ +{ + "merge_commit_sha": "10ff91a339642f75e18ba1497b60ead88c24cd2b", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12843.bugfix.rst": { + "a": 3, + "d": 0 + }, + "doc/changes/devel/12843.other.rst": { + "a": 1, + "d": 0 + }, + "mne/_fiff/open.py": { + "a": 22, + "d": 21 + }, + "mne/_fiff/tests/test_what.py": { + "a": 9, + "d": 4 + }, + "mne/_fiff/tree.py": { + "a": 9, + "d": 6 + }, + "mne/_fiff/utils.py": { + "a": 1, + "d": 1 + }, + "mne/_fiff/what.py": { + "a": 1, + "d": 1 + }, + "mne/annotations.py": { + "a": 32, + "d": 38 + }, + "mne/channels/montage.py": { + "a": 1, + "d": 1 + }, + "mne/chpi.py": { + "a": 1, + "d": 1 + }, + "mne/commands/tests/test_commands.py": { + "a": 1, + "d": 1 + }, + "mne/epochs.py": { + "a": 15, + "d": 6 + }, + "mne/evoked.py": { + "a": 19, + "d": 6 + }, + "mne/export/_eeglab.py": { + "a": 1, + "d": 1 + }, + "mne/gui/_coreg.py": { + "a": 29, + "d": 30 + }, + "mne/gui/_gui.py": { + "a": 13, + "d": 15 + }, + "mne/io/base.py": { + "a": 67, + "d": 43 + }, + "mne/io/boxy/boxy.py": { + "a": 1, + "d": 1 + }, + "mne/io/brainvision/brainvision.py": { + "a": 2, + "d": 2 + }, 
+ "mne/io/bti/bti.py": { + "a": 1, + "d": 1 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 1 + }, + "mne/io/ctf/ctf.py": { + "a": 1, + "d": 1 + }, + "mne/io/curry/curry.py": { + "a": 1, + "d": 1 + }, + "mne/io/edf/edf.py": { + "a": 4, + "d": 3 + }, + "mne/io/eeglab/eeglab.py": { + "a": 3, + "d": 2 + }, + "mne/io/egi/egimff.py": { + "a": 1, + "d": 4 + }, + "mne/io/fiff/raw.py": { + "a": 43, + "d": 23 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 52, + "d": 29 + }, + "mne/io/hitachi/hitachi.py": { + "a": 2, + "d": 3 + }, + "mne/io/kit/kit.py": { + "a": 1, + "d": 1 + }, + "mne/io/nedf/nedf.py": { + "a": 1, + "d": 1 + }, + "mne/io/neuralynx/neuralynx.py": { + "a": 1, + "d": 1 + }, + "mne/io/nihon/nihon.py": { + "a": 1, + "d": 1 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 1 + }, + "mne/io/tests/test_raw.py": { + "a": 6, + "d": 3 + }, + "mne/preprocessing/ica.py": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/tests/test_maxwell.py": { + "a": 1, + "d": 1 + }, + "mne/proj.py": { + "a": 2, + "d": 1 + }, + "mne/report/report.py": { + "a": 41, + "d": 56 + }, + "mne/source_space/_source_space.py": { + "a": 1, + "d": 1 + }, + "mne/tests/test_annotations.py": { + "a": 22, + "d": 0 + }, + "mne/utils/check.py": { + "a": 3, + "d": 3 + }, + "mne/viz/backends/_notebook.py": { + "a": 1, + "d": 1 + }, + "mne/viz/backends/_qt.py": { + "a": 1, + "d": 1 + }, + "mne/viz/raw.py": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12844.json b/doc/sphinxext/prs/12844.json new file mode 100644 index 00000000000..17e1ce65ad8 --- /dev/null +++ b/doc/sphinxext/prs/12844.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "a218f96927ef06fd8a4e38363b9d98f74a93f1c3", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "doc/changes/devel/12844.other.rst": { + "a": 1, + "d": 0 + }, + "examples/datasets/limo_data.py": { + "a": 1, + "d": 1 + }, + "examples/preprocessing/epochs_metadata.py": 
{ + "a": 2, + "d": 2 + }, + "mne/viz/misc.py": { + "a": 32, + "d": 5 + }, + "tutorials/clinical/60_sleep.py": { + "a": 4, + "d": 4 + }, + "tutorials/intro/10_overview.py": { + "a": 1, + "d": 1 + }, + "tutorials/preprocessing/70_fnirs_processing.py": { + "a": 4, + "d": 4 + }, + "tutorials/raw/20_event_arrays.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12845.json b/doc/sphinxext/prs/12845.json new file mode 100644 index 00000000000..2cf94a62a25 --- /dev/null +++ b/doc/sphinxext/prs/12845.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "efe8b6a9ce87c9a45dcdc20a8c44e99d2efb6c13", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/Makefile": { + "a": 3, + "d": 0 + }, + "doc/_static/js/set_installer_tab.js": { + "a": 24, + "d": 11 + }, + "doc/_static/js/update_installer_version.js": { + "a": 1, + "d": 1 + }, + "doc/install/installers.rst": { + "a": 59, + "d": 5 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12846.json b/doc/sphinxext/prs/12846.json new file mode 100644 index 00000000000..efb06196e5f --- /dev/null +++ b/doc/sphinxext/prs/12846.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "670330a1ead288f11cc3ffd5ca9d65136ca55ccd", + "authors": [ + { + "n": "Scott Huberty", + "e": null + } + ], + "changes": { + "doc/changes/devel/12846.bugfix.rst": { + "a": 1, + "d": 0 + }, + "examples/visualization/eyetracking_plot_heatmap.py": { + "a": 4, + "d": 0 + }, + "mne/defaults.py": { + "a": 9, + "d": 9 + }, + "tutorials/io/70_reading_eyetracking_data.py": { + "a": 33, + "d": 17 + }, + "tutorials/preprocessing/90_eyetracking_data.py": { + "a": 32, + "d": 8 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12850.json b/doc/sphinxext/prs/12850.json new file mode 100644 index 00000000000..8cb357550c8 --- /dev/null +++ b/doc/sphinxext/prs/12850.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "4f58e814b4e5d01311f359f3d8f8e2cbca391fbb", + 
"authors": [ + { + "n": "Scott Huberty", + "e": null + }, + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/defaults.py": { + "a": 4, + "d": 4 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12853.json b/doc/sphinxext/prs/12853.json new file mode 100644 index 00000000000..4113fdfeac6 --- /dev/null +++ b/doc/sphinxext/prs/12853.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "2cab0257a430127c4bae348380ab2be99bffb412", + "authors": [ + { + "n": "Thomas S. Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "doc/changes/devel/12853.bugfix.rst": { + "a": 1, + "d": 0 + }, + "mne/viz/tests/test_topomap.py": { + "a": 18, + "d": 0 + }, + "mne/viz/topomap.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12856.json b/doc/sphinxext/prs/12856.json new file mode 100644 index 00000000000..8eefad7827a --- /dev/null +++ b/doc/sphinxext/prs/12856.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "efbd97292592e92c2bdfe09e2fea9cf9b1d2e6f6", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12860.json b/doc/sphinxext/prs/12860.json new file mode 100644 index 00000000000..2220691bead --- /dev/null +++ b/doc/sphinxext/prs/12860.json @@ -0,0 +1,27 @@ +{ + "merge_commit_sha": "6f9646ff979294683bb385e63f3587aa3c7a6dd5", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/decoding/tests/test_base.py": { + "a": 5, + "d": 1 + }, + "mne/preprocessing/ica.py": { + "a": 8, + "d": 7 + }, + "mne/utils/progressbar.py": { + "a": 5, + "d": 1 + } + } +} \ No newline at end of file 
diff --git a/doc/sphinxext/prs/12861.json b/doc/sphinxext/prs/12861.json new file mode 100644 index 00000000000..d7b6f005206 --- /dev/null +++ b/doc/sphinxext/prs/12861.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "fa841cbc30ce830e40c60a8a44518b84e2f7f0cf", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + } + ], + "changes": { + "mne/io/brainvision/brainvision.py": { + "a": 12, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12862.json b/doc/sphinxext/prs/12862.json new file mode 100644 index 00000000000..c89ada322dc --- /dev/null +++ b/doc/sphinxext/prs/12862.json @@ -0,0 +1,35 @@ +{ + "merge_commit_sha": "b9cdca8638b20768e85822f5e5e57a5fe043d393", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + }, + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Clemens Brunner", + "e": "clemens.brunner@gmail.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "doc/changes/devel/12862.other.rst": { + "a": 1, + "d": 0 + }, + "mne/preprocessing/ica.py": { + "a": 42, + "d": 9 + }, + "mne/preprocessing/tests/test_ica.py": { + "a": 7, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12865.json b/doc/sphinxext/prs/12865.json new file mode 100644 index 00000000000..c52aeea4606 --- /dev/null +++ b/doc/sphinxext/prs/12865.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "e6f9c5df856418d331684dd3477e8c1d1c0f37e1", + "authors": [ + { + "n": "Thomas S. 
Binns", + "e": "t.s.binns@outlook.com" + } + ], + "changes": { + "environment.yml": { + "a": 1, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12866.json b/doc/sphinxext/prs/12866.json new file mode 100644 index 00000000000..deda3246f0f --- /dev/null +++ b/doc/sphinxext/prs/12866.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "ab2516826c4b3492f1246db978791207ad7f66d7", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/preprocessing/ecg.py": { + "a": 3, + "d": 5 + }, + "mne/preprocessing/ica.py": { + "a": 6, + "d": 7 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12867.json b/doc/sphinxext/prs/12867.json new file mode 100644 index 00000000000..dde94ea5570 --- /dev/null +++ b/doc/sphinxext/prs/12867.json @@ -0,0 +1,15 @@ +{ + "merge_commit_sha": "fc05aeb19e7e998356d400d5e93d977545a0511a", + "authors": [ + { + "n": "Stefan Appelhoff", + "e": "stefan.appelhoff@mailbox.org" + } + ], + "changes": { + "mne/viz/misc.py": { + "a": 2, + "d": 2 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12868.json b/doc/sphinxext/prs/12868.json new file mode 100644 index 00000000000..a89104cb11e --- /dev/null +++ b/doc/sphinxext/prs/12868.json @@ -0,0 +1,43 @@ +{ + "merge_commit_sha": "d248a5763e1ced3c9c65a12101e46d85e806df54", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + } + ], + "changes": { + "examples/io/read_impedances.py": { + "a": 77, + "d": 0 + }, + "mne/decoding/tests/test_receptive_field.py": { + "a": 1, + "d": 0 + }, + "mne/io/ant/ant.py": { + "a": 1, + "d": 13 + }, + "mne/io/ant/tests/test_ant.py": { + "a": 1, + "d": 6 + }, + "pyproject.toml": { + "a": 4, + "d": 0 + } + } +} \ No 
newline at end of file diff --git a/doc/sphinxext/prs/12869.json b/doc/sphinxext/prs/12869.json new file mode 100644 index 00000000000..8abf2a3f1cd --- /dev/null +++ b/doc/sphinxext/prs/12869.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "1dd07a4ee95a3a16f2e0b6cf97b3e0416595dc7a", + "authors": [ + { + "n": "pre-commit-ci[bot]", + "e": "66853113+pre-commit-ci[bot]@users.noreply.github.com" + }, + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 1, + "d": 1 + }, + "mne/decoding/receptive_field.py": { + "a": 2, + "d": 1 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12870.json b/doc/sphinxext/prs/12870.json new file mode 100644 index 00000000000..d62c050fd91 --- /dev/null +++ b/doc/sphinxext/prs/12870.json @@ -0,0 +1,23 @@ +{ + "merge_commit_sha": "50ce70a9d04645988a3fddead56e6892faa940fe", + "authors": [ + { + "n": "Mathieu Scheltienne", + "e": "mathieu.scheltienne@fcbg.ch" + }, + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + ".pre-commit-config.yaml": { + "a": 6, + "d": 0 + }, + "pyproject.toml": { + "a": 311, + "d": 314 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12871.json b/doc/sphinxext/prs/12871.json new file mode 100644 index 00000000000..91385573801 --- /dev/null +++ b/doc/sphinxext/prs/12871.json @@ -0,0 +1,31 @@ +{ + "merge_commit_sha": "381688fc250e6f1b1a6e1bf26357379b0fec94a4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12871.newfeature.rst": { + "a": 2, + "d": 0 + }, + "mne/viz/backends/_abstract.py": { + "a": 4, + "d": 2 + }, + "mne/viz/backends/_pyvista.py": { + "a": 12, + "d": 4 + }, + "mne/viz/backends/renderer.py": { + "a": 34, + "d": 3 + } + } +} \ No newline at end of file diff --git 
a/doc/sphinxext/prs/12875.json b/doc/sphinxext/prs/12875.json new file mode 100644 index 00000000000..124aa3135a3 --- /dev/null +++ b/doc/sphinxext/prs/12875.json @@ -0,0 +1,39 @@ +{ + "merge_commit_sha": "5492174fc0e64762056b5b99a330129ca6eefca4", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + }, + { + "n": "Daniel McCloy", + "e": "dan@mccloy.info" + }, + { + "n": "autofix-ci[bot]", + "e": "114827586+autofix-ci[bot]@users.noreply.github.com" + } + ], + "changes": { + "doc/changes/devel/12875.bugfix.rst": { + "a": 2, + "d": 0 + }, + "mne/_fiff/meas_info.py": { + "a": 178, + "d": 114 + }, + "mne/_fiff/tests/test_meas_info.py": { + "a": 15, + "d": 5 + }, + "mne/io/cnt/cnt.py": { + "a": 1, + "d": 0 + }, + "mne/io/persyst/persyst.py": { + "a": 1, + "d": 0 + } + } +} \ No newline at end of file diff --git a/doc/sphinxext/prs/12878.json b/doc/sphinxext/prs/12878.json new file mode 100644 index 00000000000..400bc5dd56e --- /dev/null +++ b/doc/sphinxext/prs/12878.json @@ -0,0 +1,19 @@ +{ + "merge_commit_sha": "8379668a42b68ba115310dfd9e5f821e3efb4db8", + "authors": [ + { + "n": "Eric Larson", + "e": "larson.eric.d@gmail.com" + } + ], + "changes": { + "mne/io/fiff/raw.py": { + "a": 8, + "d": 5 + }, + "mne/io/fiff/tests/test_raw_fiff.py": { + "a": 9, + "d": 0 + } + } +} \ No newline at end of file diff --git a/environment.yml b/environment.yml index a0dbdf5ec49..9bf113c3ecf 100644 --- a/environment.yml +++ b/environment.yml @@ -2,7 +2,7 @@ name: mne channels: - conda-forge dependencies: - - python>=3.9 + - python >=3.10 - pip - numpy - scipy @@ -28,25 +28,25 @@ dependencies: - psutil - numexpr - imageio - - spyder-kernels>=1.10.0 - - imageio>=2.6.1 - - imageio-ffmpeg>=0.4.1 - - vtk>=9.2 + - spyder-kernels >=1.10.0 + - imageio >=2.6.1 + - imageio-ffmpeg >=0.4.1 + - vtk >=9.2 - traitlets - - pyvista>=0.32,!=0.35.2,!=0.38.0,!=0.38.1,!=0.38.2,!=0.38.3,!=0.38.4,!=0.38.5,!=0.38.6,!=0.42.0 - - pyvistaqt>=0.4 - - qdarkstyle!=3.2.2 + - pyvista 
>=0.32,!=0.35.2,!=0.38.0,!=0.38.1,!=0.38.2,!=0.38.3,!=0.38.4,!=0.38.5,!=0.38.6,!=0.42.0 + - pyvistaqt >=0.4 + - qdarkstyle !=3.2.2 - darkdetect - dipy - nibabel - - openmeeg>=2.5.5 + - openmeeg >=2.5.5 - nilearn - python-picard - qtpy - - pyside6 + - pyside6 !=6.8.0,!=6.8.0.1 - mne-base - seaborn-base - - mffpy>=0.5.7 + - mffpy >=0.5.7 - ipyevents - ipywidgets - ipympl @@ -59,7 +59,7 @@ dependencies: - mne-qt-browser - pymatreader - eeglabio - - edfio>=0.2.1 + - edfio >=0.2.1 - pybv - mamba - lazy_loader diff --git a/examples/datasets/limo_data.py b/examples/datasets/limo_data.py index 54a2f34a530..f7f6d58cf19 100644 --- a/examples/datasets/limo_data.py +++ b/examples/datasets/limo_data.py @@ -107,7 +107,7 @@ print(limo_epochs.metadata.head()) # %% -# Now let's take a closer look at the information in the epochs +# Now let us take a closer look at the information in the epochs # metadata. # We want include all columns in the summary table diff --git a/examples/inverse/morph_volume_stc.py b/examples/inverse/morph_volume_stc.py index 7a9db303e90..24b23fc374e 100644 --- a/examples/inverse/morph_volume_stc.py +++ b/examples/inverse/morph_volume_stc.py @@ -20,6 +20,7 @@ result will be plotted, showing the fsaverage T1 weighted anatomical MRI, overlaid with the morphed volumetric source estimate. """ + # Author: Tommy Clausner # # License: BSD-3-Clause diff --git a/examples/io/read_impedances.py b/examples/io/read_impedances.py new file mode 100644 index 00000000000..0c32e625703 --- /dev/null +++ b/examples/io/read_impedances.py @@ -0,0 +1,77 @@ +""" +.. _ex-io-impedances: + +================================= +Getting impedances from raw files +================================= + +Many EEG systems provide impedance measurements for each channel within their file +format. MNE does not parse this information and does not store it in the +:class:`~mne.io.Raw` object. 
However, it is possible to extract this information from +the raw data and store it in a separate data structure. + +ANT Neuro +--------- + +The ``.cnt`` file format from ANT Neuro stores impedance information in the form of +triggers. The function :func:`mne.io.read_raw_ant` reads this information and marks the +time-segment during which an impedance measurement was performed as +:class:`~mne.Annotations` with the description set in the argument +``impedance_annotation``. However, it doesn't extract the impedance values themselves. +To do so, use the function ``antio.parser.read_triggers``. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from antio import read_cnt +from antio.parser import read_triggers +from matplotlib import pyplot as plt + +from mne.datasets import testing +from mne.io import read_raw_ant +from mne.viz import plot_topomap + +fname = testing.data_path() / "antio" / "CA_208" / "test_CA_208.cnt" +cnt = read_cnt(fname) +_, _, _, impedances, _ = read_triggers(cnt) + +raw = read_raw_ant(fname, eog=r"EOG") +impedances = [{ch: imp[k] for k, ch in enumerate(raw.ch_names)} for imp in impedances] +print(impedances[0]) # impedances measurement at the beginning of the recording + +# %% +# Note that the impedance measurement contains all channels, including the bipolar ones. +# We can visualize the impedances on a topographic map; below we show a topography of +# impedances before and after the recording for the EEG channels only. 
+ +raw.pick("eeg").set_montage("standard_1020") +impedances = [{ch: imp[ch] for ch in raw.ch_names} for imp in impedances] + +f, ax = plt.subplots(1, 2, layout="constrained", figsize=(10, 5)) +f.suptitle("Impedances (kOhm)") +impedance = list(impedances[0].values()) +plot_topomap( + impedance, + raw.info, + vlim=(0, 50), + axes=ax[0], + show=False, + names=[f"{elt:.1f}" for elt in impedance], +) +ax[0].set_title("Impedances at the beginning of the recording") +impedance = list(impedances[0].values()) +plot_topomap( + impedance, + raw.info, + vlim=(0, 50), + axes=ax[1], + show=False, + names=[f"{elt:.1f}" for elt in impedance], +) +ax[1].set_title("Impedances at the end of the recording") +plt.show() + +# %% +# In this very short test file, the impedances are stable over time. diff --git a/examples/preprocessing/epochs_metadata.py b/examples/preprocessing/epochs_metadata.py index d1ea9a85996..9c46368afa0 100644 --- a/examples/preprocessing/epochs_metadata.py +++ b/examples/preprocessing/epochs_metadata.py @@ -35,8 +35,8 @@ # # All experimental events are stored in the :class:`~mne.io.Raw` instance as # :class:`~mne.Annotations`. We first need to convert these to events and the -# corresponding mapping from event codes to event names (``event_id``). We then -# visualize the events. +# corresponding mapping from event codes to event names (``event_id``). +# We then visualize the events. all_events, all_event_id = mne.events_from_annotations(raw) mne.viz.plot_events(events=all_events, event_id=all_event_id, sfreq=raw.info["sfreq"]) diff --git a/examples/visualization/evoked_whitening.py b/examples/visualization/evoked_whitening.py index 9a474d9ea36..ed05ae3ba11 100644 --- a/examples/visualization/evoked_whitening.py +++ b/examples/visualization/evoked_whitening.py @@ -16,6 +16,7 @@ ---------- .. footbibliography:: """ + # Authors: Alexandre Gramfort # Denis A. 
Engemann # diff --git a/examples/visualization/eyetracking_plot_heatmap.py b/examples/visualization/eyetracking_plot_heatmap.py index a57857f34ad..07983685b5e 100644 --- a/examples/visualization/eyetracking_plot_heatmap.py +++ b/examples/visualization/eyetracking_plot_heatmap.py @@ -68,6 +68,10 @@ cmap = plt.get_cmap("viridis") plot_gaze(epochs["natural"], calibration=calibration, cmap=cmap, sigma=50) +# %% +# .. note:: The (0, 0) pixel coordinates are at the top-left of the +# trackable area of the screen for many eye trackers. + # %% # Overlaying plots with images # ---------------------------- diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index 2759d2332a3..c881822e44a 100644 --- a/mne/_fiff/meas_info.py +++ b/mne/_fiff/meas_info.py @@ -5,10 +5,12 @@ import contextlib import datetime import operator +import re import string from collections import Counter, OrderedDict from collections.abc import Mapping from copy import deepcopy +from functools import partial from io import BytesIO from textwrap import shorten @@ -305,6 +307,9 @@ def _unique_channel_names(ch_names, max_length=None, verbose=None): return ch_names +# %% Mixin classes + + class MontageMixin: """Mixin for Montage getting and setting.""" @@ -922,6 +927,152 @@ def get_channel_types(self, picks=None, unique=False, only_data_chs=False): return ch_types +# %% ValidatedDict class + + +class ValidatedDict(dict): + _attributes = {} # subclasses should set this to validated attributes + + def __init__(self, *args, **kwargs): + self._unlocked = True + super().__init__(*args, **kwargs) + self._unlocked = False + + def __getstate__(self): + """Get state (for pickling).""" + return {"_unlocked": self._unlocked} + + def __setstate__(self, state): + """Set state (for pickling).""" + self._unlocked = state["_unlocked"] + + def __setitem__(self, key, val): + """Attribute setter.""" + # During unpickling, the _unlocked attribute has not been set, so + # let __setstate__ do it later and act unlocked now 
+ unlocked = getattr(self, "_unlocked", True) + if key in self._attributes: + if isinstance(self._attributes[key], str): + if not unlocked: + raise RuntimeError(self._attributes[key]) + else: + val = self._attributes[key]( + val, info=self + ) # attribute checker function + else: + class_name = self.__class__.__name__ + extra = "" + if "temp" in self._attributes: + var_name = _camel_to_snake(class_name) + extra = ( + f"You can set {var_name}['temp'] to store temporary objects in " + f"{class_name} instances, but these will not survive an I/O " + "round-trip." + ) + raise RuntimeError( + f"{class_name} does not support directly setting the key {repr(key)}. " + + extra + ) + super().__setitem__(key, val) + + def update(self, other=None, **kwargs): + """Update method using __setitem__().""" + iterable = other.items() if isinstance(other, Mapping) else other + if other is not None: + for key, val in iterable: + self[key] = val + for key, val in kwargs.items(): + self[key] = val + + def copy(self): + """Copy the instance. + + Returns + ------- + info : instance of Info + The copied info. 
+ """ + return deepcopy(self) + + def __repr__(self): + """Return a string representation.""" + mapping = ", ".join(f"{key}: {val}" for key, val in self.items()) + return f"<{_camel_to_snake(self.__class__.__name__)} | {mapping}>" + + +# %% Subject info + + +def _check_types(x, *, info, name, types, cast=None): + _validate_type(x, types, name) + if cast is not None and x is not None: + x = cast(x) + return x + + +class SubjectInfo(ValidatedDict): + _attributes = { + "id": partial(_check_types, name='subject_info["id"]', types=int), + "his_id": partial(_check_types, name='subject_info["his_id"]', types=str), + "last_name": partial(_check_types, name='subject_info["last_name"]', types=str), + "first_name": partial( + _check_types, name='subject_info["first_name"]', types=str + ), + "middle_name": partial( + _check_types, name='subject_info["middle_name"]', types=str + ), + "birthday": partial( + _check_types, name='subject_info["birthday"]', types=(datetime.date, None) + ), + "sex": partial(_check_types, name='subject_info["sex"]', types=int), + "hand": partial(_check_types, name='subject_info["hand"]', types=int), + "weight": partial( + _check_types, name='subject_info["weight"]', types="numeric", cast=float + ), + "height": partial( + _check_types, name='subject_info["height"]', types="numeric", cast=float + ), + } + + def __init__(self, initial): + _validate_type(initial, dict, "subject_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +class HeliumInfo(ValidatedDict): + _attributes = { + "he_level_raw": partial( + _check_types, + name='helium_info["he_level_raw"]', + types="numeric", + cast=float, + ), + "helium_level": partial( + _check_types, + name='helium_info["helium_level"]', + types="numeric", + cast=float, + ), + "orig_file_guid": partial( + _check_types, name='helium_info["orig_file_guid"]', types=str + ), + "meas_date": partial( + _check_types, name='helium_info["meas_date"]', types=datetime.datetime + ), + } + + def 
__init__(self, initial): + _validate_type(initial, dict, "helium_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +# %% Info class and helpers + + def _format_trans(obj, key): from ..transforms import Transform @@ -993,11 +1144,6 @@ def _check_bads(bads, *, info): return MNEBadsList(bads=bads, info=info) -def _check_description(description, *, info): - _validate_type(description, (None, str), "info['description']") - return description - - def _check_dev_head_t(dev_head_t, *, info): from ..transforms import Transform, _ensure_trans @@ -1007,62 +1153,8 @@ def _check_dev_head_t(dev_head_t, *, info): return dev_head_t -def _check_experimenter(experimenter, *, info): - _validate_type(experimenter, (None, str), "experimenter") - return experimenter - - -def _check_line_freq(line_freq, *, info): - _validate_type(line_freq, (None, "numeric"), "line_freq") - line_freq = float(line_freq) if line_freq is not None else line_freq - return line_freq - - -def _check_subject_info(subject_info, *, info): - _validate_type(subject_info, (None, dict), "subject_info") - if isinstance(subject_info, dict): - if "birthday" in subject_info: - _validate_type( - subject_info["birthday"], - (datetime.date, None), - "subject_info['birthday']", - ) - return subject_info - - -def _check_device_info(device_info, *, info): - _validate_type( - device_info, - ( - None, - dict, - ), - "device_info", - ) - return device_info - - -def _check_helium_info(helium_info, *, info): - _validate_type( - helium_info, - ( - None, - dict, - ), - "helium_info", - ) - if isinstance(helium_info, dict): - if "meas_date" in helium_info: - _validate_type( - helium_info["meas_date"], - datetime.datetime, - "helium_info['meas_date']", - ) - return helium_info - - # TODO: Add fNIRS convention to loc -class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin): +class Info(ValidatedDict, SetChannelsMixin, MontageMixin, ContainsMixin): """Measurement information. 
This data structure behaves like a dictionary. It contains all metadata @@ -1502,24 +1594,28 @@ class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin): "custom_ref_applied": "custom_ref_applied cannot be set directly. " "Please use method inst.set_eeg_reference() " "instead.", - "description": _check_description, + "description": partial(_check_types, name="description", types=(str, None)), "dev_ctf_t": "dev_ctf_t cannot be set directly.", "dev_head_t": _check_dev_head_t, - "device_info": _check_device_info, + "device_info": partial(_check_types, name="device_info", types=(dict, None)), "dig": "dig cannot be set directly. " "Please use method inst.set_montage() instead.", "events": "events cannot be set directly.", - "experimenter": _check_experimenter, + "experimenter": partial(_check_types, name="experimenter", types=(str, None)), "file_id": "file_id cannot be set directly.", "gantry_angle": "gantry_angle cannot be set directly.", - "helium_info": _check_helium_info, + "helium_info": partial( + _check_types, name="helium_info", types=(dict, None), cast=HeliumInfo + ), "highpass": "highpass cannot be set directly. " "Please use method inst.filter() instead.", "hpi_meas": "hpi_meas can not be set directly.", "hpi_results": "hpi_results cannot be set directly.", "hpi_subsystem": "hpi_subsystem cannot be set directly.", "kit_system_id": "kit_system_id cannot be set directly.", - "line_freq": _check_line_freq, + "line_freq": partial( + _check_types, name="line_freq", types=("numeric", None), cast=float + ), "lowpass": "lowpass cannot be set directly. " "Please use method inst.filter() instead.", "maxshield": "maxshield cannot be set directly.", @@ -1541,7 +1637,9 @@ class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin): "instead.", "sfreq": "sfreq cannot be set directly. 
" "Please use method inst.resample() instead.", - "subject_info": _check_subject_info, + "subject_info": partial( + _check_types, name="subject_info", types=(dict, None), cast=SubjectInfo + ), "temp": lambda x, info=None: x, "utc_offset": "utc_offset cannot be set directly.", "working_dir": "working_dir cannot be set directly.", @@ -1549,8 +1647,8 @@ class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin): } def __init__(self, *args, **kwargs): - self._unlocked = True super().__init__(*args, **kwargs) + self._unlocked = True # Deal with h5io writing things as dict if "bads" in self: self["bads"] = MNEBadsList(bads=self["bads"], info=self) @@ -1579,46 +1677,16 @@ def __init__(self, *args, **kwargs): else: self["meas_date"] = _ensure_meas_date_none_or_dt(meas_date) self._unlocked = False - - def __getstate__(self): - """Get state (for pickling).""" - return {"_unlocked": self._unlocked} + # with validation and casting + for key in ("helium_info", "subject_info"): + if key in self: + self[key] = self[key] def __setstate__(self, state): """Set state (for pickling).""" - self._unlocked = state["_unlocked"] + super().__setstate__(state) self["bads"] = MNEBadsList(bads=self["bads"], info=self) - def __setitem__(self, key, val): - """Attribute setter.""" - # During unpickling, the _unlocked attribute has not been set, so - # let __setstate__ do it later and act unlocked now - unlocked = getattr(self, "_unlocked", True) - if key in self._attributes: - if isinstance(self._attributes[key], str): - if not unlocked: - raise RuntimeError(self._attributes[key]) - else: - val = self._attributes[key]( - val, info=self - ) # attribute checker function - else: - raise RuntimeError( - f"Info does not support directly setting the key {repr(key)}. " - "You can set info['temp'] to store temporary objects in an " - "Info instance, but these will not survive an I/O round-trip." 
- ) - super().__setitem__(key, val) - - def update(self, other=None, **kwargs): - """Update method using __setitem__().""" - iterable = other.items() if isinstance(other, Mapping) else other - if other is not None: - for key, val in iterable: - self[key] = val - for key, val in kwargs.items(): - self[key] = val - @contextlib.contextmanager def _unlock(self, *, update_redundant=False, check_after=False): """Context manager unlocking access to attributes.""" @@ -1638,16 +1706,6 @@ def _unlock(self, *, update_redundant=False, check_after=False): finally: self._unlocked = state - def copy(self): - """Copy the instance. - - Returns - ------- - info : instance of Info - The copied info. - """ - return deepcopy(self) - def normalize_proj(self): """(Re-)Normalize projection vectors after subselection. @@ -1738,6 +1796,8 @@ def __repr__(self): entr = str(bool(v)) if not v: non_empty -= 1 # don't count if 0 + elif isinstance(v, ValidatedDict): + entr = repr(v) else: try: this_len = len(v) @@ -2378,10 +2438,10 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): si["hand"] = int(tag.data.item()) elif kind == FIFF.FIFF_SUBJ_WEIGHT: tag = read_tag(fid, pos) - si["weight"] = tag.data + si["weight"] = float(tag.data.item()) elif kind == FIFF.FIFF_SUBJ_HEIGHT: tag = read_tag(fid, pos) - si["height"] = tag.data + si["height"] = float(tag.data.item()) info["subject_info"] = si del si @@ -3697,3 +3757,7 @@ def _get_fnirs_ch_pos(info): for optode in [*srcs, *dets]: ch_pos[optode] = _optode_position(info, optode) return ch_pos + + +def _camel_to_snake(s): + return re.sub(r"(? 
0: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + + return ref_to + + +def _check_ssp(inst, ref_items): + """Check for SSPs that may block re-referencing.""" projs_to_remove = [] for i, proj in enumerate(inst.info["projs"]): # Remove any average reference projections @@ -55,10 +68,7 @@ def _check_before_reference(inst, ref_from, ref_to, ch_type): # Inactive SSPs may block re-referencing elif ( not proj["active"] - and len( - [ch for ch in (ref_from + ref_to) if ch in proj["data"]["col_names"]] - ) - > 0 + and len([ch for ch in ref_items if ch in proj["data"]["col_names"]]) > 0 ): raise RuntimeError( "Inactive signal space projection (SSP) operators are " @@ -74,14 +84,72 @@ def _check_before_reference(inst, ref_from, ref_to, ch_type): # Need to call setup_proj after changing the projs: inst._projector, _ = setup_proj(inst.info, add_eeg_ref=False, activate=False) - # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the - # info that a non-CAR has been applied. - ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True) - if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0: - with inst.info._unlock(): - inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON - return ref_to +def _check_before_dict_reference(inst, ref_dict): + """Prepare instance for dict-based referencing.""" + # Check to see that data is preloaded + _check_preload(inst, "Applying a reference") + + # Promote all values to list-like. 
This simplifies our logic and also helps catch + # self-referencing cases like `{"Cz": ["Cz"]}` + _refdict = {k: [v] if isinstance(v, str) else list(v) for k, v in ref_dict.items()} + + # Check that keys are strings and values are lists-of-strings + key_types = {type(k) for k in _refdict} + value_types = {type(v) for val in _refdict.values() for v in val} + for elem_name, elem in dict(key=key_types, value=value_types).items(): + if bad_elem := elem - {str}: + raise TypeError( + f"{elem_name.capitalize()}s in the ref_channels dict must be strings. " + f"Your dict has {elem_name}s of type " + f'{", ".join(map(lambda x: x.__name__, bad_elem))}.' + ) + + # Check that keys are valid channels and values are lists-of-valid-channels + ch_set = set(inst.ch_names) + bad_ch_set = set(inst.info["bads"]) + keys = set(_refdict) + values = set(sum(_refdict.values(), [])) + for elem_name, elem in dict(key=keys, value=values).items(): + if bad_elem := elem - ch_set: + raise ValueError( + f'ref_channels dict contains invalid {elem_name}(s) ' + f'({", ".join(bad_elem)}) ' + "that are not names of channels in the instance." + ) + # Check that values are not bad channels + if bad_elem := elem.intersection(bad_ch_set): + warn( + f"ref_channels dict contains {elem_name}(s) " + f"({', '.join(bad_elem)}) " + "that are marked as bad channels." + ) + + _check_ssp(inst, keys.union(values)) + + # Check for self-referencing + self_ref = [[k] == v for k, v in _refdict.items()] + if any(self_ref): + which = np.array(list(_refdict))[np.nonzero(self_ref)] + for ch in which: + warn(f"Channel {ch} is self-referenced, which will nullify the channel.") + + # Check that channel types match. 
First unpack list-like vals into separate items: + pairs = [(k, v) for k in _refdict for v in _refdict[k]] + ch_type_map = dict(zip(inst.ch_names, inst.get_channel_types())) + mismatch = [ch_type_map[k] != ch_type_map[v] for k, v in pairs] + if any(mismatch): + mismatch_pairs = np.array(pairs)[mismatch] + for k, v in mismatch_pairs: + warn( + f"Channel {k} ({ch_type_map[k]}) is referenced to channel {v} which is " + f"a different channel type ({ch_type_map[v]})." + ) + + # convert channel names to indices + keys_ix = pick_channels(inst.ch_names, list(_refdict), ordered=True) + vals_ix = (pick_channels(inst.ch_names, v, ordered=True) for v in _refdict.values()) + return dict(zip(keys_ix, vals_ix)) def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type="auto"): @@ -125,6 +193,22 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type="auto"): return inst, ref_data +def _apply_dict_reference(inst, ref_dict): + """Apply a dict-based custom EEG referencing scheme.""" + # this converts all keys to channel indices and all values to arrays of ch. indices: + ref_dict = _check_before_dict_reference(inst, ref_dict) + + data = inst._data + orig_data = data.copy() + for ref_to, ref_from in ref_dict.items(): + ref_data = orig_data[..., ref_from, :].mean(-2, keepdims=True) + data[..., [ref_to], :] -= ref_data + + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + return inst, None + + @fill_doc def add_reference_channels(inst, ref_channels, copy=True): """Add reference channels to data that consists of all zeros. @@ -316,18 +400,23 @@ def set_eeg_reference( Returns ------- inst : instance of Raw | Epochs | Evoked - Data with EEG channels re-referenced. If ``ref_channels='average'`` and + Data with EEG channels re-referenced. If ``ref_channels="average"`` and ``projection=True`` a projection will be added instead of directly re-referencing the data. 
ref_data : array Array of reference data subtracted from EEG channels. This will be - ``None`` if ``projection=True`` or ``ref_channels='REST'``. + ``None`` if ``projection=True``, or if ``ref_channels`` is ``"REST"`` or a + :class:`dict`. %(set_eeg_reference_see_also_notes)s """ from ..forward import Forward _check_can_reref(inst) + if isinstance(ref_channels, dict): + logger.info("Applying a custom dict-based reference.") + return _apply_dict_reference(inst, ref_channels) + ch_type = _get_ch_type(inst, ch_type) if projection: # average reference projector diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py index c68a0ca730a..6624ee0dd94 100644 --- a/mne/_fiff/tests/test_meas_info.py +++ b/mne/_fiff/tests/test_meas_info.py @@ -306,8 +306,8 @@ def test_read_write_info(tmp_path): assert info["hpi_meas"][0]["creator"] == creator assert info["subject_info"]["his_id"] == creator assert info["gantry_angle"] == gantry_angle - assert info["subject_info"]["height"] == 2.3 - assert info["subject_info"]["weight"] == 11.1 + assert_allclose(info["subject_info"]["height"], 2.3) + assert_allclose(info["subject_info"]["weight"], 11.1) for key in ["secs", "usecs", "version"]: assert info["meas_id"][key] == meas_id[key] assert_array_equal(info["meas_id"]["machid"], meas_id["machid"]) @@ -566,6 +566,18 @@ def test_check_consistency(): with pytest.raises(KeyError, match="key missing"): info2._check_consistency() + # bad subject_info entries + info2 = info.copy() + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"] = "bad" + info2["subject_info"] = dict() + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"]["height"] = "bad" + with pytest.raises(TypeError, match="must be an instance"): + info2["subject_info"]["weight"] = [0] + with pytest.raises(TypeError, match=r'subject_info\["height"\] must be an .*'): + info2["subject_info"] = {"height": "bad"} + def _test_anonymize_info(base_info): 
"""Test that sensitive information can be anonymized.""" @@ -867,10 +879,8 @@ def test_field_round_trip(tmp_path): write_info(fname, info) info_read = read_info(fname) assert_object_equal(info, info_read) - info["helium_info"]["meas_date"] = (1, 2) with pytest.raises(TypeError, match="datetime"): - # trigger the check - info["helium_info"] = info["helium_info"] + info["helium_info"]["meas_date"] = (1, 2) def test_equalize_channels(): diff --git a/mne/_fiff/tests/test_reference.py b/mne/_fiff/tests/test_reference.py index 3a9a752776f..542b13eac29 100644 --- a/mne/_fiff/tests/test_reference.py +++ b/mne/_fiff/tests/test_reference.py @@ -361,6 +361,160 @@ def test_set_eeg_reference_rest(): assert 0.995 < exp_var <= 1 +@testing.requires_testing_data +@pytest.mark.parametrize("inst_type", ["raw", "epochs"]) +@pytest.mark.parametrize( + "ref_channels, expectation", + [ + ( + {2: "EEG 001"}, + pytest.raises( + TypeError, + match="Keys in the ref_channels dict must be strings. " + "Your dict has keys of type int.", + ), + ), + ( + {"EEG 001": (1, 2)}, + pytest.raises( + TypeError, + match="Values in the ref_channels dict must be strings. " + "Your dict has values of type int.", + ), + ), + ( + {"EEG 001": [1, 2]}, + pytest.raises( + TypeError, + match="Values in the ref_channels dict must be strings. 
" + "Your dict has values of type int.", + ), + ), + ( + {"EEG 999": "EEG 001"}, + pytest.raises( + ValueError, + match=r"ref_channels dict contains invalid key\(s\) \(EEG 999\) " + "that are not names of channels in the instance.", + ), + ), + ( + {"EEG 001": "EEG 999"}, + pytest.raises( + ValueError, + match=r"ref_channels dict contains invalid value\(s\) \(EEG 999\) " + "that are not names of channels in the instance.", + ), + ), + ( + {"EEG 001": "EEG 057"}, + pytest.warns( + RuntimeWarning, + match=r"ref_channels dict contains value\(s\) \(EEG 057\) " + "that are marked as bad channels.", + ), + ), + ( + {"EEG 001": "STI 001"}, + pytest.warns( + RuntimeWarning, + match=( + r"Channel EEG 001 \(eeg\) is referenced to channel " + r"STI 001 which is a different channel type \(stim\)." + ), + ), + ), + ( + {"EEG 001": "EEG 001"}, + pytest.warns( + RuntimeWarning, + match=( + "Channel EEG 001 is self-referenced, " + "which will nullify the channel." + ), + ), + ), + ( + {"EEG 001": "EEG 002", "EEG 002": "EEG 003", "EEG 003": "EEG 005"}, + nullcontext(), + ), + ( + { + "EEG 001": ["EEG 002", "EEG 003"], + "EEG 002": "EEG 003", + "EEG 003": "EEG 005", + }, + nullcontext(), + ), + ], +) +def test_set_eeg_reference_dict(ref_channels, inst_type, expectation): + """Test setting dict-based reference.""" + if inst_type == "raw": + inst = read_raw_fif(fif_fname).crop(0, 1).pick(picks=["eeg", "stim"]) + # Test re-referencing Epochs object + elif inst_type == "epochs": + raw = read_raw_fif(fif_fname, preload=False) + events = read_events(eve_fname) + inst = Epochs( + raw, + events=events, + event_id=1, + tmin=-0.2, + tmax=0.5, + preload=False, + ) + with pytest.raises( + RuntimeError, + match="By default, MNE does not load data.*Applying a reference requires.*", + ): + inst.set_eeg_reference(ref_channels=ref_channels) + inst.load_data() + inst.info["bads"] = ["EEG 057"] + with expectation: + reref, _ = set_eeg_reference(inst.copy(), ref_channels, copy=False) + + if 
isinstance(expectation, nullcontext): + # Check that the custom_ref_applied is set correctly: + assert reref.info["custom_ref_applied"] == FIFF.FIFFV_MNE_CUSTOM_REF_ON + + # Get raw data + _data = inst._data + + # Get that channels that were and weren't re-referenced: + ch_raw = pick_channels( + inst.ch_names, + [ch for ch in inst.ch_names if ch not in list(ref_channels.keys())], + ) + ch_reref = pick_channels(inst.ch_names, list(ref_channels.keys()), ordered=True) + + # Check that the non re-reference channels are untouched: + assert_allclose( + _data[..., ch_raw, :], reref._data[..., ch_raw, :], 1e-6, atol=1e-15 + ) + + # Compute the reference data: + ref_data = [] + for val in ref_channels.values(): + if isinstance(val, str): + val = [val] # pick_channels expects a list + ref_data.append( + _data[..., pick_channels(inst.ch_names, val, ordered=True), :].mean( + -2, keepdims=True + ) + ) + if inst_type == "epochs": + ref_data = np.concatenate(ref_data, axis=1) + else: + ref_data = np.squeeze(np.array(ref_data)) + assert_allclose( + _data[..., ch_reref, :], + reref._data[..., ch_reref, :] + ref_data, + 1e-6, + atol=1e-15, + ) + + @testing.requires_testing_data @pytest.mark.parametrize("inst_type", ("raw", "epochs", "evoked")) def test_set_bipolar_reference(inst_type): diff --git a/mne/_fiff/tests/test_what.py b/mne/_fiff/tests/test_what.py index 9665b64523b..80bebdceeba 100644 --- a/mne/_fiff/tests/test_what.py +++ b/mne/_fiff/tests/test_what.py @@ -23,7 +23,7 @@ def test_what(tmp_path, verbose_debug): """Test mne.what.""" pytest.importorskip("sklearn") # ICA - ica = ICA(max_iter=1) + ica = ICA(max_iter=1, random_state=0) raw = RawArray(np.random.RandomState(0).randn(3, 10), create_info(3, 1000.0, "eeg")) with _record_warnings(): # convergence sometimes ica.fit(raw) @@ -33,30 +33,35 @@ def test_what(tmp_path, verbose_debug): # test files fnames = glob.glob(str(data_path / "MEG" / "sample" / "*.fif")) fnames += glob.glob(str(data_path / "subjects" / "sample" / "bem" 
/ "*.fif")) + fnames += [str(fname)] fnames = sorted(fnames) want_dict = dict( eve="events", ave="evoked", cov="cov", + ica="ica", inv="inverse", fwd="forward", trans="transform", proj="proj", raw="raw", - meg="raw", sol="bem solution", bem="bem surfaces", src="src", dense="bem surfaces", - sparse="bem surfaces", head="bem surfaces", fiducials="fiducials", ) + got = set() for fname in fnames: + print(fname) kind = Path(fname).stem.split("-")[-1] if len(kind) > 5: kind = kind.split("_")[-1] this = what(fname) - assert this == want_dict[kind] + assert this == want_dict[kind], fname + print() + got.add(kind) + assert set(want_dict) == got fname = data_path / "MEG" / "sample" / "sample_audvis-ave_xfit.dip" assert what(fname) == "unknown" diff --git a/mne/_fiff/tree.py b/mne/_fiff/tree.py index 16ad60cf3a0..142c40aa5a8 100644 --- a/mne/_fiff/tree.py +++ b/mne/_fiff/tree.py @@ -48,7 +48,7 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): else: block = 0 - logger.debug(" " * indent + f"start {{ {block}") + start_separate = False this = start @@ -64,6 +64,9 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): while this < len(directory): if directory[this].kind == FIFF.FIFF_BLOCK_START: if this != start: + if not start_separate: + start_separate = True + logger.debug(" " * indent + f"start {{ {block}") child, this = make_dir_tree(fid, directory, this, indent + 1) tree["nchild"] += 1 tree["children"].append(child) @@ -96,10 +99,10 @@ def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): if tree["nent"] == 0: tree["directory"] = None - logger.debug( - " " * (indent + 1) - + f"block = {tree['block']} nent = {tree['nent']} nchild = {tree['nchild']}" - ) - logger.debug(" " * indent + f"end }} {block:d}") + content = f"block = {tree['block']} nent = {tree['nent']} nchild = {tree['nchild']}" + if start_separate: + logger.debug(" " * indent + f"end }} {content}") + else: + logger.debug(" " * indent + content) last = this return 
tree, last diff --git a/mne/_fiff/utils.py b/mne/_fiff/utils.py index 6171062351a..b158914bb88 100644 --- a/mne/_fiff/utils.py +++ b/mne/_fiff/utils.py @@ -224,7 +224,7 @@ def _read_segments_file( # Read up to 100 MB of data at a time, block_size is in data samples block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels block_size = min(data_left, block_size) - with open(raw._filenames[fi], "rb", buffering=0) as fid: + with open(raw.filenames[fi], "rb", buffering=0) as fid: fid.seek(data_offset) # extract data in chunks for sample_start in np.arange(0, data_left, block_size) // n_channels: diff --git a/mne/_fiff/what.py b/mne/_fiff/what.py index 6f7e0ef995e..d91f79da5cf 100644 --- a/mne/_fiff/what.py +++ b/mne/_fiff/what.py @@ -39,7 +39,7 @@ def what(fname): from ..transforms import read_trans from .meas_info import read_fiducials - _check_fname(fname, overwrite="read", must_exist=True) + fname = _check_fname(fname, overwrite="read", must_exist=True) checks = OrderedDict() checks["raw"] = read_raw_fif checks["ica"] = read_ica diff --git a/mne/annotations.py b/mne/annotations.py index 9a9a82848f0..629ee7b20cb 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -3,7 +3,6 @@ # Copyright the MNE-Python contributors. 
import json -import os.path as op import re import warnings from collections import Counter, OrderedDict @@ -1194,48 +1193,43 @@ def read_annotations( from .io.edf.edf import _read_annotations_edf from .io.eeglab.eeglab import _read_annotations_eeglab - fname = str( - _check_fname( - fname, - overwrite="read", - must_exist=True, - need_dir=str(fname).endswith(".ds"), # for CTF - name="fname", - ) + fname = _check_fname( + fname, + overwrite="read", + must_exist=True, + need_dir=str(fname).endswith(".ds"), # for CTF + name="fname", ) - name = op.basename(fname) - if name.endswith(("fif", "fif.gz")): + readers = { + ".csv": _read_annotations_csv, + ".cnt": _read_annotations_cnt, + ".ds": _read_annotations_ctf, + ".cef": _read_annotations_curry, + ".set": _read_annotations_eeglab, + ".edf": _read_annotations_edf, + ".bdf": _read_annotations_edf, + ".gdf": _read_annotations_edf, + ".vmrk": _read_annotations_brainvision, + ".amrk": _read_annotations_brainvision, + ".txt": _read_annotations_txt, + } + kwargs = { + ".vmrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".amrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".cef": {"sfreq": sfreq}, + ".set": {"uint16_codec": uint16_codec}, + ".edf": {"encoding": encoding}, + ".bdf": {"encoding": encoding}, + ".gdf": {"encoding": encoding}, + } + if fname.suffix in readers: + annotations = readers[fname.suffix](fname, **kwargs.get(fname.suffix, {})) + elif fname.name.endswith(("fif", "fif.gz")): # Read FiF files ff, tree, _ = fiff_open(fname, preload=False) with ff as fid: annotations = _read_annotations_fif(fid, tree) - elif name.endswith("txt"): - annotations = _read_annotations_txt(fname) - - elif name.endswith(("vmrk", "amrk")): - annotations = _read_annotations_brainvision( - fname, sfreq=sfreq, ignore_marker_types=ignore_marker_types - ) - - elif name.endswith("csv"): - annotations = _read_annotations_csv(fname) - - elif name.endswith("cnt"): - annotations = 
_read_annotations_cnt(fname) - - elif name.endswith("ds"): - annotations = _read_annotations_ctf(fname) - - elif name.endswith("cef"): - annotations = _read_annotations_curry(fname, sfreq=sfreq) - - elif name.endswith("set"): - annotations = _read_annotations_eeglab(fname, uint16_codec=uint16_codec) - - elif name.endswith(("edf", "bdf", "gdf")): - annotations = _read_annotations_edf(fname, encoding=encoding) - - elif name.startswith("events_") and fname.endswith("mat"): + elif fname.name.startswith("events_") and fname.suffix == ".mat": annotations = _read_brainstorm_annotations(fname) else: raise OSError(f'Unknown annotation file format "{fname}"') diff --git a/mne/channels/_standard_montage_utils.py b/mne/channels/_standard_montage_utils.py index 26d9c1434fe..eb3dc10d10e 100644 --- a/mne/channels/_standard_montage_utils.py +++ b/mne/channels/_standard_montage_utils.py @@ -230,6 +230,11 @@ def _check_dupes_odict(ch_names, pos): def _read_elc(fname, head_size): """Read .elc files. + The `.elc` files are so-called "asa electrode files". ASA here stands for + Advanced Source Analysis, and is a software package developed and sold by + the ANT Neuro company. They provide a device for sensor digitization, called + 'xensor', which produces the `.elc` files. + Parameters ---------- fname : str @@ -241,12 +246,12 @@ Returns ------- montage : instance of DigMontage - The montage in [m]. + The montage units are [m]. """ fid_names = ("Nz", "LPA", "RPA") - ch_names_, pos = [], [] with open(fname) as fid: + # Read units # _read_elc does require to detect the units.
(see _mgh_or_standard) for line in fid: if "UnitPosition" in line: @@ -258,15 +263,33 @@ def _read_elc(fname, head_size): for line in fid: if "Positions\n" in line: break + + # Read positions + new_style = False pos = [] for line in fid: if "Labels\n" in line: break - pos.append(list(map(float, line.split()))) + if ":" in line: + # Of the 'new' format: `E01 : 5.288 -3.658 119.693` + pos.append(list(map(float, line.split(":")[1].split()))) + new_style = True + else: + # Of the 'old' format: `5.288 -3.658 119.693` + pos.append(list(map(float, line.split()))) + + # Read labels + ch_names_ = [] for line in fid: if not line or not set(line) - {" "}: break - ch_names_.append(line.strip(" ").strip("\n")) + if new_style: + # Not sure how this format would deal with spaces in channel labels, + # but none of my test files had this, so let's wait until it comes up. + parsed = line.strip(" ").strip("\n").split() + else: + parsed = [line.strip(" ").strip("\n")] + ch_names_.extend(parsed) pos = np.array(pos) * scale if head_size is not None: diff --git a/mne/channels/montage.py b/mne/channels/montage.py index ddf885452a1..346a8246fe2 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -836,7 +836,7 @@ def read_dig_fif(fname): read_dig_localite make_dig_montage """ - _check_fname(fname, overwrite="read", must_exist=True) + fname = _check_fname(fname, overwrite="read", must_exist=True) # Load the dig data f, tree = fiff_open(fname)[:2] with f as fid: @@ -1537,7 +1537,10 @@ def _read_eeglab_locations(fname): return ch_names, pos -def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): +@verbose +def read_custom_montage( + fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None, *, verbose=None +): """Read a montage from a file. Parameters @@ -1558,6 +1561,7 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): for most readers but ``"head"`` for EEGLAB. .. 
versionadded:: 0.20 + %(verbose)s Returns ------- diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index 92a489adc35..d0c406473e8 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -300,7 +300,66 @@ def test_documented(): ), "elc", None, - id="ASA electrode", + id="old ASA electrode (elc)", + ), + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "NumberPositions= 96\n" + "UnitPosition mm\n" + "Positions\n" + "E01 : 5.288 -3.658 119.693\n" + "E02 : 59.518 -4.031 101.404\n" + "E03 : 29.949 -50.988 98.145\n" + "Labels\n" + "E01 E02 E03\n" + ), + make_dig_montage( + ch_pos={ + "E01": [0.005288, -0.003658, 0.119693], + "E02": [0.059518, -0.004031, 0.101404], + "E03": [0.029949, -0.050988, 0.098145], + }, + ), + "elc", + None, + id="new ASA electrode (elc)", + ), + pytest.param( + partial(read_custom_montage, head_size=None), + ( + "ReferenceLabel\n" + "avg\n" + "UnitPosition mm\n" + "NumberPositions= 6\n" + "Positions\n" + "-69.2574 10.5895 -25.0009\n" + "3.3791 94.6594 32.2592\n" + "77.2856 12.0537 -30.2488\n" + "4.6147 121.8858 8.6370\n" + "-31.3669 54.0269 94.9191\n" + "-8.7495 56.5653 99.6655\n" + "Labels\n" + "LPA\n" + "Nz\n" + "RPA\n" + "EEG 000\n" + "EEG 001\n" + "EEG 002\n" + ), + make_dig_montage( + ch_pos={ + "EEG 000": [0.004615, 0.121886, 0.008637], + "EEG 001": [-0.031367, 0.054027, 0.094919], + "EEG 002": [-0.00875, 0.056565, 0.099665], + }, + nasion=[0.003379, 0.094659, 0.032259], + lpa=[-0.069257, 0.010589, -0.025001], + rpa=[0.077286, 0.012054, -0.030249], + ), + "elc", + None, + id="another old ASA electrode (elc)", ), pytest.param( partial(read_custom_montage, head_size=1), @@ -522,8 +581,26 @@ def test_montage_readers(reader, file_content, expected_dig, ext, warning, tmp_p actual_ch_pos = dig_montage._get_ch_pos() expected_ch_pos = expected_dig._get_ch_pos() for kk in actual_ch_pos: - assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5) + 
assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5, err_msg=kk) assert len(dig_montage.dig) == len(expected_dig.dig) + for key in ("nasion", "lpa", "rpa"): + expected = [ + d + for d in expected_dig.dig + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL + and d["ident"] == getattr(FIFF, f"FIFFV_POINT_{key.upper()}") + ] + got = [ + d + for d in dig_montage.dig + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL + and d["ident"] == getattr(FIFF, f"FIFFV_POINT_{key.upper()}") + ] + assert len(expected) in (0, 1), key + assert len(got) in (0, 1), key + assert len(expected) == len(got) + if len(expected): + assert_allclose(got[0]["r"], expected[0]["r"], atol=1e-5, err_msg=key) for d1, d2 in zip(dig_montage.dig, expected_dig.dig): assert d1["coord_frame"] == d2["coord_frame"] for key in ("coord_frame", "ident", "kind"): diff --git a/mne/chpi.py b/mne/chpi.py index 22909415aaa..d04081779c9 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -306,7 +306,7 @@ def extract_chpi_locs_kit(raw, stim_channel="MISC 064", *, verbose=None): dtype = np.dtype([("good", " 2: - raise ValueError( - f"LinearModel only accepts up to 2-dimensional y, got {y.shape} " - "instead." - ) + X = check_array(X, input_name="X") + if y is not None: + y = check_array(y, dtype=None, ensure_2d=False, input_name="y") + if y.ndim > 2: + raise ValueError( + f"LinearModel only accepts up to 2-dimensional y, got {y.shape} " + "instead." 
+ ) # fit the Model self.model.fit(X, y, **fit_params) @@ -153,16 +149,12 @@ def filters_(self): def _set_cv(cv, estimator=None, X=None, y=None): """Set the default CV depending on whether clf is classifier/regressor.""" # Detect whether classification or regression - from sklearn.base import is_classifier if estimator in ["classifier", "regressor"]: est_is_classifier = estimator == "classifier" else: est_is_classifier = is_classifier(estimator) # Setup CV - from sklearn import model_selection as models - from sklearn.model_selection import KFold, StratifiedKFold, check_cv - if isinstance(cv, int | np.int64): XFold = StratifiedKFold if est_is_classifier else KFold cv = XFold(n_splits=cv) @@ -391,12 +383,6 @@ def cross_val_multiscore( Array of scores of the estimator for each run of the cross validation. """ # This code is copied from sklearn - from sklearn.base import clone, is_classifier - from sklearn.model_selection._split import check_cv - from sklearn.utils import indexable - - check_scoring = _get_check_scoring() - X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) @@ -449,12 +435,16 @@ def _fit_and_score( ): """Fit estimator and compute scores for a given dataset split.""" # This code is adapted from sklearn + from sklearn.model_selection import _validation from sklearn.utils.metaestimators import _safe_split from sklearn.utils.validation import _num_samples # Adjust length of sample weights + fit_params = fit_params if fit_params is not None else {} - fit_params = _check_fit_params(X, fit_params, train) + fit_params = { + k: _validation._index_param_value(X, v, train) for k, v in fit_params.items() + } if parameters is not None: estimator.set_params(**parameters) diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py index 7bd77361fa6..1261ca82055 100644 --- a/mne/decoding/csp.py +++ b/mne/decoding/csp.py @@ -6,6 +6,7 @@ import numpy as np from scipy.linalg import eigh +from sklearn.base import BaseEstimator, 
TransformerMixin from .._fiff.meas_info import create_info from ..cov import _compute_rank_raw_array, _regularized_covariance, _smart_eigh @@ -15,12 +16,10 @@ _check_option, _validate_type, _verbose_safe_false, - copy_doc, fill_doc, pinv, + warn, ) -from .base import BaseEstimator -from .mixin import TransformerMixin @fill_doc @@ -272,8 +271,31 @@ def inverse_transform(self, X): ) return X[:, np.newaxis, :] * self.patterns_[: self.n_components].T - @copy_doc(TransformerMixin.fit_transform) - def fit_transform(self, X, y, **fit_params): # noqa: D102 + def fit_transform(self, X, y=None, **fit_params): + """Fit CSP to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the CSP. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. + + Returns + ------- + X_csp : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. 
+ """ + # use parent TransformerMixin method but with custom docstring return super().fit_transform(X, y=y, **fit_params) @fill_doc @@ -369,6 +391,9 @@ def plot_patterns( if components is None: components = np.arange(self.n_components) + if average is not None: + warn("`average` is deprecated and will be removed in 1.10.", FutureWarning) + # set sampling frequency to have 1 component per time point info = cp.deepcopy(info) with info._unlock(): @@ -500,6 +525,9 @@ def plot_filters( if components is None: components = np.arange(self.n_components) + if average is not None: + warn("`average` is deprecated and will be removed in 1.10.", FutureWarning) + # set sampling frequency to have 1 component per time point info = cp.deepcopy(info) with info._unlock(): @@ -946,3 +974,30 @@ def transform(self, X): space and shape is (n_epochs, n_components, n_times). """ return super().transform(X) + + def fit_transform(self, X, y=None, **fit_params): + """Fit SPoC to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the SPoC. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. + + Returns + ------- + X : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. 
+ """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) diff --git a/mne/decoding/ems.py b/mne/decoding/ems.py index 511a4ce1681..f6811de460d 100644 --- a/mne/decoding/ems.py +++ b/mne/decoding/ems.py @@ -5,15 +5,15 @@ from collections import Counter import numpy as np +from sklearn.base import BaseEstimator, TransformerMixin from .._fiff.pick import _picks_to_idx, pick_info, pick_types from ..parallel import parallel_func from ..utils import logger, verbose from .base import _set_cv -from .mixin import EstimatorMixin, TransformerMixin -class EMS(TransformerMixin, EstimatorMixin): +class EMS(BaseEstimator, TransformerMixin): """Transformer to compute event-matched spatial filters. This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire diff --git a/mne/decoding/mixin.py b/mne/decoding/mixin.py deleted file mode 100644 index c660cb7eca7..00000000000 --- a/mne/decoding/mixin.py +++ /dev/null @@ -1,89 +0,0 @@ -# Authors: The MNE-Python contributors. -# License: BSD-3-Clause -# Copyright the MNE-Python contributors. - - -class TransformerMixin: - """Mixin class for all transformers in scikit-learn.""" - - def fit_transform(self, X, y=None, **fit_params): - """Fit to data, then transform it. - - Fits transformer to ``X`` and ``y`` with optional parameters - ``fit_params``, and returns a transformed version of ``X``. - - Parameters - ---------- - X : array, shape (n_samples, n_features) - Training set. - y : array, shape (n_samples,) - Target values or class labels. - **fit_params : dict - Additional fitting parameters passed to the ``fit`` method.. - - Returns - ------- - X_new : array, shape (n_samples, n_features_new) - Transformed array. 
- """ - # non-optimized default implementation; override when a better - # method is possible for a given clustering algorithm - if y is None: - # fit method of arity 1 (unsupervised transformation) - return self.fit(X, **fit_params).transform(X) - else: - # fit method of arity 2 (supervised transformation) - return self.fit(X, y, **fit_params).transform(X) - - -class EstimatorMixin: - """Mixin class for estimators.""" - - def get_params(self, deep=True): - """Get the estimator params. - - Parameters - ---------- - deep : bool - Deep. - """ - return - - def set_params(self, **params): - """Set parameters (mimics sklearn API). - - Parameters - ---------- - **params : dict - Extra parameters. - - Returns - ------- - inst : object - The instance. - """ - if not params: - return self - valid_params = self.get_params(deep=True) - for key, value in params.items(): - split = key.split("__", 1) - if len(split) > 1: - # nested objects case - name, sub_name = split - if name not in valid_params: - raise ValueError( - f"Invalid parameter {name} for estimator {self}. Check the list" - " of available parameters with `estimator.get_params().keys()`." - ) - sub_object = valid_params[name] - sub_object.set_params(**{sub_name: value}) - else: - # simple objects case - if key not in valid_params: - raise ValueError( - f"Invalid parameter {key} for estimator " - f"{self.__class__.__name__}. Check the list of available " - "parameters with `estimator.get_params().keys()`." 
- ) - setattr(self, key, value) - return self diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index c03596bb811..f95aab09ac6 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -6,8 +6,11 @@ import numpy as np from scipy.stats import pearsonr +from sklearn.base import clone, is_regressor +from sklearn.exceptions import NotFittedError +from sklearn.metrics import r2_score -from ..utils import _validate_type, fill_doc, pinv, verbose +from ..utils import _validate_type, fill_doc, pinv from .base import BaseEstimator, _check_estimator, get_coef from .time_delaying_ridge import TimeDelayingRidge @@ -65,7 +68,6 @@ class ReceptiveField(BaseEstimator): duration. Only used if ``estimator`` is float or None. .. versionadded:: 0.18 - %(verbose)s Attributes ---------- @@ -101,7 +103,6 @@ class ReceptiveField(BaseEstimator): .. footbibliography:: """ # noqa E501 - @verbose def __init__( self, tmin, @@ -114,12 +115,11 @@ def __init__( patterns=False, n_jobs=None, edge_correction=True, - verbose=None, ): - self.feature_names = feature_names - self.sfreq = float(sfreq) self.tmin = tmin self.tmax = tmax + self.sfreq = sfreq + self.feature_names = feature_names self.estimator = 0.0 if estimator is None else estimator self.fit_intercept = fit_intercept self.scoring = scoring @@ -127,9 +127,6 @@ def __init__( self.n_jobs = n_jobs self.edge_correction = edge_correction - def _more_tags(self): - return {"no_validation": True} - def __repr__(self): # noqa: D105 s = f"tmin, tmax : ({self.tmin:.3f}, {self.tmax:.3f}), " estimator = self.estimator @@ -158,7 +155,7 @@ def _delay_and_reshape(self, X, y=None): X, self.tmin, self.tmax, - self.sfreq, + self.sfreq_, fill_mean=self.fit_intercept_, ) X = _reshape_for_est(X) @@ -186,14 +183,13 @@ def fit(self, X, y): raise ValueError( f"scoring must be one of {sorted(_SCORERS.keys())}, got {self.scoring} " ) - from sklearn.base import clone, is_regressor - + self.sfreq_ = 
float(self.sfreq) X, y, _, self._y_dim = self._check_dimensions(X, y) if self.tmin > self.tmax: raise ValueError(f"tmin ({self.tmin}) must be at most tmax ({self.tmax})") # Initialize delays - self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq) + self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq_) # Define the slice that we should use in the middle self.valid_samples_ = _delays_to_slice(self.delays_) @@ -206,7 +202,7 @@ def fit(self, X, y): estimator = TimeDelayingRidge( self.tmin, self.tmax, - self.sfreq, + self.sfreq_, alpha=self.estimator, fit_intercept=self.fit_intercept_, n_jobs=self.n_jobs, @@ -298,7 +294,7 @@ def predict(self, X): be obtained using ``y_pred[rf.valid_samples_]``. """ if not hasattr(self, "delays_"): - raise ValueError("Estimator has not been fit yet.") + raise NotFittedError("Estimator has not been fit yet.") X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3] del _ # convert to sklearn and back @@ -514,8 +510,6 @@ def _corr_score(y_true, y, multioutput=None): def _r2_score(y_true, y, multioutput=None): - from sklearn.metrics import r2_score - return r2_score(y_true, y, multioutput=multioutput) diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py index 01e55a263d3..64f38a60634 100644 --- a/mne/decoding/search_light.py +++ b/mne/decoding/search_light.py @@ -5,13 +5,14 @@ import logging import numpy as np -from scipy.sparse import issparse +from sklearn.base import BaseEstimator, TransformerMixin, clone +from sklearn.metrics import check_scoring +from sklearn.preprocessing import LabelEncoder +from sklearn.utils import check_array -from ..fixes import _get_check_scoring from ..parallel import parallel_func from ..utils import ProgressBar, _parse_verbose, array_split_idx, fill_doc, verbose -from .base import BaseEstimator, _check_estimator -from .mixin import TransformerMixin +from .base import _check_estimator @fill_doc @@ -56,9 +57,6 @@ def __init__( self.allow_2d = allow_2d 
self.verbose = verbose - def _more_tags(self): - return {"no_validation": True, "requires_fit": False} - @property def _estimator_type(self): return getattr(self.base_estimator, "_estimator_type", None) @@ -256,14 +254,9 @@ def decision_function(self, X): def _check_Xy(self, X, y=None): """Aux. function to check input data.""" # Once we require sklearn 1.1+ we should do something like: - # from sklearn.utils import check_array - # X = check_array(X, ensure_2d=False, input_name="X") - # y = check_array(y, dtype=None, ensure_2d=False, input_name="y") - if issparse(X): - raise TypeError("X should be a dense array, got sparse instead.") - X = np.asarray(X) + X = check_array(X, ensure_2d=False, allow_nd=True, input_name="X") if y is not None: - y = np.asarray(y) + y = check_array(y, dtype=None, ensure_2d=False, input_name="y") if len(X) != len(y) or len(y) < 1: raise ValueError("X and y must have the same length.") if X.ndim < 3: @@ -300,8 +293,6 @@ def score(self, X, y): score : array, shape (n_samples, n_estimators) Score for each estimator/task. """ # noqa: E501 - check_scoring = _get_check_scoring() - X = self._check_Xy(X, y) if X.shape[-1] != len(self.estimators_): raise ValueError("The number of estimators does not match X.shape[-1]") @@ -357,8 +348,6 @@ def _sl_fit(estimator, X, y, pb, **fit_params): estimators_ : list of estimators The fitted estimators. """ - from sklearn.base import clone - estimators_ = list() for ii in range(X.shape[-1]): est = clone(estimator) @@ -600,7 +589,6 @@ def score(self, X, y): score : array, shape (n_samples, n_estimators, n_slices) Score for each estimator / data slice couple. """ # noqa: E501 - check_scoring = _get_check_scoring() X = self._check_Xy(X, y) # For predictions/transforms the parallelization is across the data and # not across the estimators to avoid memory load. 
@@ -719,8 +707,6 @@ def _gl_score(estimators, scoring, X, y, pb): def _fix_auc(scoring, y): - from sklearn.preprocessing import LabelEncoder - # This fixes sklearn's inability to compute roc_auc when y not in [0, 1] # scikit-learn/scikit-learn#6874 if scoring is not None: diff --git a/mne/decoding/ssd.py b/mne/decoding/ssd.py index f5f1ff94516..4043aa99835 100644 --- a/mne/decoding/ssd.py +++ b/mne/decoding/ssd.py @@ -4,12 +4,12 @@ import numpy as np from scipy.linalg import eigh +from sklearn.base import BaseEstimator, TransformerMixin from .._fiff.pick import _picks_to_idx from ..cov import Covariance, _regularized_covariance from ..defaults import _handle_default from ..filter import filter_data -from ..fixes import BaseEstimator from ..rank import compute_rank from ..time_frequency import psd_array_welch from ..utils import ( @@ -20,7 +20,6 @@ fill_doc, logger, ) -from .mixin import TransformerMixin @fill_doc @@ -125,14 +124,8 @@ def __init__( "The signal band-pass must be within the noise " "band-pass!" ) - self.picks_ = _picks_to_idx(info, picks, none="data", exclude="bads") + self.picks = picks del picks - ch_types = info.get_channel_types(picks=self.picks_, unique=True) - if len(ch_types) > 1: - raise ValueError( - "At this point SSD only supports fitting " - "single channel types. Your info has %i types" % (len(ch_types)) - ) self.info = info self.freqs_signal = (filt_params_signal["l_freq"], filt_params_signal["h_freq"]) self.freqs_noise = (filt_params_noise["l_freq"], filt_params_noise["h_freq"]) @@ -183,6 +176,13 @@ def fit(self, X, y=None): self : instance of SSD Returns the modified instance. """ + ch_types = self.info.get_channel_types(picks=self.picks, unique=True) + if len(ch_types) > 1: + raise ValueError( + "At this point SSD only supports fitting " + "single channel types. 
Your info has %i types" % (len(ch_types)) + ) + self.picks_ = _picks_to_idx(self.info, self.picks, none="data", exclude="bads") self._check_X(X) X_aux = X[..., self.picks_, :] @@ -261,6 +261,31 @@ def transform(self, X): X_ssd = X_ssd[:, self.sorter_spec, :][:, : self.n_components, :] return X_ssd + def fit_transform(self, X, y=None, **fit_params): + """Fit SSD to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array obtained from + continuous data or 3D array obtained from epoched data. + y : None + Ignored; exists for compatibility with scikit-learn pipelines. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.SSD.fit` + method. Not used for this class. + + Returns + ------- + X_ssd : array, shape ([n_epochs, ]n_components, n_times) + The processed data. + """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) + def get_spectral_ratio(self, ssd_sources): """Get the spectal signal-to-noise ratio for each spatial filter. diff --git a/mne/decoding/tests/test_base.py b/mne/decoding/tests/test_base.py index 10d9950bbf7..0930d007d28 100644 --- a/mne/decoding/tests/test_base.py +++ b/mne/decoding/tests/test_base.py @@ -2,6 +2,7 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. 
+import platform from contextlib import nullcontext import numpy as np @@ -14,6 +15,30 @@ assert_equal, ) +pytest.importorskip("sklearn") + +from sklearn import svm +from sklearn.base import ( + BaseEstimator as sklearn_BaseEstimator, +) +from sklearn.base import ( + TransformerMixin as sklearn_TransformerMixin, +) +from sklearn.base import ( + is_classifier, + is_regressor, +) +from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge +from sklearn.model_selection import ( + GridSearchCV, + KFold, + StratifiedKFold, + cross_val_score, +) +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils.estimator_checks import parametrize_with_checks + from mne import EpochsArray, create_info from mne.decoding import GeneralizingEstimator, Scaler, TransformerMixin, Vectorizer from mne.decoding.base import ( @@ -25,8 +50,6 @@ ) from mne.decoding.search_light import SlidingEstimator -pytest.importorskip("sklearn") - def _make_data(n_samples=1000, n_features=5, n_targets=3): """Generate some testing data. @@ -70,18 +93,6 @@ def _make_data(n_samples=1000, n_features=5, n_targets=3): @pytest.mark.filterwarnings("ignore:invalid value encountered in cast.*:RuntimeWarning") def test_get_coef(): """Test getting linear coefficients (filters/patterns) from estimators.""" - from sklearn import svm - from sklearn.base import ( - BaseEstimator, - TransformerMixin, - is_classifier, - is_regressor, - ) - from sklearn.linear_model import Ridge - from sklearn.model_selection import GridSearchCV - from sklearn.pipeline import make_pipeline - from sklearn.preprocessing import StandardScaler - lm_classification = LinearModel() assert is_classifier(lm_classification) @@ -100,6 +111,8 @@ def test_get_coef(): assert is_regressor(lm_gs_regression) # Define a classifier, an invertible transformer and an non-invertible one. 
+ assert BaseEstimator is sklearn_BaseEstimator + assert TransformerMixin is sklearn_TransformerMixin class Clf(BaseEstimator): def fit(self, X, y): @@ -223,9 +236,6 @@ def transform(self, X): ) def test_get_coef_inverse_transform(inverse, Scale, kwargs): """Test get_coef with and without inverse_transform.""" - from sklearn.linear_model import Ridge - from sklearn.pipeline import make_pipeline - lm_regression = LinearModel(Ridge()) X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1) # Check with search_light and combination of preprocessing ending with sl: @@ -254,9 +264,6 @@ def test_get_coef_inverse_transform(inverse, Scale, kwargs): def test_get_coef_multiclass(n_features, n_targets): """Test get_coef on multiclass problems.""" # Check patterns with more than 1 regressor - from sklearn.linear_model import LinearRegression, Ridge - from sklearn.pipeline import make_pipeline - X, Y, A = _make_data(n_samples=30000, n_features=n_features, n_targets=n_targets) lm = LinearModel(LinearRegression()).fit(X, Y) assert_array_equal(lm.filters_.shape, lm.patterns_.shape) @@ -306,12 +313,9 @@ def test_get_coef_multiclass(n_features, n_targets): ) # TODO: Need to fix this properly in LinearModel @pytest.mark.filterwarnings("ignore:'multi_class' was deprecated in.*:FutureWarning") +@pytest.mark.filterwarnings("ignore:lbfgs failed to converge.*:") def test_get_coef_multiclass_full(n_classes, n_channels, n_times): """Test a full example with pattern extraction.""" - from sklearn.linear_model import LogisticRegression - from sklearn.model_selection import StratifiedKFold - from sklearn.pipeline import make_pipeline - data = np.zeros((10 * n_classes, n_channels, n_times)) # Make only the first channel informative for ii in range(n_classes): @@ -337,7 +341,9 @@ def test_get_coef_multiclass_full(n_classes, n_channels, n_times): if n_times > 1: want += (n_times, n_times) assert scores.shape == want - assert_array_less(0.8, scores) + # On Windows LBFGS can fail to 
converge, so we need to be a bit more tol here + limit = 0.7 if platform.system() == "Windows" else 0.8 + assert_array_less(limit, scores) clf.fit(X, y) patterns = get_coef(clf, "patterns_", inverse_transform=True) assert patterns.shape == (n_classes, n_channels, n_times) @@ -347,8 +353,6 @@ def test_get_coef_multiclass_full(n_classes, n_channels, n_times): def test_linearmodel(): """Test LinearModel class for computing filters and patterns.""" # check categorical target fit in standard linear model - from sklearn.linear_model import LinearRegression - rng = np.random.RandomState(0) clf = LinearModel() n, n_features = 20, 3 @@ -362,9 +366,6 @@ def test_linearmodel(): clf.fit(wrong_X, y) # check categorical target fit in standard linear model with GridSearchCV - from sklearn import svm - from sklearn.model_selection import GridSearchCV - parameters = {"kernel": ["linear"], "C": [1, 10]} clf = LinearModel( GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=None) @@ -403,9 +404,6 @@ def test_linearmodel(): def test_cross_val_multiscore(): """Test cross_val_multiscore for computing scores on decoding over time.""" - from sklearn.linear_model import LinearRegression, LogisticRegression - from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score - logreg = LogisticRegression(solver="liblinear", random_state=0) # compare to cross-val-score @@ -462,19 +460,15 @@ def test_cross_val_multiscore(): assert_array_equal(manual, auto) -def test_sklearn_compliance(): +@parametrize_with_checks([LinearModel(LogisticRegression())]) +def test_sklearn_compliance(estimator, check): """Test LinearModel compliance with sklearn.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import LogisticRegression - from sklearn.utils.estimator_checks import check_estimator - - lm = LinearModel(LogisticRegression()) ignores = ( + "check_n_features_in", # maybe we should add this someday? 
"check_estimator_sparse_data", # we densify "check_estimators_overwrite_params", # self.model changes! "check_parameters_default_constructible", ) - for est, check in check_estimator(lm, generate_only=True): - if any(ignore in str(check) for ignore in ignores): - continue - check(est) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) diff --git a/mne/decoding/tests/test_csp.py b/mne/decoding/tests/test_csp.py index 79528dc5615..7a1a83feeaf 100644 --- a/mne/decoding/tests/test_csp.py +++ b/mne/decoding/tests/test_csp.py @@ -13,6 +13,13 @@ assert_equal, ) +pytest.importorskip("sklearn") + +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import StratifiedKFold, cross_val_score +from sklearn.pipeline import Pipeline, make_pipeline +from sklearn.svm import SVC + from mne import Epochs, compute_proj_raw, io, pick_types, read_events from mne.decoding import CSP, LinearModel, Scaler, SPoC, get_coef from mne.decoding.csp import _ajd_pham @@ -255,11 +262,6 @@ def test_csp(): @pytest.mark.parametrize("reg", [None, 0.001, "oas"]) def test_regularized_csp(ch_type, rank, reg): """Test Common Spatial Patterns algorithm using regularized covariance.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import LogisticRegression - from sklearn.model_selection import StratifiedKFold, cross_val_score - from sklearn.pipeline import make_pipeline - raw = io.read_raw_fif(raw_fname).pick(ch_type, exclude="bads").load_data() n_orig = len(raw.ch_names) ch_decim = 2 @@ -373,10 +375,6 @@ def test_regularized_csp(ch_type, rank, reg): def test_csp_pipeline(): """Test if CSP works in a pipeline.""" - pytest.importorskip("sklearn") - from sklearn.pipeline import Pipeline - from sklearn.svm import SVC - csp = CSP(reg=1, norm_trace=False) svc = SVC() pipe = Pipeline([("CSP", csp), ("SVC", svc)]) diff --git a/mne/decoding/tests/test_ems.py b/mne/decoding/tests/test_ems.py index 6a5effc07b7..10774c0681a 100644 --- 
a/mne/decoding/tests/test_ems.py +++ b/mne/decoding/tests/test_ems.py @@ -8,6 +8,10 @@ import pytest from numpy.testing import assert_array_almost_equal, assert_equal +pytest.importorskip("sklearn") + +from sklearn.model_selection import StratifiedKFold + from mne import Epochs, io, pick_types, read_events from mne.decoding import EMS, compute_ems @@ -17,13 +21,9 @@ tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) -pytest.importorskip("sklearn") - def test_ems(): """Test event-matched spatial filters.""" - from sklearn.model_selection import StratifiedKFold - raw = io.read_raw_fif(raw_fname, preload=False) # create unequal number of events diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index d46289819b8..51bb7bb188d 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -10,6 +10,11 @@ from numpy.fft import irfft, rfft from numpy.testing import assert_allclose, assert_array_equal, assert_equal +pytest.importorskip("sklearn") + +from sklearn.linear_model import Ridge +from sklearn.utils.estimator_checks import parametrize_with_checks + from mne.decoding import ReceptiveField, TimeDelayingRidge from mne.decoding.receptive_field import ( _SCORERS, @@ -79,9 +84,6 @@ def test_compute_reg_neighbors(): def test_rank_deficiency(): """Test signals that are rank deficient.""" # See GH#4253 - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - N = 256 fs = 1.0 tmin, tmax = -50, 100 @@ -174,9 +176,6 @@ def test_time_delay(): @pytest.mark.parametrize("n_jobs", n_jobs_test) def test_receptive_field_basic(n_jobs): """Test model prep and fitting.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - # Make sure estimator pulling works mod = Ridge() rng = np.random.RandomState(1337) @@ -372,9 +371,6 @@ def test_time_delaying_fast_calc(n_jobs): @pytest.mark.parametrize("n_jobs", n_jobs_test) def 
test_receptive_field_1d(n_jobs): """Test that the fast solving works like Ridge.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - rng = np.random.RandomState(0) x = rng.randn(500, 1) for delay in range(-2, 3): @@ -433,9 +429,6 @@ def test_receptive_field_1d(n_jobs): @pytest.mark.parametrize("n_jobs", n_jobs_test) def test_receptive_field_nd(n_jobs): """Test multidimensional support.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - # multidimensional rng = np.random.RandomState(3) x = rng.randn(1000, 3) @@ -552,9 +545,6 @@ def _make_data(n_feats, n_targets, n_samples, tmin, tmax): def test_inverse_coef(): """Test inverse coefficients computation.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - tmin, tmax = 0.0, 10.0 n_feats, n_targets, n_samples = 3, 2, 1000 n_delays = int((tmax - tmin) + 1) @@ -583,9 +573,6 @@ def test_inverse_coef(): def test_linalg_warning(): """Test that warnings are issued when no regularization is applied.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - n_feats, n_targets, n_samples = 5, 60, 50 X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax) for estimator in (0.0, Ridge(alpha=0.0)): @@ -596,12 +583,9 @@ def test_linalg_warning(): rf.fit(y, X) -def test_tdr_sklearn_compliance(): +@parametrize_with_checks([TimeDelayingRidge(0, 10, 1.0, 0.1, "laplacian", n_jobs=1)]) +def test_tdr_sklearn_compliance(estimator, check): """Test sklearn estimator compliance.""" - pytest.importorskip("sklearn") - from sklearn.utils.estimator_checks import check_estimator - - tdr = TimeDelayingRidge(0, 10, 1.0, 0.1, "laplacian", n_jobs=1) # We don't actually comply with a bunch of the regressor specs :( ignores = ( "check_supervised_y_no_nan", @@ -609,27 +593,37 @@ def test_tdr_sklearn_compliance(): "check_parameters_default_constructible", "check_estimators_unfitted", "_invariance", + "check_complex_data", + 
"check_estimators_empty_data_messages", + "check_estimators_nan_inf", + "check_supervised_y_2d", + "check_n_features_in", "check_fit2d_1sample", + "check_fit1d", + "check_fit2d_predict1d", + "check_requires_y_none", ) - for est, check in check_estimator(tdr, generate_only=True): - if any(ignore in str(check) for ignore in ignores): - continue - check(est) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) -def test_rf_sklearn_compliance(): +@pytest.mark.filterwarnings("ignore:.*invalid value encountered in subtract.*:") +@parametrize_with_checks([ReceptiveField(-1, 2, 1.0, estimator=Ridge(), patterns=True)]) +def test_rf_sklearn_compliance(estimator, check): """Test sklearn RF compliance.""" - pytest.importorskip("sklearn") - from sklearn.linear_model import Ridge - from sklearn.utils.estimator_checks import check_estimator - - rf = ReceptiveField(-1, 2, 1.0, estimator=Ridge(), patterns=True) ignores = ( "check_parameters_default_constructible", "_invariance", "check_fit2d_1sample", + # Should probably fix these? 
+ "check_complex_data", + "check_dtype_object", + "check_estimators_empty_data_messages", + "check_n_features_in", + "check_fit2d_predict1d", + "check_estimators_unfitted", ) - for est, check in check_estimator(rf, generate_only=True): - if any(ignore in str(check) for ignore in ignores): - continue - check(est) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) diff --git a/mne/decoding/tests/test_search_light.py b/mne/decoding/tests/test_search_light.py index 329b6b3d30f..9e15a1df59b 100644 --- a/mne/decoding/tests/test_search_light.py +++ b/mne/decoding/tests/test_search_light.py @@ -9,11 +9,22 @@ import pytest from numpy.testing import assert_array_equal, assert_equal +sklearn = pytest.importorskip("sklearn") + +from sklearn.base import BaseEstimator, clone, is_classifier +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.ensemble import BaggingClassifier +from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge +from sklearn.metrics import make_scorer, roc_auc_score +from sklearn.model_selection import cross_val_predict +from sklearn.multiclass import OneVsRestClassifier +from sklearn.pipeline import make_pipeline +from sklearn.svm import SVC +from sklearn.utils.estimator_checks import parametrize_with_checks + from mne.decoding.search_light import GeneralizingEstimator, SlidingEstimator from mne.decoding.transformer import Vectorizer -from mne.utils import _record_warnings, check_version, use_log_level - -sklearn = pytest.importorskip("sklearn") +from mne.utils import check_version, use_log_level NEW_MULTICLASS_SAMPLE_WEIGHT = check_version("sklearn", "1.4") @@ -35,14 +46,6 @@ def test_search_light(): # https://github.com/scikit-learn/scikit-learn/issues/27711 if platform.system() == "Windows" and check_version("numpy", "2.0.0.dev0"): pytest.skip("sklearn int_t / long long mismatch") - from sklearn.linear_model import LogisticRegression, Ridge - from sklearn.metrics import 
make_scorer, roc_auc_score - from sklearn.multiclass import OneVsRestClassifier - from sklearn.pipeline import make_pipeline - - with _record_warnings(): # NumPy module import - from sklearn.ensemble import BaggingClassifier - from sklearn.base import is_classifier logreg = OneVsRestClassifier(LogisticRegression(solver="liblinear", random_state=0)) @@ -197,11 +200,6 @@ def metadata_routing(): def test_generalization_light(metadata_routing): """Test GeneralizingEstimator.""" - from sklearn.linear_model import LogisticRegression - from sklearn.metrics import roc_auc_score - from sklearn.multiclass import OneVsRestClassifier - from sklearn.pipeline import make_pipeline - if NEW_MULTICLASS_SAMPLE_WEIGHT: clf = LogisticRegression(random_state=0) clf.set_fit_request(sample_weight=True) @@ -296,8 +294,6 @@ def test_generalization_light(metadata_routing): ) def test_verbose_arg(capsys, n_jobs, verbose): """Test controlling output with the ``verbose`` argument.""" - from sklearn.svm import SVC - X, y = make_data() clf = SVC() @@ -318,11 +314,6 @@ def test_verbose_arg(capsys, n_jobs, verbose): def test_cross_val_predict(): """Test cross_val_predict with predict_proba.""" - from sklearn.base import BaseEstimator, clone - from sklearn.discriminant_analysis import LinearDiscriminantAnalysis - from sklearn.linear_model import LinearRegression - from sklearn.model_selection import cross_val_predict - rng = np.random.RandomState(42) X = rng.randn(10, 1, 3) y = rng.randint(0, 2, 10) @@ -352,13 +343,9 @@ def predict_proba(self, X): @pytest.mark.slowtest -def test_sklearn_compliance(): +@parametrize_with_checks([SlidingEstimator(LogisticRegression(), allow_2d=True)]) +def test_sklearn_compliance(estimator, check): """Test LinearModel compliance with sklearn.""" - from sklearn.linear_model import LogisticRegression - from sklearn.utils.estimator_checks import check_estimator - - est = SlidingEstimator(LogisticRegression(), allow_2d=True) - ignores = ( "check_estimator_sparse_data", # 
we densify "check_classifiers_one_label_sample_weights", # don't handle singleton @@ -366,8 +353,13 @@ def test_sklearn_compliance(): "check_classifiers_train", "check_decision_proba_consistency", "check_parameters_default_constructible", + # Should probably fix these? + "check_estimators_unfitted", + "check_transformer_data_not_an_array", + "check_n_features_in", + "check_fit2d_predict1d", + "check_do_not_raise_errors_in_init_or_set_params", ) - for est, check in check_estimator(est, generate_only=True): - if any(ignore in str(check) for ignore in ignores): - continue - check(est) + if any(ignore in str(check) for ignore in ignores): + return + check(estimator) diff --git a/mne/decoding/tests/test_ssd.py b/mne/decoding/tests/test_ssd.py index e306dffa2db..198feeb6532 100644 --- a/mne/decoding/tests/test_ssd.py +++ b/mne/decoding/tests/test_ssd.py @@ -8,6 +8,10 @@ import pytest from numpy.testing import assert_array_almost_equal, assert_array_equal +pytest.importorskip("sklearn") + +from sklearn.pipeline import Pipeline + from mne import create_info, io from mne.decoding import CSP from mne.decoding.ssd import SSD @@ -150,8 +154,9 @@ def test_ssd(): ch_types = np.reshape([["mag"] * 10, ["eeg"] * 10], n_channels) info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types) + ssd = SSD(info_2, filt_params_signal, filt_params_noise) with pytest.raises(ValueError, match="At this point SSD"): - ssd = SSD(info_2, filt_params_signal, filt_params_noise) + ssd.fit(X) # Number of channels info_3 = create_info(ch_names=n_channels + 1, sfreq=sf, ch_types="eeg") @@ -298,9 +303,6 @@ def test_ssd_epoched_data(): def test_ssd_pipeline(): """Test if SSD works in a pipeline.""" - pytest.importorskip("sklearn") - from sklearn.pipeline import Pipeline - sf = 250 X, A, S = simulate_data(n_trials=100, n_channels=20, n_samples=500) X_e = np.reshape(X, (100, 20, 500)) diff --git a/mne/decoding/tests/test_time_frequency.py b/mne/decoding/tests/test_time_frequency.py index 
52c4e9f1bc9..37e7d7d8dc2 100644 --- a/mne/decoding/tests/test_time_frequency.py +++ b/mne/decoding/tests/test_time_frequency.py @@ -7,14 +7,15 @@ import pytest from numpy.testing import assert_array_equal +pytest.importorskip("sklearn") + +from sklearn.base import clone + from mne.decoding.time_frequency import TimeFrequency def test_timefrequency(): """Test TimeFrequency.""" - pytest.importorskip("sklearn") - from sklearn.base import clone - # Init n_freqs = 3 freqs = [20, 21, 22] diff --git a/mne/decoding/tests/test_transformer.py b/mne/decoding/tests/test_transformer.py index 098df9fa0f0..8dcc3ad74c7 100644 --- a/mne/decoding/tests/test_transformer.py +++ b/mne/decoding/tests/test_transformer.py @@ -13,6 +13,11 @@ assert_equal, ) +pytest.importorskip("sklearn") + +from sklearn.decomposition import PCA +from sklearn.kernel_ridge import KernelRidge + from mne import Epochs, io, pick_types, read_events from mne.decoding import ( FilterEstimator, @@ -23,7 +28,7 @@ Vectorizer, ) from mne.defaults import DEFAULTS -from mne.utils import check_version, use_log_level +from mne.utils import use_log_level tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) @@ -58,11 +63,6 @@ def test_scaler(info, method): y = epochs.events[:, -1] epochs_data_t = epochs_data.transpose([1, 0, 2]) - if method in ("mean", "median"): - if not check_version("sklearn"): - with pytest.raises((ImportError, RuntimeError), match=" module "): - Scaler(info, method) - return if info: info = epochs.info @@ -217,10 +217,6 @@ def test_vectorizer(): def test_unsupervised_spatial_filter(): """Test unsupervised spatial filter.""" - pytest.importorskip("sklearn") - from sklearn.decomposition import PCA - from sklearn.kernel_ridge import KernelRidge - raw = io.read_raw_fif(raw_fname) events = read_events(event_name) picks = pick_types( diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py index 5d85556c954..b80b36d3922 100644 --- a/mne/decoding/time_delaying_ridge.py +++ 
b/mne/decoding/time_delaying_ridge.py @@ -287,27 +287,22 @@ def __init__( n_jobs=None, edge_correction=True, ): - if tmin > tmax: - raise ValueError(f"tmin must be <= tmax, got {tmin} and {tmax}") - self.tmin = float(tmin) - self.tmax = float(tmax) - self.sfreq = float(sfreq) - self.alpha = float(alpha) + self.tmin = tmin + self.tmax = tmax + self.sfreq = sfreq + self.alpha = alpha self.reg_type = reg_type self.fit_intercept = fit_intercept self.edge_correction = edge_correction self.n_jobs = n_jobs - def _more_tags(self): - return {"no_validation": True} - @property def _smin(self): - return int(round(self.tmin * self.sfreq)) + return int(round(self.tmin_ * self.sfreq_)) @property def _smax(self): - return int(round(self.tmax * self.sfreq)) + 1 + return int(round(self.tmax_ * self.sfreq_)) + 1 def fit(self, X, y): """Estimate the coefficients of the linear model. @@ -326,6 +321,12 @@ def fit(self, X, y): """ _validate_type(X, "array-like", "X") _validate_type(y, "array-like", "y") + self.tmin_ = float(self.tmin) + self.tmax_ = float(self.tmax) + self.sfreq_ = float(self.sfreq) + self.alpha_ = float(self.alpha) + if self.tmin_ > self.tmax_: + raise ValueError(f"tmin must be <= tmax, got {self.tmin_} and {self.tmax_}") X = np.asarray(X, dtype=float) y = np.asarray(y, dtype=float) if X.ndim == 3: @@ -352,7 +353,7 @@ def fit(self, X, y): self.edge_correction, ) self.coef_ = _fit_corrs( - self.cov_, x_y_, n_ch_x, self.reg_type, self.alpha, n_ch_x + self.cov_, x_y_, n_ch_x, self.reg_type, self.alpha_, n_ch_x ) # This is the sklearn formula from LinearModel (will be 0. for no fit) if self.fit_intercept: diff --git a/mne/decoding/time_frequency.py b/mne/decoding/time_frequency.py index 20bcc40baca..de6ec52155b 100644 --- a/mne/decoding/time_frequency.py +++ b/mne/decoding/time_frequency.py @@ -3,11 +3,10 @@ # Copyright the MNE-Python contributors. 
import numpy as np +from sklearn.base import BaseEstimator, TransformerMixin from ..time_frequency.tfr import _compute_tfr from ..utils import _check_option, fill_doc, verbose -from .base import BaseEstimator -from .mixin import TransformerMixin @fill_doc diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index d3cdbc172ea..e293d108ba8 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -3,6 +3,7 @@ # Copyright the MNE-Python contributors. import numpy as np +from sklearn.base import BaseEstimator, TransformerMixin from .._fiff.pick import ( _pick_data_channels, @@ -15,8 +16,6 @@ from ..filter import filter_data from ..time_frequency import psd_array_multitaper from ..utils import _check_option, _validate_type, fill_doc, verbose -from .base import BaseEstimator -from .mixin import TransformerMixin class _ConstantScaler: diff --git a/mne/defaults.py b/mne/defaults.py index a2dd2a05250..d5aab1a8d38 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -64,8 +64,8 @@ whitened="Z", gsr="S", temperature="C", - eyegaze="AU", - pupil="AU", + eyegaze="rad", + pupil="m", ), units=dict( mag="fT", @@ -92,8 +92,8 @@ whitened="Z", gsr="S", temperature="C", - eyegaze="AU", - pupil="AU", + eyegaze="rad", + pupil="µm", ), # scalings for the units scalings=dict( @@ -122,7 +122,7 @@ gsr=1.0, temperature=1.0, eyegaze=1.0, - pupil=1.0, + pupil=1e6, ), # rough guess for a good plot scalings_plot_raw=dict( @@ -156,8 +156,8 @@ gof=1e2, gsr=1.0, temperature=0.1, - eyegaze=3e-1, - pupil=1e3, + eyegaze=2e-1, + pupil=1e-2, ), scalings_cov_rank=dict( mag=1e12, @@ -183,8 +183,8 @@ hbo=(0, 20), hbr=(0, 20), csd=(-50.0, 50.0), - eyegaze=(0.0, 5000.0), - pupil=(0.0, 5000.0), + eyegaze=(-1, 1), + pupil=(-1.0, 1.0), ), titles=dict( mag="Magnetometers", diff --git a/mne/epochs.py b/mne/epochs.py index c25d8a6797b..515bbb69a72 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -411,7 +411,7 @@ class BaseEpochs( .. 
versionadded:: 0.16 %(drop_log)s - filename : str | None + filename : Path | None The filename (if the epochs are read from disk). %(metadata_epochs)s @@ -683,7 +683,7 @@ def __init__( # more memory safe in most instances for ii, epoch in enumerate(self._data): self._data[ii] = np.dot(self._projector, epoch) - self._filename = str(filename) if filename is not None else filename + self.filename = filename if filename is not None else filename if raw_sfreq is None: raw_sfreq = self.info["sfreq"] self._raw_sfreq = raw_sfreq @@ -2013,10 +2013,19 @@ def apply_function( return self @property - def filename(self): - """The filename.""" + def filename(self) -> Path | None: + """The filename if the epochs are loaded from disk. + + :type: :class:`pathlib.Path` | ``None`` + """ return self._filename + @filename.setter + def filename(self, value): + if value is not None: + value = _check_fname(value, overwrite="read", must_exist=True) + self._filename = value + def __repr__(self): """Build string representation.""" s = f"{len(self.events)} events " @@ -4266,15 +4275,15 @@ def __init__(self, fname, proj=True, preload=True, verbose=None): filetype="epochs", endings=("-epo.fif", "-epo.fif.gz", "_epo.fif", "_epo.fif.gz"), ) - fname = str(_check_fname(fname=fname, must_exist=True, overwrite="read")) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") elif not preload: raise ValueError("preload must be used with file-like objects") fnames = [fname] + fname_rep = _get_fname_rep(fname) ep_list = list() raw = list() for fname in fnames: - fname_rep = _get_fname_rep(fname) logger.info(f"Reading {fname_rep} ...") fid, tree, _ = fiff_open(fname, preload=preload) next_fname = _get_next_fname(fid, fname, tree) diff --git a/mne/event.py b/mne/event.py index 91b14c53e80..0c6b63f3396 100644 --- a/mne/event.py +++ b/mne/event.py @@ -519,7 +519,7 @@ def _find_events( else: logger.info( f"Trigger channel {ch_name} has a non-zero initial value of " - "{initial_value} (consider 
using initial_event=True to detect this " + f"{initial_value} (consider using initial_event=True to detect this " "event)" ) diff --git a/mne/evoked.py b/mne/evoked.py index a14436efacf..a985fc30ad7 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -176,7 +176,7 @@ def __init__( ): _validate_type(proj, bool, "'proj'") # Read the requested data - fname = str(_check_fname(fname=fname, must_exist=True, overwrite="read")) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") ( self.info, self.nave, @@ -197,6 +197,18 @@ def __init__( self.apply_proj() self.filename = fname + @property + def filename(self) -> Path | None: + """The filename of the evoked object, if it exists. + + :type: :class:`~pathlib.Path` | None + """ + return self._filename + + @filename.setter + def filename(self, value): + self._filename = Path(value) if value is not None else value + @property def kind(self): """The data kind.""" @@ -1481,6 +1493,7 @@ def __init__( self.baseline = baseline if self.baseline is not None: # omit log msg if not baselining self.apply_baseline(self.baseline) + self._filename = None def _get_entries(fid, evoked_node, allow_maxshield=False): @@ -1562,7 +1575,7 @@ def combine_evoked(all_evoked, weights): .. Warning:: Other than cases like simple subtraction mentioned above (where all - weights are -1 or 1), if you provide numeric weights instead of using + weights are ``-1`` or ``1``), if you provide numeric weights instead of using ``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's ``.nave`` attribute (which is used to scale noise covariance when applying the inverse operator) may not be suitable for inverse imaging. @@ -1571,7 +1584,7 @@ def combine_evoked(all_evoked, weights): ---------- all_evoked : list of Evoked The evoked datasets. 
- weights : list of float | 'equal' | 'nave' + weights : list of float | ``'equal'`` | ``'nave'`` The weights to apply to the data of each evoked instance, or a string describing the weighting strategy to apply: ``'nave'`` computes sum-to-one weights proportional to each object's ``nave`` attribute; @@ -1681,7 +1694,7 @@ def read_evokeds( baseline correction, but merely omit the optional, additional baseline correction. kind : str - Either 'average' or 'standard_error', the type of data to read. + Either ``'average'`` or ``'standard_error'``, the type of data to read. proj : bool If False, available projectors won't be applied to the data. allow_maxshield : bool | str (default False) @@ -1689,7 +1702,7 @@ def read_evokeds( active compensation (MaxShield). Data recorded with MaxShield should generally not be loaded directly, but should first be processed using SSS/tSSS to remove the compensation signals that may also affect brain - activity. Can also be "yes" to load without eliciting a warning. + activity. Can also be ``"yes"`` to load without eliciting a warning. %(verbose)s Returns @@ -1710,7 +1723,7 @@ def read_evokeds( saving, this will be reflected in their ``baseline`` attribute after reading. 
""" - fname = str(_check_fname(fname, overwrite="read", must_exist=True)) + fname = _check_fname(fname, overwrite="read", must_exist=True) check_fname(fname, "evoked", ("-ave.fif", "-ave.fif.gz", "_ave.fif", "_ave.fif.gz")) logger.info(f"Reading {fname} ...") return_list = True diff --git a/mne/export/_eeglab.py b/mne/export/_eeglab.py index 607102901df..3c8f896164a 100644 --- a/mne/export/_eeglab.py +++ b/mne/export/_eeglab.py @@ -18,7 +18,7 @@ def _export_raw(fname, raw): # remove extra epoc and STI channels drop_chs = ["epoc"] # filenames attribute of RawArray is filled with None - if raw.filenames[0] and not (raw.filenames[0].endswith(".fif")): + if raw.filenames[0] and raw.filenames[0].suffix != ".fif": drop_chs.append("STI 014") ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] diff --git a/mne/fixes.py b/mne/fixes.py index 18f4536d72b..e9d62fb42e6 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -19,9 +19,7 @@ import operator as operator_module import os import warnings -from io import StringIO from math import log -from pprint import pprint import numpy as np @@ -134,231 +132,6 @@ def _get_img_fdata(img): return data.astype(dtype) -############################################################################## -# adapted from scikit-learn - - -_DEFAULT_TAGS = { - "array_api_support": False, - "non_deterministic": False, - "requires_positive_X": False, - "requires_positive_y": False, - "X_types": ["2darray"], - "poor_score": False, - "no_validation": False, - "multioutput": False, - "allow_nan": False, - "stateless": False, - "multilabel": False, - "_skip_test": False, - "_xfail_checks": False, - "multioutput_only": False, - "binary_only": False, - "requires_fit": True, - "preserves_dtype": [np.float64], - "requires_y": False, - "pairwise": False, -} - - -class BaseEstimator: - """Base class for all estimators in scikit-learn. 
- - Notes - ----- - All estimators should specify all the parameters that can be set - at the class level in their ``__init__`` as explicit keyword - arguments (no ``*args`` or ``**kwargs``). - """ - - @classmethod - def _get_param_names(cls): - """Get parameter names for the estimator.""" - # fetch the constructor or the original constructor before - # deprecation wrapping if any - init = getattr(cls.__init__, "deprecated_original", cls.__init__) - if init is object.__init__: - # No explicit constructor to introspect - return [] - - # introspect the constructor arguments to find the model parameters - # to represent - init_signature = inspect.signature(init) - # Consider the constructor parameters excluding 'self' - parameters = [ - p - for p in init_signature.parameters.values() - if p.name != "self" and p.kind != p.VAR_KEYWORD - ] - for p in parameters: - if p.kind == p.VAR_POSITIONAL: - raise RuntimeError( - "scikit-learn estimators should always " - "specify their parameters in the signature" - " of their __init__ (no varargs)." - f" {cls} with constructor {init_signature} doesn't " - " follow this convention." - ) - # Extract and sort argument names excluding 'self' - return sorted([p.name for p in parameters]) - - def get_params(self, deep=True): - """Get parameters for this estimator. - - Parameters - ---------- - deep : bool, optional - If True, will return the parameters for this estimator and - contained subobjects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - out = dict() - for key in self._get_param_names(): - # We need deprecation warnings to always be on in order to - # catch deprecated param values. - # This is set in utils/__init__.py but it gets overwritten - # when running under python3 somehow. 
- warnings.simplefilter("always", DeprecationWarning) - try: - with warnings.catch_warnings(record=True) as w: - value = getattr(self, key, None) - if len(w) and w[0].category is DeprecationWarning: - # if the parameter is deprecated, don't show it - continue - finally: - warnings.filters.pop(0) - - # XXX: should we rather test if instance of estimator? - if deep and hasattr(value, "get_params"): - deep_items = value.get_params().items() - out.update((key + "__" + k, val) for k, val in deep_items) - out[key] = value - return out - - def set_params(self, **params): - """Set the parameters of this estimator. - - The method works on simple estimators as well as on nested objects - (such as pipelines). The latter have parameters of the form - ``__`` so that it's possible to update each - component of a nested object. - - Parameters - ---------- - **params : dict - Parameters. - - Returns - ------- - inst : instance - The object. - """ - if not params: - # Simple optimisation to gain speed (inspect is slow) - return self - valid_params = self.get_params(deep=True) - for key, value in params.items(): - split = key.split("__", 1) - if len(split) > 1: - # nested objects case - name, sub_name = split - if name not in valid_params: - raise ValueError( - f"Invalid parameter {name} for estimator {self}. " - "Check the list of available parameters " - "with `estimator.get_params().keys()`." - ) - sub_object = valid_params[name] - sub_object.set_params(**{sub_name: value}) - else: - # simple objects case - if key not in valid_params: - raise ValueError( - f"Invalid parameter {key} for estimator " - f"{self.__class__.__name__}. " - "Check the list of available parameters " - "with `estimator.get_params().keys()`." 
- ) - setattr(self, key, value) - return self - - def __repr__(self): # noqa: D105 - params = StringIO() - pprint(self.get_params(deep=False), params) - params.seek(0) - class_name = self.__class__.__name__ - return f"{class_name}({params.read().strip()})" - - # __getstate__ and __setstate__ are omitted because they only contain - # conditionals that are not satisfied by our objects (e.g., - # ``if type(self).__module__.startswith('sklearn.')``. - - def _more_tags(self): - return _DEFAULT_TAGS - - def _get_tags(self): - collected_tags = {} - for base_class in reversed(inspect.getmro(self.__class__)): - if hasattr(base_class, "_more_tags"): - # need the if because mixins might not have _more_tags - # but might do redundant work in estimators - # (i.e. calling more tags on BaseEstimator multiple times) - more_tags = base_class._more_tags(self) - collected_tags.update(more_tags) - return collected_tags - - -# newer sklearn deprecates importing from sklearn.metrics.scoring, -# but older sklearn does not expose check_scoring in sklearn.metrics. -def _get_check_scoring(): - try: - from sklearn.metrics import check_scoring # noqa - except ImportError: - from sklearn.metrics.scorer import check_scoring # noqa - return check_scoring - - -def _check_fit_params(X, fit_params, indices=None): - """Check and validate the parameters passed during `fit`. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - Data array. - - fit_params : dict - Dictionary containing the parameters passed at fit. - - indices : array-like of shape (n_samples,), default=None - Indices to be selected if the parameter has the same size as - `X`. - - Returns - ------- - fit_params_validated : dict - Validated parameters. We ensure that the values support - indexing. 
- """ - try: - from sklearn.utils.validation import ( - _check_fit_params as _sklearn_check_fit_params, - ) - - return _sklearn_check_fit_params(X, fit_params, indices) - except ImportError: - from sklearn.model_selection import _validation - - fit_params_validated = { - k: _validation._index_param_value(X, v, indices) - for k, v in fit_params.items() - } - return fit_params_validated - - ############################################################################### # Copied from sklearn to simplify code paths @@ -401,7 +174,54 @@ def empirical_covariance(X, assume_centered=False): return covariance -class EmpiricalCovariance(BaseEstimator): +class _EstimatorMixin: + def _param_names(self): + return inspect.getfullargspec(self.__init__).args[1:] + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + out = dict() + for key in self._param_names(): + out[key] = getattr(self, key) + return out + + def set_params(self, **params): + """Set the parameters of this estimator. + + The method works on simple estimators as well as on nested objects + (such as pipelines). The latter have parameters of the form + ``__`` so that it's possible to update each + component of a nested object. + + Parameters + ---------- + **params : dict + Estimator parameters. + + Returns + ------- + self : object + Estimator instance. + """ + param_names = self._param_names() + for key in params: + if key in param_names: + setattr(self, key, params[key]) + + +class EmpiricalCovariance(_EstimatorMixin): """Maximum likelihood covariance estimator. Read more in the :ref:`User Guide `. 
diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py index 4aaec5cbc94..98e3fbfc0b3 100644 --- a/mne/gui/_coreg.py +++ b/mne/gui/_coreg.py @@ -15,7 +15,7 @@ from pathlib import Path import numpy as np -from traitlets import Bool, Float, HasTraits, Unicode, observe +from traitlets import Bool, Float, HasTraits, Instance, Unicode, observe from .._fiff.constants import FIFF from .._fiff.meas_info import _empty_info, read_fiducials, read_info, write_fiducials @@ -92,51 +92,51 @@ class CoregistrationUI(HasTraits): Parameters ---------- - info_file : None | str + info_file : None | path-like The FIFF file with digitizer data for coregistration. %(subject)s %(subjects_dir)s %(fiducials)s head_resolution : bool - If True, use a high-resolution head surface. Defaults to False. + If ``True``, use a high-resolution head surface. Defaults to ``False``. head_opacity : float - The opacity of the head surface. Defaults to 0.8. + The opacity of the head surface. Defaults to ``0.8``. hpi_coils : bool - If True, display the HPI coils. Defaults to True. + If ``True``, display the HPI coils. Defaults to ``True``. head_shape_points : bool - If True, display the head shape points. Defaults to True. + If ``True``, display the head shape points. Defaults to ``True``. eeg_channels : bool - If True, display the EEG channels. Defaults to True. + If ``True``, display the EEG channels. Defaults to ``True``. meg_channels : bool - If True, display the MEG channels. Defaults to False. + If ``True``, display the MEG channels. Defaults to ``False``. fnirs_channels : bool - If True, display the fNIRS channels. Defaults to True. + If ``True``, display the fNIRS channels. Defaults to ``True``. orient_glyphs : bool - If True, orient the sensors towards the head surface. Default to False. + If ``True``, orient the sensors towards the head surface. Default to ``False``. scale_by_distance : bool - If True, scale the sensors based on their distance to the head surface. - Defaults to True. 
+ If ``True``, scale the sensors based on their distance to the head surface. + Defaults to ``True``. mark_inside : bool - If True, mark the head shape points that are inside the head surface - with a different color. Defaults to True. + If ``True``, mark the head shape points that are inside the head surface + with a different color. Defaults to ``True``. sensor_opacity : float - The opacity of the sensors between 0 and 1. Defaults to 1.0. + The opacity of the sensors between ``0`` and ``1``. Defaults to ``1.``. trans : path-like | Transform - The Head<->MRI transform or the path to its FIF file ("-trans.fif"). + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). size : tuple The dimensions (width, height) of the rendering view. The default is - (800, 600). - bgcolor : tuple | str + ``(800, 600)``. + bgcolor : tuple of float | str The background color as a tuple (red, green, blue) of float - values between 0 and 1 or a valid color name (i.e. 'white' - or 'w'). Defaults to 'grey'. + values between ``0`` and ``1`` or a valid color name (i.e. ``'white'`` + or ``'w'``). Defaults to ``'grey'``. show : bool - Display the window as soon as it is ready. Defaults to True. + Display the window as soon as it is ready. Defaults to ``True``. block : bool Whether to halt program execution until the GUI has been closed (``True``) or not (``False``, default). %(fullscreen)s - The default is False. + The default is ``False``. .. 
versionadded:: 1.1 %(interaction_scene)s @@ -155,7 +155,7 @@ class CoregistrationUI(HasTraits): _subjects_dir = Unicode() _lock_fids = Bool() _current_fiducial = Unicode() - _info_file = Unicode() + _info_file = Instance(Path, default_value=Path(".")) _orient_glyphs = Bool() _scale_by_distance = Bool() _mark_inside = Bool() @@ -457,10 +457,9 @@ def _set_info_file(self, fname): tuple(supported), endings_err=tuple(supported), ) - fname = str(_check_fname(fname, overwrite="read")) # cast to str - + fname = Path(fname) # ctf ds `files` are actually directories - if fname.endswith((".ds",)): + if fname.suffix == ".ds": info_file = _check_fname( fname, overwrite="read", must_exist=True, need_dir=True ) @@ -473,7 +472,7 @@ def _set_info_file(self, fname): valid = False if valid: style = dict(border="initial") - self._info_file = str(info_file) + self._info_file = info_file else: style = dict(border="2px solid #ff0000") self._forward_widget_command("info_file_field", "set_style", style) @@ -712,7 +711,7 @@ def _current_fiducial_changed(self, change=None): def _info_file_changed(self, change=None): if not self._info_file: return - elif self._info_file.endswith((".fif", ".fif.gz")): + elif self._info_file.name.endswith((".fif", ".fif.gz")): fid, tree, _ = fiff_open(self._info_file) fid.close() if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0: @@ -1279,7 +1278,14 @@ def _add_channels(self): fnirs=self._defaults["sensor_opacity"], meg=0.25, ) - picks = pick_types(self._info, ref_meg=False, meg=True, eeg=True, fnirs=True) + picks = pick_types( + self._info, + ref_meg=False, + meg=True, + eeg=True, + fnirs=True, + exclude=(), + ) these_actors = _plot_sensors_3d( self._renderer, self._info, @@ -1296,7 +1302,7 @@ def _add_channels(self): nearest=self._nearest, **plot_types, ) - sens_actors = sum(these_actors.values(), list()) + sens_actors = sum((these_actors or {}).values(), list()) self._update_actor("sensors", sens_actors) def _add_head_surface(self): @@ -1761,7 +1767,7 @@ 
def _configure_dock(self): ) self._widgets["meg"] = self._renderer._dock_add_check_box( name="Show MEG sensors", - value=self._helmet, + value=self._meg_channels, callback=self._set_meg_channels, tooltip="Enable/Disable MEG sensors", layout=view_options_layout, @@ -1911,7 +1917,7 @@ def _configure_dock(self): tooltip="Save the transform file to disk", layout=save_trans_layout, filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", - initial_directory=str(Path(self._info_file).parent), + initial_directory=self._info_file.parent, ) self._widgets["load_trans"] = self._renderer._dock_add_file_button( name="load_trans", @@ -1920,7 +1926,7 @@ def _configure_dock(self): tooltip="Load the transform file from disk", layout=save_trans_layout, filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", - initial_directory=str(Path(self._info_file).parent), + initial_directory=self._info_file.parent, ) self._renderer._layout_add_widget(trans_layout, save_trans_layout) self._widgets["reset_trans"] = self._renderer._dock_add_button( diff --git a/mne/gui/_gui.py b/mne/gui/_gui.py index f4db67cf2f6..b8898d8b7c2 100644 --- a/mne/gui/_gui.py +++ b/mne/gui/_gui.py @@ -40,35 +40,35 @@ def coregistration( width : int | None Specify the width for window (in logical pixels). Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value - (which defaults to 800). + (which defaults to ``800``). height : int | None Specify a height for window (in logical pixels). Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value - (which defaults to 400). - inst : None | str + (which defaults to ``400``). + inst : None | path-like Path to an instance file containing the digitizer data. Compatible for Raw, Epochs, and Evoked files. subject : None | str Name of the mri subject. %(subjects_dir)s head_opacity : float | None - The opacity of the head surface in the range [0., 1.]. + The opacity of the head surface in the range ``[0., 1.]``. 
Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value - (which defaults to 1.). + (which defaults to ``1.``). head_high_res : bool | None Use a high resolution head surface. Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value (which defaults to True). trans : path-like | Transform | None - The Head<->MRI transform or the path to its FIF file ("-trans.fif"). + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). orient_to_surface : bool | None - If True (default), orient EEG electrode and head shape points - to the head surface. + If True (default), orient EEG electrode and head shape points to the head + surface. .. versionadded:: 0.16 scale_by_distance : bool | None - If True (default), scale the digitization points by their - distance from the scalp surface. + If True (default), scale the digitization points by their distance from the + scalp surface. .. versionadded:: 0.16 mark_inside : bool | None @@ -84,8 +84,8 @@ def coregistration( Default interaction mode if ``None`` and no config setting found changed from ``'trackball'`` to ``'terrain'``. %(fullscreen)s - Default is None, which uses ``MNE_COREG_FULLSCREEN`` config value - (which defaults to False). + Default is ``None``, which uses ``MNE_COREG_FULLSCREEN`` config value + (which defaults to ``False``). .. 
versionadded:: 1.1 show : bool @@ -176,9 +176,7 @@ def __call__(self, block, block_vars, gallery_conf): gui_classes = (CoregistrationUI,) try: - from mne_gui_addons._ieeg_locate import ( - IntracranialElectrodeLocator, - ) + from mne_gui_addons._ieeg_locate import IntracranialElectrodeLocator except Exception: pass else: diff --git a/mne/gui/tests/test_coreg.py b/mne/gui/tests/test_coreg.py index 409b5c6665c..9c0db7164c3 100644 --- a/mne/gui/tests/test_coreg.py +++ b/mne/gui/tests/test_coreg.py @@ -239,6 +239,9 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, renderer_interactive_pyv assert not coreg._helmet assert coreg._actors["helmet"] is None coreg._set_helmet(True) + assert coreg._eeg_channels + coreg._set_eeg_channels(False) + assert not coreg._eeg_channels assert coreg._helmet with catch_logging() as log: coreg._redraw(verbose="debug") @@ -251,11 +254,17 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, renderer_interactive_pyv log = log.getvalue() assert "Drawing helmet" in log assert not coreg._meg_channels + assert coreg._actors["helmet"] is not None + # TODO: Someday test our file dialogs like: + # coreg._widgets["save_trans"].widget.click() + assert len(coreg._actors["sensors"]) == 0 coreg._set_meg_channels(True) assert coreg._meg_channels with catch_logging() as log: coreg._redraw(verbose="debug") assert "Drawing meg sensors" in log.getvalue() + assert coreg._actors["helmet"] is not None + assert len(coreg._actors["sensors"]) == 306 assert coreg._orient_glyphs assert coreg._scale_by_distance assert coreg._mark_inside @@ -263,7 +272,6 @@ def test_coreg_gui_pyvista_basic(tmp_path, monkeypatch, renderer_interactive_pyv coreg._head_opacity, float(config.get("MNE_COREG_HEAD_OPACITY", "0.8")) ) assert coreg._hpi_coils - assert coreg._eeg_channels assert coreg._head_shape_points assert coreg._scale_mode == "None" assert coreg._icp_fid_match == "matched" diff --git a/mne/html_templates/report/html.html.jinja 
b/mne/html_templates/report/html.html.jinja index 62b9da07911..a9b4f881f12 100644 --- a/mne/html_templates/report/html.html.jinja +++ b/mne/html_templates/report/html.html.jinja @@ -1,7 +1,6 @@
-
- - -
+
{{ html | safe }}
diff --git a/mne/html_templates/report/image.html.jinja b/mne/html_templates/report/image.html.jinja index 06a6855ace5..41cf47e1395 100644 --- a/mne/html_templates/report/image.html.jinja +++ b/mne/html_templates/report/image.html.jinja @@ -1,17 +1,17 @@ {% extends "section.html.jinja" %} {% block html_content %} -
- {% if image_format == 'svg' %} -
- {{ img|safe }} -
- {% else %} - {{ title }} - {% endif %} +
+ {% if image_format == 'svg' %} +
+ {{ img|safe }} +
+ {% else %} + {{ title }} + {% endif %} - {% if caption is not none %} -
{{ caption }}
- {% endif %} -
+ {% if caption is not none %} +
{{ caption }}
+ {% endif %} +
{% endblock html_content %} diff --git a/mne/html_templates/report/section.html.jinja b/mne/html_templates/report/section.html.jinja index 584ff86dda9..baddf7dd8b6 100644 --- a/mne/html_templates/report/section.html.jinja +++ b/mne/html_templates/report/section.html.jinja @@ -1,7 +1,6 @@
-
- - -
+
{% block html_content %} {% for html in htmls %} diff --git a/mne/html_templates/report/slider.html.jinja b/mne/html_templates/report/slider.html.jinja index 58ee8a9f9fc..fab7f56472d 100644 --- a/mne/html_templates/report/slider.html.jinja +++ b/mne/html_templates/report/slider.html.jinja @@ -1,8 +1,7 @@
-
- -
+
- -