diff --git a/.circleci/config.yml b/.circleci/config.yml index 5e93be99fc0..8c05b80a769 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,26 +1,61 @@ -# Tagging a commit with [circle front] will build the front page and perform test-doc. -# Tagging a commit with [circle full] will build everything. -version: 2 +# Tagging a commit with: +# - [circle front] will build the front page and perform test-doc +# - [circle full] will build everything +# - [circle linkcheck] will run our linkcheck +# - [circle interactive_test] will run our test suite (useful for debugging +# issues using "Rerun with SSH") + +version: 2.1 + +_xvfb: &xvfb + name: Start Xvfb virtual framebuffer + command: | + echo "export DISPLAY=:99" >> $BASH_ENV + /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1280x1024x24 -ac +extension GLX +render -noreset -nolisten tcp -nolisten unix + jobs: build_docs: docker: - image: circleci/python:3.8.5-buster steps: + - restore_cache: + keys: + - source-cache - checkout + - run: + name: Complete checkout + command: | + if ! git remote -v | grep upstream; then + git remote add upstream git://github.com/mne-tools/mne-python.git + fi + git fetch upstream + - save_cache: + key: source-cache + paths: + - ".git" + - run: + name: Check-skip + command: | + export COMMIT_MESSAGE=$(git log --format=oneline -n 1); + if [[ "$COMMIT_MESSAGE" == *"[skip circle]"* ]] || [[ "$COMMIT_MESSAGE" == *"[circle skip]"* ]]; then + echo "Skip detected, exiting job ${CIRCLE_JOB}." + circleci-agent step halt; + fi - run: name: Set BASH_ENV command: | set -e echo "set -e" >> $BASH_ENV - echo "export DISPLAY=:99" >> $BASH_ENV echo "export OPENBLAS_NUM_THREADS=4" >> $BASH_ENV - echo "export XDG_RUNTIME_DIR=/tmp/runtime-circleci" >> $BASH_ENV + echo "export XDG_RUNTIME_DIR=/tmp/runtime-circleci" >> $BASH_ENV + echo "export MNE_FULL_DATE=true" >> $BASH_ENV source tools/get_minimal_commands.sh - echo "source ${PWD}/tools/get_minimal_commands.sh" >> $BASH_ENV echo "export MNE_3D_BACKEND=pyvista" >> $BASH_ENV - echo "export PATH=~/.local/bin/:${MNE_ROOT}/bin:$PATH" >> $BASH_ENV + echo "export PATH=~/.local/bin/:$PATH" >> $BASH_ENV echo "BASH_ENV:" cat $BASH_ENV + mkdir -p ~/mne_data + touch pattern.txt; - run: name: check neuromag2ft command: | @@ -33,69 +68,53 @@ jobs: echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt if [[ $(cat merge.txt) != "" ]]; then echo "Merging $(cat merge.txt)"; - git remote add upstream git://github.com/mne-tools/mne-python.git; git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge"; - git fetch upstream master; fi - # Load our data - - restore_cache: - keys: - - data-cache-0 - - data-cache-1 - - data-cache-2 - - data-cache-3 - - data-cache-4 - - data-cache-5 - - data-cache-6 - - data-cache-7 - - data-cache-8 - - pip-cache - - run: - name: Install 3D rendering libraries \ PyQt5 dependencies \ graphviz \ optipng (for optimized images) - command: | - sudo apt-get install libosmesa6 libglx-mesa0 libopengl0 libglx0 libdbus-1-3 \ - libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-shape0 libxcb-xfixes0 libxcb-xinerama0 \ - graphviz \ - optipng - - - run: - name: Spin up Xvfb - command: | - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; - - # 
https://github.com/ContinuumIO/anaconda-issues/issues/9190#issuecomment-386508136 - # https://github.com/golemfactory/golem/issues/1019 + <<: *xvfb - run: name: Install fonts needed for diagrams command: | mkdir -p $HOME/.fonts - curl https://codeload.github.com/adobe-fonts/source-code-pro/tar.gz/2.030R-ro/1.050R-it | tar xz -C $HOME/.fonts - curl https://codeload.github.com/adobe-fonts/source-sans-pro/tar.gz/3.006R | tar xz -C $HOME/.fonts + curl https://codeload.github.com/adobe-fonts/source-code-pro/tar.gz/2.038R-ro/1.058R-it/1.018R-VAR | tar xz -C $HOME/.fonts + curl https://codeload.github.com/adobe-fonts/source-sans-pro/tar.gz/3.028R | tar xz -C $HOME/.fonts fc-cache -f + # Load pip cache + - restore_cache: + keys: + - pip-cache + - restore_cache: + keys: + - user-install-bin-cache + + # Hack in uninstalls of libraries as necessary if pip doesn't do the right thing in upgrading for us... - run: name: Get Python running command: | - python -m pip install --user --upgrade --progress-bar off pip setuptools - python -m pip install --user --upgrade --progress-bar off --pre sphinx - python -m pip install --user --progress-bar off https://github.com/pyvista/pyvista/zipball/master - python -m pip install --user --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master - python -m pip install --user --upgrade --progress-bar off -r requirements.txt -r requirements_testing.txt -r requirements_doc.txt - python -m pip uninstall -yq pysurfer mayavi - python -m pip install --user -e . + ./tools/circleci_dependencies.sh - save_cache: key: pip-cache paths: - ~/.cache/pip + - save_cache: + key: user-install-bin-cache + paths: + - ~/.local/lib/python3.8/site-packages + - ~/.local/bin - run: name: Check PyQt5 command: LD_DEBUG=libs python -c "from PyQt5.QtWidgets import QApplication, QWidget; app = QApplication([])" + # Load tiny cache so that ~/.mne does not need to be created below + - restore_cache: + keys: + - data-cache-tiny-0 + # Look at what we have and fail early if there is some library conflict - run: name: Check installation @@ -109,20 +128,61 @@ jobs: python -c "import mne; level = mne.get_config('MNE_LOGGING_LEVEL'); assert level.lower() == 'info', repr(level)" # Figure out if we should run a full, pattern, or noplot version + - restore_cache: + keys: + - data-cache-tiny-1 + - restore_cache: + keys: + - data-cache-multimodal + - restore_cache: + keys: + - data-cache-limo + - restore_cache: + keys: + - data-cache-fsaverage + - restore_cache: + keys: + - data-cache-bst-phantom-ctf + - restore_cache: + keys: + - data-cache-bst-raw + - restore_cache: + keys: + - data-cache-bst-phantom-elekta + - restore_cache: + keys: + - data-cache-bst-auditory + - restore_cache: + keys: + - data-cache-bst-resting + - restore_cache: + keys: + - data-cache-fieldtrip + - restore_cache: + keys: + - data-cache-somato + - restore_cache: + keys: + - data-cache-hf-sef + - restore_cache: + keys: + - data-cache-opm + - restore_cache: + keys: + - data-cache-sample + - restore_cache: + keys: + - data-cache-spm-face + - restore_cache: + keys: + - data-cache-testing + - restore_cache: + keys: + - data-cache-visual - run: name: Get data command: | - python setup.py develop --user - mkdir -p ~/mne_data - touch pattern.txt; ./tools/circleci_download.sh - - - run: - name: Get data (again) - when: on_fail - command: | - ./tools/circleci_download.sh - - run: name: Verify build type command: | @@ -143,8 +203,7 @@ jobs: - run: name: make html command: | - cd doc; - PATTERN=$(cat ../pattern.txt) make $(cat 
../build.txt); + PATTERN=$(cat pattern.txt) make -C doc $(cat build.txt); - run: name: Show profiling output when: always @@ -165,15 +224,11 @@ jobs: name: Reduce artifact upload time command: | if grep -q html_dev-pattern-memory build.txt || grep -q html_dev-noplot build.txt; then - tar czf doc/_build/html/_downloads.tgz doc/_build/html/_downloads - rm -Rf doc/_build/html/_downloads - rm -f doc/auto_*/*/*.pickle - rm -f doc/auto_*/*/*.codeobj - rm -f doc/auto_*/*/*.md5 - rm -f doc/auto_*/*/*.py - rm -f doc/auto_*/*/*.ipynb - rm -f doc/generated/*.examples + zip -rm doc/_build/html/_downloads.zip doc/_build/html/_downloads fi + for NAME in generated auto_tutorials auto_examples; do + zip -rm doc/${NAME}.zip doc/${NAME} + done # Save the JUnit file - store_test_results: @@ -183,14 +238,11 @@ jobs: destination: test-results # Save the SG RST - store_artifacts: - path: doc/auto_examples - destination: auto_examples + path: doc/auto_examples.zip - store_artifacts: - path: doc/auto_tutorials - destination: auto_tutorials + path: doc/auto_tutorials.zip - store_artifacts: - path: doc/generated - destination: generated + path: doc/generated.zip # Save the HTML - store_artifacts: path: doc/_build/html/ @@ -206,77 +258,133 @@ jobs: # Keep these separate, maybe better in terms of size limitations (?) - save_cache: - key: data-cache-0 + key: data-cache-tiny-0 # < 100 M, might as well combine paths: - ~/.mne - - ~/mne_data/mTRF_1.5 + - ~/mne_data/MNE-kiloword-data # (28 M) + - ~/mne_data/MNE-eegbci-data # (35 M) + - ~/mne_data/MNE-misc-data # (39 M) + - ~/mne_data/mTRF_1.5 # (56 M) + - ~/mne_data/MNE-phantom-4DBTi # (77 M) - save_cache: - key: data-cache-1 + key: data-cache-tiny-1 # more to combine paths: - - ~/mne_data/HF_SEF - - ~/mne_data/MEGSIM + - ~/mne_data/MNE-fNIRS-motor-data # (71 M) + - ~/mne_data/MNE-refmeg-noise-data # (93 M) + - ~/mne_data/physionet-sleep-data # (95 M) - save_cache: - key: data-cache-2 + key: data-cache-multimodal paths: - - ~/mne_data/MNE-brainstorm-data - - ~/mne_data/MNE-eegbci-data + - ~/mne_data/MNE-multimodal-data # (240 M) - save_cache: - key: data-cache-3 + key: data-cache-limo paths: - - ~/mne_data/MNE-fieldtrip_cmc-data - - ~/mne_data/MNE-kiloword-data + - ~/mne_data/MNE-limo-data # (244 M) - save_cache: - key: data-cache-4 + key: data-cache-fsaverage paths: - - ~/mne_data/MNE-misc-data - - ~/mne_data/MNE-multimodal-data + - ~/mne_data/MNE-fsaverage-data # (762 M) - save_cache: - key: data-cache-5 + key: data-cache-bst-phantom-ctf paths: - - ~/mne_data/MNE-OPM-data - - ~/mne_data/MNE-phantom-4DBTi + - ~/mne_data/MNE-brainstorm-data/bst_phantom_ctf # (177 M) - save_cache: - key: data-cache-6 + key: data-cache-bst-raw paths: - - ~/mne_data/MNE-sample-data - - ~/mne_data/MNE-somato-data + - ~/mne_data/MNE-brainstorm-data/bst_raw # (830 M) - save_cache: - key: data-cache-7 + key: data-cache-bst-phantom-elekta paths: - - ~/mne_data/MNE-spm-face - - ~/mne_data/MNE-testing-ata + - ~/mne_data/MNE-brainstorm-data/bst_phantom_elekta # (1.4 G) - save_cache: - key: data-cache-8 + key: data-cache-bst-auditory paths: - - ~/mne_data/MNE-visual_92_categories-data - - ~/mne_data/MNE-limo-data + - ~/mne_data/MNE-brainstorm-data/bst_auditory # (2.9 G) + - save_cache: + key: data-cache-bst-resting + paths: + - ~/mne_data/MNE-brainstorm-data/bst_resting # (4.5 G) + - save_cache: + key: data-cache-fieldtrip + paths: + - ~/mne_data/MNE-fieldtrip_cmc-data # (699 M) + - save_cache: + key: data-cache-somato + paths: + - ~/mne_data/MNE-somato-data # (750 M) + - save_cache: + key: 
data-cache-hf-sef + paths: + - ~/mne_data/HF_SEF # (1.3 G) + - save_cache: + key: data-cache-opm + paths: + - ~/mne_data/MNE-OPM-data # (1.9 G) + - save_cache: + key: data-cache-sample + paths: + - ~/mne_data/MNE-sample-data # (3.2 G) + - save_cache: + key: data-cache-spm-face + paths: + - ~/mne_data/MNE-spm-face # (1.5 G) + - save_cache: + key: data-cache-testing + paths: + - ~/mne_data/MNE-testing-data # (2.5 G) + - save_cache: + key: data-cache-visual + paths: + - ~/mne_data/MNE-visual_92_categories-data # (6 G) linkcheck: # there are a few files excluded from this for expediency, see Makefile + parameters: + scheduled: + type: string + default: "false" docker: - - image: circleci/python:3.6-jessie + - image: circleci/python:3.9.2-buster steps: + - restore_cache: + keys: + - source-cache - checkout - run: - name: pip install dependencies + name: Set BASH_ENV command: | - set -e; - python -m pip install --user --progress-bar off numpy scipy matplotlib pillow - python -m pip install --user --progress-bar off -r requirements_doc.txt - python -m pip install --user -e . + set -e + echo "set -e" >> $BASH_ENV + echo "export PATH=~/.local/bin/:$PATH" >> $BASH_ENV + - run: + name: Check-skip + command: | + export COMMIT_MESSAGE=$(git log --format=oneline -n 1); + if [[ "$COMMIT_MESSAGE" != *"[circle linkcheck]"* ]] && [ "<< parameters.scheduled >>" != "true" ]; then + echo "Skip detected, exiting job ${CIRCLE_JOB}." + circleci-agent step halt; + fi + - restore_cache: + keys: + - pip-cache + - run: + name: Get Python running + command: | + ./tools/circleci_dependencies.sh + - run: + name: Check installation + command: | + mne sys_info - run: name: make linkcheck command: | - set -e - cd doc - PATH=~/.local/bin:$PATH make linkcheck + make -C doc linkcheck - run: name: make linkcheck-grep when: always command: | - cd doc - make linkcheck-grep + make -C doc linkcheck-grep - store_artifacts: path: doc/_build/linkcheck destination: linkcheck @@ -292,9 +400,21 @@ jobs: keys: - website-cache - run: - name: Fetch docs + name: Set BASH_ENV command: | set -e + echo "set -e" >> $BASH_ENV + # Don't try to deploy if nothing is there or not on the right branch + - run: + name: Check docs + command: | + if [ ! -f /tmp/build/html/index.html ] && [ ! 
-f /tmp/build/html_stable/index.html ]; then + echo "No files found to upload (build: ${CIRCLE_BRANCH})."; + circleci-agent step halt; + fi; + - run: + name: Fetch docs + command: | mkdir -p ~/.ssh echo -e "Host *\nStrictHostKeyChecking no" > ~/.ssh/config chmod og= ~/.ssh/config @@ -304,33 +424,28 @@ jobs: - run: name: Deploy docs command: | - set -e; - if [ "${CIRCLE_BRANCH}" == "master" ] || [ "${CIRCLE_BRANCH}" == "maint/0.21" ]; then - git config --global user.email "circle@mne.com"; - git config --global user.name "Circle CI"; - cd ~/mne-tools.github.io; - git checkout master - git remote -v - git fetch origin - git reset --hard origin/master - git clean -xdf - if [ "${CIRCLE_BRANCH}" == "master" ]; then - echo "Deploying dev docs for ${CIRCLE_BRANCH}."; - rm -Rf dev; - cp -a /tmp/build/html dev; - git add -A; - git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM})."; - else - echo "Deploying stable docs for ${CIRCLE_BRANCH}."; - rm -Rf stable; - cp -a /tmp/build/html_stable stable; - git add -A; - git commit -m "CircleCI update of stable docs (${CIRCLE_BUILD_NUM})."; - fi; - git push origin master; + git config --global user.email "circle@mne.com"; + git config --global user.name "Circle CI"; + cd ~/mne-tools.github.io; + git checkout main + git remote -v + git fetch origin + git reset --hard origin/main + git clean -xdf + if [ "${CIRCLE_BRANCH}" == "main" ]; then + echo "Deploying dev docs for ${CIRCLE_BRANCH}."; + rm -Rf dev; + cp -a /tmp/build/html dev; + git add -A; + git commit -m "CircleCI update of dev docs (${CIRCLE_BUILD_NUM})."; else - echo "No deployment (build: ${CIRCLE_BRANCH})."; - fi + echo "Deploying stable docs for ${CIRCLE_BRANCH}."; + rm -Rf stable; + cp -a /tmp/build/html_stable stable; + git add -A; + git commit -m "CircleCI update of stable docs (${CIRCLE_BUILD_NUM})."; + fi; + git push origin main; - save_cache: key: website-cache paths: @@ -339,57 +454,71 @@ jobs: interactive_test: docker: - - image: circleci/python:3.8.5-buster + - image: circleci/python:3.9.2-buster steps: + - restore_cache: + keys: + - source-cache - checkout + - run: + name: Set BASH_ENV + command: | + set -e + echo "set -e" >> $BASH_ENV + echo "export OPENBLAS_NUM_THREADS=1" >> $BASH_ENV + mkdir -p ~/mne_data + - run: + name: Check-skip + command: | + export COMMIT_MESSAGE=$(git log --format=oneline -n 1); + if [[ "$COMMIT_MESSAGE" != *"[circle interactive_test]"* ]]; then + echo "Skip detected, exiting job ${CIRCLE_JOB}." + circleci-agent step halt; + fi + - run: + <<: *xvfb + - restore_cache: + keys: + - pip-cache - run: name: Get Python running command: | - python -m pip install --user --upgrade --progress-bar off pip setuptools - python -m pip install -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --pre numpy - python -m pip install -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow - python -m pip install -r requirements_testing.txt - python -m pip install --user -e . 
- - # Look at what we have and fail early if there is some library conflict + ./tools/circleci_dependencies.sh - run: name: Check installation command: | - which python - python -c "import numpy; numpy.show_config()" - python -c "from numpy._pytesttester import _show_numpy_info; _show_numpy_info()" - - # Figure out if we should run a full, pattern, or noplot version + mne sys_info + - restore_cache: + keys: + - data-cache-testing - run: name: Get data command: | python -c "import mne; mne.datasets.testing.data_path(verbose=True)" - - # Run doctest (if it's full or front) before building the docs - run: name: pytest command: | - pytest -m "not ultraslowtest" mne -xv + pytest -m "not slowtest" mne -xv workflows: - version: 2 - default: jobs: - build_docs + - linkcheck + - interactive_test - deploy: requires: - build_docs filters: branches: only: - - master - - maint/0.21 - # interactive_test + - main + - maint/0.22 weekly: jobs: - - linkcheck + - linkcheck: + scheduled: "true" triggers: - schedule: # "At 00:00 on Sunday" should be often enough @@ -397,4 +526,4 @@ workflows: filters: branches: only: - - master + - main diff --git a/.coveragerc b/.coveragerc index 7e3c94bdfcc..be3b5ef225d 100644 --- a/.coveragerc +++ b/.coveragerc @@ -14,3 +14,5 @@ omit = exclude_lines = pragma: no cover if __name__ == .__main__.: + @abstractmethod + @abstractclassmethod diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 23d1e2b80de..665da3a85ff 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,13 +7,11 @@ assignees: '' --- **READ THIS FIRST:** If you are having trouble getting MNE-Python to work with -your own data, you should ask for help on one of our other channels: - -- [email list](https://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis) -- [Gitter (chat)](https://gitter.im/mne-tools/mne-python) +your own data, you should ask for help on the +[MNE Forum](https://mne.discourse.group). Our GitHub issue tracker is only used to report bugs and suggest improvements -to MNE-Python. For any other questions, please use the email list or Gitter. +to MNE-Python. For any other questions, please use the forum. Usage questions that are posted as GitHub issues are usually closed without being answered. 
See [the FAQ entry on filing bug reports](https://mne.tools/dev/overview/faq.html#i-think-i-found-a-bug-what-do-i-do) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 323e1429cd0..dd881afeece 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,5 @@ blank_issues_enabled: false contact_links: - - name: Mailing list - url: http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis + - name: Forum + url: https://mne.discourse.group about: For questions about analysis and usage, and announcements of interest to the neuroimaging community - - name: Gitter - url: https://gitter.im/mne-tools/mne-python - about: Users and developers can chat, troubleshoot and share code samples on our gitter channel diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 67592c2e0d2..425059fcc82 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -8,12 +8,8 @@ assignees: '' # https://github.com/matplotlib/matplotlib/blob/d9aee8eb2bd989d6cfbe21c31ff086dab935bde5/.github/ISSUE_TEMPLATE/questions.md --- -If your issue is a usage question, you should ask for help on one of our other channels: - -- [email list](https://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis) -- [Gitter (chat)](https://gitter.im/mne-tools/mne-python) +If your issue is a usage question, you should ask for help on the +[MNE Forum](https://mne.discourse.group). Our GitHub issue tracker is only used to report bugs and suggest improvements -to MNE-Python. For any other questions, please use the email list or Gitter. -Usage questions that are posted as GitHub issues are usually closed without -being answered. +to MNE-Python. For any other questions, please use the forum. diff --git a/.github/config.yml b/.github/config.yml new file mode 100644 index 00000000000..cfee8214c4a --- /dev/null +++ b/.github/config.yml @@ -0,0 +1,21 @@ +# Configuration for welcome - https://github.com/behaviorbot/welcome + +# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome + +# Comment to be posted to on first time issues +newIssueWelcomeComment: > + Hello! 👋 Thanks for opening your first issue here! ❤️ We will try to get back to you soon. 🚴🏽‍♂️ + +# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome + +# Comment to be posted to on PRs from first time contributors in your repository +newPRWelcomeComment: > + Hello! 👋 Thanks for opening your first pull request here! ❤️ We will try to get back to you soon. 🚴🏽‍♂️ + +# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge + +# Comment to be posted to on pull requests merged by a first time user +firstPRMergeComment: > + 🎉 Congrats on merging your first pull request! 🥳 Looking forward to seeing more from you in the future! 💪 + +# It is recommended to include as many gifs and emojis as possible! 
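Editor's note between files: the .circleci/config.yml changes earlier in this diff lean on two patterns that recur in the jobs below — a shared Xvfb step defined once with a YAML anchor (`_xvfb: &xvfb`) and merged into jobs with `<<: *xvfb`, and a "Check-skip" step that halts a job early via `circleci-agent step halt` when the commit message carries a skip tag. The following is a minimal standalone sketch of those two patterns, not part of the patch; the job name `example_job` is illustrative.

# Minimal sketch (illustrative, not part of the patch): shared Xvfb step via a
# YAML anchor, plus an early halt when the commit message contains a skip tag.
version: 2.1

_xvfb: &xvfb
  name: Start Xvfb virtual framebuffer
  command: |
    echo "export DISPLAY=:99" >> $BASH_ENV
    /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid \
      --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1280x1024x24 -ac

jobs:
  example_job:  # illustrative job name
    docker:
      - image: circleci/python:3.9.2-buster
    steps:
      - checkout
      - run:
          name: Check-skip
          command: |
            export COMMIT_MESSAGE=$(git log --format=oneline -n 1);
            if [[ "$COMMIT_MESSAGE" == *"[skip circle]"* ]] || [[ "$COMMIT_MESSAGE" == *"[circle skip]"* ]]; then
              echo "Skip detected, exiting job ${CIRCLE_JOB}."
              circleci-agent step halt;
            fi
      - run:
          <<: *xvfb

The same skip check is added to the build_docs, linkcheck, and interactive_test jobs above, with the tag inverted for the latter two (they run only when their tag is present).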
diff --git a/.github/workflows/codespell_and_flake.yml b/.github/workflows/codespell_and_flake.yml index 8b0affbcbdd..0a655d1b3e7 100644 --- a/.github/workflows/codespell_and_flake.yml +++ b/.github/workflows/codespell_and_flake.yml @@ -8,27 +8,12 @@ on: - '*' jobs: - check_skip: - runs-on: ubuntu-20.04 - outputs: - skip: ${{ steps.result_step.outputs.ci-skip }} - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - id: result_step - uses: mstachniuk/ci-skip@master - with: - commit-filter: '[skip ci];[ci skip];[skip github]' - commit-filter-separator: ';' - style: - needs: check_skip - if: ${{ needs.check_skip.outputs.skip == 'false' }} + timeout-minutes: 90 runs-on: ubuntu-20.04 env: CODESPELL_DIRS: 'mne/ doc/ tutorials/ examples/' - CODESPELL_SKIPS: 'doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg' + CODESPELL_SKIPS: 'doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,*.bib,' steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 diff --git a/.github/workflows/compat_minimal.yml b/.github/workflows/compat_minimal.yml index b1593219571..8d7a03e52b9 100644 --- a/.github/workflows/compat_minimal.yml +++ b/.github/workflows/compat_minimal.yml @@ -24,6 +24,7 @@ jobs: # Minimal (runs with and without testing data) job: + timeout-minutes: 90 needs: check_skip if: ${{ needs.check_skip.outputs.skip == 'false' }} name: 'py3.7' @@ -64,6 +65,14 @@ jobs: - shell: bash -el {0} run: ./tools/github_actions_infos.sh name: 'Show infos' + - shell: bash -el {0} + run: ./tools/get_testing_version.sh + name: 'Get testing version' + - uses: actions/cache@v2 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + name: 'Cache testing data' - shell: bash -el {0} run: ./tools/github_actions_download.sh name: 'Download testing data' diff --git a/.github/workflows/compat_old.yml b/.github/workflows/compat_old.yml index 74ab0b523c2..fbaaae0a828 100644 --- a/.github/workflows/compat_old.yml +++ b/.github/workflows/compat_old.yml @@ -24,6 +24,7 @@ jobs: # Old dependencies job: + timeout-minutes: 90 needs: check_skip if: ${{ needs.check_skip.outputs.skip == 'false' }} name: 'py3.6' @@ -57,6 +58,14 @@ jobs: - shell: bash -el {0} run: ./tools/github_actions_infos.sh name: 'Show infos' + - shell: bash -el {0} + run: ./tools/get_testing_version.sh + name: 'Get testing version' + - uses: actions/cache@v2 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + name: 'Cache testing data' - shell: bash -el {0} run: ./tools/github_actions_download.sh name: 'Download testing data' diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml index 92558c4ea1e..212fda84674 100644 --- a/.github/workflows/linux_conda.yml +++ 
b/.github/workflows/linux_conda.yml @@ -24,6 +24,7 @@ jobs: # Linux job: + timeout-minutes: 90 needs: check_skip if: ${{ needs.check_skip.outputs.skip == 'false' }} name: 'py3.8' @@ -53,12 +54,23 @@ jobs: ./tools/github_actions_dependencies.sh source tools/get_minimal_commands.sh name: 'Install dependencies' + - shell: bash -el {0} + run: mne_surf2bem --version + name: 'Check minimal commands' - shell: bash -el {0} run: ./tools/github_actions_install.sh name: 'Install MNE' - shell: bash -el {0} run: ./tools/github_actions_infos.sh name: 'Show infos' + - shell: bash -el {0} + run: ./tools/get_testing_version.sh + name: 'Get testing version' + - uses: actions/cache@v2 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + name: 'Cache testing data' - shell: bash -el {0} run: ./tools/github_actions_download.sh name: 'Download testing data' diff --git a/.github/workflows/linux_pip.yml b/.github/workflows/linux_pip.yml index 674dfc98bb0..9a0cebc0dcc 100644 --- a/.github/workflows/linux_pip.yml +++ b/.github/workflows/linux_pip.yml @@ -1,4 +1,4 @@ -name: 'linux / pip' +name: 'linux / pip-pre' on: push: branches: @@ -22,8 +22,9 @@ jobs: commit-filter: '[skip ci];[ci skip];[skip github]' commit-filter-separator: ';' - # PIP + non-default stim channel + log level info + # PIP-pre + non-default stim channel + log level info job: + timeout-minutes: 90 needs: check_skip if: ${{ needs.check_skip.outputs.skip == 'false' }} name: 'py3.9' @@ -51,12 +52,23 @@ jobs: ./tools/github_actions_dependencies.sh source tools/get_minimal_commands.sh name: 'Install dependencies' + - shell: bash -el {0} + run: mne_surf2bem --version + name: 'Check minimal commands' - shell: bash -el {0} run: ./tools/github_actions_install.sh name: 'Install MNE' - shell: bash -el {0} run: ./tools/github_actions_infos.sh name: 'Show infos' + - shell: bash -el {0} + run: ./tools/get_testing_version.sh + name: 'Get testing version' + - uses: actions/cache@v2 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + name: 'Cache testing data' - shell: bash -el {0} run: ./tools/github_actions_download.sh name: 'Download testing data' diff --git a/.github/workflows/macos_conda.yml b/.github/workflows/macos_conda.yml index 475ccd33088..0cbd6d25973 100644 --- a/.github/workflows/macos_conda.yml +++ b/.github/workflows/macos_conda.yml @@ -23,6 +23,7 @@ jobs: commit-filter-separator: ';' job: + timeout-minutes: 90 needs: check_skip if: ${{ needs.check_skip.outputs.skip == 'false' }} name: 'py3.8' @@ -50,12 +51,23 @@ jobs: ./tools/github_actions_dependencies.sh source tools/get_minimal_commands.sh name: 'Install dependencies' + - shell: bash -el {0} + run: mne_surf2bem --version + name: 'Check minimal commands' - shell: bash -el {0} run: ./tools/github_actions_install.sh name: 'Install MNE' - shell: bash -el {0} run: ./tools/github_actions_infos.sh name: 'Show infos' + - shell: bash -el {0} + run: ./tools/get_testing_version.sh + name: 'Get testing version' + - uses: actions/cache@v2 + with: + key: ${{ env.TESTING_VERSION }} + path: ~/mne_data + name: 'Cache testing data' - shell: bash -el {0} run: ./tools/github_actions_download.sh name: 'Download testing data' diff --git a/.gitignore b/.gitignore index 0a14f5f8047..499eb271dcd 100644 --- a/.gitignore +++ b/.gitignore @@ -64,6 +64,7 @@ doc/coverages doc/samples doc/*.dat doc/fil-result +doc/optipng.exe cover *.html diff --git a/.mailmap b/.mailmap index 72ef02e1f4a..1a4fe58e265 100644 --- a/.mailmap +++ b/.mailmap @@ -7,6 +7,8 @@ Martin Luessi mluessi@nmr.mgh.harvard.edu martin 
Martin Luessi martin Matti Hämäläinen Matti Hämäläinen +Matti Hämäläinen Matti Hamalainen +Matti Hämäläinen Matti Hamalainen Matti Hämäläinen mshamalainen Christian Brodbeck christianmbrodbeck Christian Brodbeck Christian Brodbeck @@ -26,12 +28,17 @@ Denis A. Engemann Denis A. Engemann Denis A. Engemann Daniel Strohmeier joewalter Daniel Strohmeier Daniel Strohmeier +Dan G. Wakeman Daniel G. Wakeman +Dan G. Wakeman dgwakeman Dan G. Wakeman dgwakeman +Dan G. Wakeman Daniel Wakeman Teon Brooks Teon Brooks Teon Teon Brooks Teon Brooks Teon Brooks -Romain Trachel +Romain Trachel Romain Trachel +Romain Trachel Romain Trachel +Romain Trachel trachelr Roman Goj Andrew Dykstra Yousra Bekhti Yoursa BEKHTI @@ -50,7 +57,6 @@ Mainak Jas Mainak Jas Mainak Jas mainakjas Mainak Jas Mainak Jas Mainak Jas Mainak Jas -Dan G. Wakeman Daniel Wakeman Marmaduke Woodman maedoc Brad Buran Brad Buran Cathy Nangini CN @@ -68,13 +74,16 @@ Basile Pinsard Clément Moutard Manoj Kumar MechCoder Ingoo Lee dlsrnsi -Jona Sassenhagen +Jona Sassenhagen Jona Sassenhagen +Jona Sassenhagen jona-sassenhagen +Jona Sassenhagen jona-sassenhagen@ Jona Sassenhagen jona-sassenhagen Jona Sassenhagen jona Jona Sassenhagen sassenha Jona Sassenhagen jona.sassenhagen@gmail.com Yousra Bekhti Yousra BEKHTI Ross Maddox unknown +Jaakko Leppakangas Jaakko Leppakangas Jaakko Leppakangas jaeilepp Jaakko Leppakangas jaeilepp Jair Montoya jmontoyam @@ -82,31 +91,40 @@ Natalie Klein natalieklein Daniel McCloy Daniel McCloy Daniel McCloy drammock Fede Raimondo Fede +Fede Raimondo Fede Raimondo Fede Raimondo Fede Raimondo +Fede Raimondo Federico Raimondo +Fede Raimondo Federico Raimondo Fede Raimondo Fede Raimondo Emily Stephen emilyps14 +Emily Stephen Emily P. Stephen Marian Dovgialo Guillaume Dumas deep-introspection Guillaume Dumas Guillaume Dumas Félix Raimundo Felix Raimundo Asish Panda kaichogami -Mikolaj Magnuski mmagnuski +Mikołaj Magnuski mmagnuski +Mikołaj Magnuski Mikolaj Magnuski Alexandre Barachant alexandre barachant Lorenzo Alfine lorrandal Paul Pasler ppasler Jon Houck Jon Houck Cristóbal Moënne-Loccoz Cristóbal Chris Holdgraf Chris Holdgraf +Chris Holdgraf Christopher Holdgraf Britta Westner britta-wstnr Jesper Duemose Nielsen jdue Laetitia Grabot LaetitiaG Nicolas Barascud nbara +Nicolas Barascud Nicolas Barascud <10333715+nbara@users.noreply.github.com> Lukáš Hejtmánek hejtmy Ramonapariciog Apariciogarcia ramonapariciog Mathurin Massias mathurinm Simon Kern skjerns -Simon Kern <14980558+skjerns@users.noreply.github.com> skjerns -S. M. Gutstein smgutstein +Simon Kern skjerns <14980558+skjerns@users.noreply.github.com> +Simon Kern Simon Kern <14980558+skjerns@users.noreply.github.com> +Steven M. Gutstein S. M. Gutstein +Steven M. 
Gutstein smgutstein Robin Tibor Schirrmeister robintibor Anne-Sophie Dubarry annesodub Claire Braboszcz claire-braboszcz @@ -114,9 +132,11 @@ Larry Eisenman lneisenman Mathurin Massias mathurinm Pierre Ablin pierreablin Erik Hornberger er06645810 +Erik Hornberger Erik Hornberger Kostiantyn Maksymenko Maksymenko Kostiantyn Kostiantyn Maksymenko kostiantyn maksymenko Nathalie Gayraud Nathalie +Nathalie Gayraud Nathalie Dominik Krzemiński dokato Ezequiel Mikulan <39155887+ezemikulan@users.noreply.github.com> ezemikulan <39155887+ezemikulan@users.noreply.github.com> Hubert Banville hubertjb @@ -130,7 +150,9 @@ Katarina Slama katarinaslama Evgenii Kalenkovich kalenkovich Thomas Donoghue Tom Steve Matindi stevemats -Legrand Nicolas LegrandNico +Nicolas Legrand LegrandNico +Nicolas Legrand Legrand Nicolas +Nicolas Legrand Nicolas Legrand Johannes Kasper jeythekey <44215387+jeythekey@users.noreply.github.com> Thomas Radman Joshua J Bear @@ -138,6 +160,7 @@ Paul Roujansky José C. García Alanis José C. García Alanis <12409129+JoseAlanis@users.noreply.github.com> José C. García Alanis José C. G. Alanis <12409129+JoseAlanis@users.noreply.github.com> José C. García Alanis Jose C. G. Alanis <12409129+JoseAlanis@users.noreply.github.com> +José C. García Alanis Jose Alanis Kostiantyn Maksymenko Maksymenko Kostiantyn Samuel Deslauriers-Gauthier Samuel Deslauriers-Gauthier Eberhard Eich ebeich @@ -154,3 +177,29 @@ Demetres Kostas <40433000+kostasde@users.noreply.github.com> kostasde <40433000+ Mohammad Daneshzand <55800429+mdaneshzand@users.noreply.github.com> mdaneshzand <55800429+mdaneshzand@users.noreply.github.com> Mohamed Sherif mohdsherif Dmitrii Altukhov dmalt +Jeff Stout jstout211 +Eduard Ort examplename +Eduard Ort eort +Tod Flak <45362686+todflak@users.noreply.github.com> todflak <45362686+todflak@users.noreply.github.com> +Hongjiang Ye YE Hongjiang +Victoria Peterson vpeterson +Phillip Alday Phillip Alday +Phillip Alday Phillip Alday +Niklas Wilming Niklas Wilming +Ellen Lau ellenlau +Kambiz Tabavi kambysese +Kambiz Tabavi Kambiz Tavabi +Lenny Varghese lennyvarghese +Mads Jensen mads jensen +Christoph Dinh Christoph Dinh +Matteo Visconti di Oleggio Castello Matteo Visconti dOC +Marijn van Vliet Marijn van Vliet +Jean-Baptiste Schiratti Jean-Baptiste SCHIRATTI +Pedro Silva pbnsilva +Christopher J. Bailey Chris Bailey +Alexander Rudiuk Alexander Rudiuk +Tanay Gahlot Tanay +Erkka Heinila Teekuningas +Burkhard Maess Burkhard Maess +Lorenz Esch Lorenz Esch +Joris Van den Bossche diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 9fd19582f6d..a104cd32b5a 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -7,9 +7,8 @@ documentation improvements (even just typo corrections). The best way to start contributing is by `opening an issue`_ on our GitHub page to discuss your ideas for changes or enhancements, or to tell us about behavior that you think might be a bug in MNE-Python. *For general troubleshooting of scripts that use -MNE-Python*, you should instead write to the `MNE mailing list`_ or chat with -developers on the `MNE gitter channel`_. Users and contributors to MNE-Python -are expected to follow our `code of conduct`_. +MNE-Python*, you should instead post on the `MNE Forum`_. Users and +contributors to MNE-Python are expected to follow our `code of conduct`_. The `contributing guide`_ has details on the preferred contribution workflow and how best to configure your system for a smooth experience contributing to @@ -17,8 +16,6 @@ MNE-Python. .. 
_`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose -.. _`MNE mailing list`: http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis -.. _`MNE gitter channel`: https://gitter.im/mne-tools/mne-python - -.. _`code of conduct`: https://github.com/mne-tools/.github/blob/master/CODE_OF_CONDUCT.md +.. _`MNE Forum`: https://mne.discourse.group +.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md .. _`contributing guide`: https://mne-tools.github.io/dev/install/contributing.html diff --git a/MANIFEST.in b/MANIFEST.in index 3dc449cb44e..07e5e0bf6dc 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,7 @@ include LICENSE.txt include SECURITY.md include requirements.txt include requirements_testing.txt +include requirements_testing_extra.txt include requirements_doc.txt include mne/__init__.py @@ -19,6 +20,7 @@ recursive-include mne/data/image * recursive-include mne/data/fsaverage * include mne/datasets/_fsaverage/root.txt include mne/datasets/_fsaverage/bem.txt +include mne/datasets/_infant/*.txt recursive-include mne/channels/data/layouts * recursive-include mne/channels/data/montages * @@ -35,7 +37,6 @@ recursive-include mne mne/datasets *.csv include mne/io/edf/gdf_encodes.txt include mne/datasets/sleep_physionet/SHA1SUMS include mne/externals/tqdm/_tqdm/tqdm.1 -include mne/viz/_brain/tests/test.ipynb ### Exclude @@ -50,6 +51,7 @@ exclude .coveragerc exclude *.yml exclude ignore_words.txt exclude .mailmap +exclude codemeta.json recursive-exclude mne *.pyc recursive-exclude doc * diff --git a/Makefile b/Makefile index 47395575c99..cf4bbe1f833 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ PYTHON ?= python PYTESTS ?= py.test CTAGS ?= ctags -CODESPELL_SKIPS ?= "doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg" +CODESPELL_SKIPS ?= "doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg,references.bib" CODESPELL_DIRS ?= mne/ doc/ tutorials/ examples/ all: clean inplace test test-doc diff --git a/README.rst b/README.rst index e2a380e5bfd..405eee64fa5 100644 --- a/README.rst +++ b/README.rst @@ -4,19 +4,19 @@ |MNE|_ -.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=master -.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push +.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=main +.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:main+event:push -.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=master -.. 
_GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push +.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=main +.. _GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:main+event:push -.. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=master -.. _Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master +.. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=main +.. _Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=main .. |Circle| image:: https://circleci.com/gh/mne-tools/mne-python.svg?style=shield .. _Circle: https://circleci.com/gh/mne-tools/mne-python -.. |Codecov| image:: https://codecov.io/gh/mne-tools/mne-python/branch/master/graph/badge.svg +.. |Codecov| image:: https://codecov.io/gh/mne-tools/mne-python/branch/main/graph/badge.svg .. _Codecov: https://codecov.io/gh/mne-tools/mne-python .. |PyPI| image:: https://img.shields.io/pypi/dm/mne.svg?label=PyPI%20downloads @@ -71,7 +71,7 @@ To install the latest version of the code using pip_ open a terminal and type: .. code-block:: bash - pip install -U https://github.com/mne-tools/mne-python/archive/master.zip + pip install -U https://github.com/mne-tools/mne-python/archive/main.zip To get the latest code using `git `__, open a terminal and type: @@ -80,7 +80,7 @@ To get the latest code using `git `__, open a terminal and git clone git://github.com/mne-tools/mne-python.git Alternatively, you can also download a -`zip file of the latest development version `__. +`zip file of the latest development version `__. Dependencies @@ -115,10 +115,10 @@ Please see the documentation on the MNE-Python homepage: https://mne.tools/dev/install/contributing.html -Mailing list -^^^^^^^^^^^^ +Forum +^^^^^^ -http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis +https://mne.discourse.group Licensing diff --git a/SECURITY.md b/SECURITY.md index 4390f1a9c13..292e7e8e357 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -10,9 +10,9 @@ without a proper 6-month deprecation cycle. 
| Version | Supported | | ------- | ------------------------ | -| 0.22.x | :heavy_check_mark: (dev) | -| 0.21.x | :heavy_check_mark: | -| < 0.21 | :x: | +| 0.23.x | :heavy_check_mark: (dev) | +| 0.22.x | :heavy_check_mark: | +| < 0.22 | :x: | ## Reporting a Vulnerability diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 535d2c69e57..d7e26182cbe 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -3,7 +3,7 @@ trigger: batch: False branches: include: - - 'master' + - 'main' - 'maint/*' pr: branches: @@ -19,6 +19,7 @@ stages: pool: vmImage: 'ubuntu-18.04' variables: + DECODE_PERCENTS: 'false' RET: 'true' steps: - bash: | @@ -32,6 +33,8 @@ stages: - stage: Main condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) dependsOn: Check + variables: + AZURE_CI: 'true' jobs: - job: Style pool: @@ -86,12 +89,6 @@ stages: displayName: 'Install Ubuntu dependencies' - bash: | source tools/get_minimal_commands.sh - echo "##vso[task.setvariable variable=MNE_ROOT]${MNE_ROOT}" - echo "##vso[task.setvariable variable=PATH]${PATH}" - echo "##vso[task.setvariable variable=LD_LIBRARY_PATH]${LD_LIBRARY_PATH}" - echo "##vso[task.setvariable variable=NEUROMAG2FT_ROOT]${NEUROMAG2FT_ROOT}" - echo "##vso[task.setvariable variable=FREESURFER_HOME]${FREESURFER_HOME}" - echo "##vso[task.setvariable variable=MNE_SKIP_FS_FLASH_CALL]${MNE_SKIP_FS_FLASH_CALL}" displayName: 'Install minimal commands' - bash: | echo $PATH @@ -110,8 +107,15 @@ stages: - bash: | set -e python -m pip install --upgrade pip setuptools - python -m pip install --upgrade numpy scipy vtk -r requirements.txt -r requirements_testing.txt codecov + python -m pip install --upgrade numpy scipy vtk -r requirements.txt -r requirements_testing.txt -r requirements_testing_extra.txt codecov displayName: 'Install dependencies with pip' + - bash: source tools/get_testing_version.sh + displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: /home/vsts/mne_data + displayName: 'Cache testing data' - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" displayName: 'Get test data' - script: pytest -m "ultraslowtest" --tb=short --cov=mne -vv mne @@ -142,12 +146,19 @@ stages: conda env update --file server_environment.yml pip uninstall -yq mne pip install -ve . 
- pip install pytest pytest-cov pytest-timeout pytest-sugar pytest-xdist flake8 codecov + pip install -r requirements_testing.txt -r requirements_testing_extra.txt codecov echo "##vso[task.setvariable variable=PATH]${PATH}" echo "##vso[task.setvariable variable=LD_LIBRARY_PATH]${LD_LIBRARY_PATH}" displayName: 'Install dependencies' - script: mne sys_info displayName: 'Print config and test access to commands' + - bash: source tools/get_testing_version.sh + displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: /home/vsts/mne_data + displayName: 'Cache testing data' - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" displayName: 'Get test data' - script: pytest --tb=short --cov=mne -vv mne/viz @@ -187,7 +198,7 @@ stages: TEST_MODE: 'pip' PYTHON_VERSION: '3.7' 3.8 pip pre: - TEST_MODE: 'pre-pip' + TEST_MODE: 'pip-pre' PYTHON_VERSION: '3.8' OPENBLAS_CORETYPE: 'prescott' # workaround for https://github.com/numpy/numpy/issues/16913 steps: @@ -196,7 +207,7 @@ stages: versionSpec: $(PYTHON_VERSION) architecture: $(PYTHON_ARCH) addToPath: true - condition: in(variables['TEST_MODE'], 'pip', 'pre-pip') + condition: in(variables['TEST_MODE'], 'pip', 'pip-pre') displayName: 'Get Python' # https://docs.microsoft.com/en-us/azure/devops/pipelines/ecosystems/anaconda # https://github.com/MicrosoftDocs/pipelines-anaconda @@ -212,23 +223,9 @@ stages: displayName: Install OpenGL - bash: | set -e - python -m pip install --upgrade pip setuptools - python -m pip install --upgrade numpy scipy vtk - python -m pip install --upgrade --use-deprecated=legacy-resolver --only-binary="numba,llvmlite" -r requirements.txt -r requirements_testing.txt - python -m pip install codecov - condition: eq(variables['TEST_MODE'], 'pip') + ./tools/azure_dependencies.sh + condition: in(variables['TEST_MODE'], 'pip', 'pip-pre') displayName: 'Install dependencies with pip' - - bash: | - set -e - python -m pip install --upgrade pip setuptools - python -m pip install --use-deprecated=legacy-resolver --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" numpy - python -m pip install --use-deprecated=legacy-resolver --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow - python -m pip install --upgrade --only-binary vtk vtk; - python -m pip install https://github.com/pyvista/pyvista/zipball/master - python -m pip install https://github.com/pyvista/pyvistaqt/zipball/master - python -m pip install --use-deprecated=legacy-resolver --only-binary="numba,llvmlite" -r requirements.txt -r requirements_testing.txt codecov - condition: eq(variables['TEST_MODE'], 'pre-pip') - displayName: 'Install dependencies with pip --pre' - powershell: | Set-StrictMode -Version Latest $ErrorActionPreference = "Stop" @@ -236,7 +233,7 @@ stages: conda update -n base -c defaults conda conda env update --name base --file environment.yml pip uninstall -yq mne - pip install -r requirements_testing.txt codecov + pip install -r requirements_testing.txt -r requirements_testing_extra.txt codecov condition: eq(variables['TEST_MODE'], 'conda') displayName: 'Install dependencies with conda' - script: python setup.py develop @@ -245,6 +242,13 @@ stages: displayName: 'Print config and test access to commands' - script: python -c "import numpy; numpy.show_config()" displayName: Print NumPy config + - bash: source tools/get_testing_version.sh + 
displayName: 'Get testing version' + - task: Cache@2 + inputs: + key: $(testing_version) + path: C:\Users\VssAdministrator\mne_data + displayName: 'Cache testing data' - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" displayName: 'Get test data' - script: pytest -m "not slowtest" --tb=short --cov=mne -vv mne diff --git a/codecov.yml b/codecov.yml index 2963824ca62..a011f80f1c6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,4 +1,6 @@ comment: false +github_checks: # too noisy, even though "a" interactively disables them + annotations: false codecov: notify: @@ -8,6 +10,7 @@ coverage: status: patch: default: + informational: true target: 95% if_no_uploads: error if_not_found: success @@ -15,6 +18,7 @@ coverage: project: default: false library: + informational: true target: 90% if_no_uploads: error if_not_found: success diff --git a/codemeta.json b/codemeta.json new file mode 100644 index 00000000000..2ff4460bd9e --- /dev/null +++ b/codemeta.json @@ -0,0 +1,1414 @@ +{ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "@type": "SoftwareSourceCode", + "license": "https://spdx.org/licenses/BSD-3-Clause", + "codeRepository": "git+https://github.com/mne-tools/mne-python.git", + "dateCreated": "2010-12-26", + "datePublished": "2014-08-04", + "dateModified": "2020-12-17", + "downloadUrl": "https://github.com/mne-tools/mne-python/archive/v0.22.0.zip", + "issueTracker": "https://github.com/mne-tools/mne-python/issues", + "name": "MNE-Python", + "version": "0.22.0", + "description": "MNE-Python is an open-source Python package for exploring, visualizing, and analyzing human neurophysiological data. It provides methods for data input/output, preprocessing, visualization, source estimation, time-frequency analysis, connectivity analysis, machine learning, and statistics.", + "applicationCategory": "Neuroscience", + "developmentStatus": "active", + "referencePublication": "https://doi.org/10.3389/fnins.2013.00267", + "keywords": [ + "MEG", + "EEG", + "fNIRS", + "ECoG", + "sEEG", + "DBS" + ], + "programmingLanguage": [ + "Python" + ], + "operatingSystem": [ + "Linux", + "Windows", + "macOS" + ], + "softwareRequirements": [ + "python>=3.6", + "numpy>=1.15.4", + "scipy>=1.1.0" + ], + "author": [ + { + "@type":"Person", + "email":"larson.eric.d@gmail.com", + "givenName":"Eric", + "familyName": "Larson" + }, + { + "@type":"Person", + "email":"alexandre.gramfort@inria.fr", + "givenName":"Alexandre", + "familyName": "Gramfort" + }, + { + "@type":"Person", + "email":"denis.engemann@gmail.com", + "givenName":"Denis A", + "familyName": "Engemann" + }, + { + "@type":"Person", + "email":"jaeilepp@gmail.com", + "givenName":"Jaakko", + "familyName": "Leppakangas" + }, + { + "@type":"Person", + "email":"christianmbrodbeck@gmail.com", + "givenName":"Christian", + "familyName": "Brodbeck" + }, + { + "@type":"Person", + "email":"mainakjas@gmail.com", + "givenName":"Mainak", + "familyName": "Jas" + }, + { + "@type":"Person", + "email":"teon.brooks@gmail.com", + "givenName":"Teon", + "familyName": "Brooks" + }, + { + "@type":"Person", + "email":"jona.sassenhagen@gmail.com", + "givenName":"Jona", + "familyName": "Sassenhagen" + }, + { + "@type":"Person", + "email":"mluessi@nmr.mgh.harvard.edu", + "givenName":"Martin", + "familyName": "Luessi" + }, + { + "@type":"Person", + "email":"jeanremi.kibng+github@gmail.com", + "givenName":"Jean-Remi", + "familyName": "King" + }, + { + "@type":"Person", + "email":"roman.goj@gmail.com", + "givenName":"Roman", + "familyName": "Goj" + }, + { + 
"@type":"Person", + "email":"dan@mccloy.info", + "givenName":"Daniel", + "familyName": "McCloy" + }, + { + "@type":"Person", + "email":"wronk.mark@gmail.com", + "givenName":"Mark", + "familyName": "Wronkiewicz" + }, + { + "@type":"Person", + "email":"w.m.vanvliet@gmail.com", + "givenName":"Marijn", + "familyName": "van Vliet" + }, + { + "@type":"Person", + "email":"guillaume.favelier@gmail.com", + "givenName":"Guillaume", + "familyName": "Favelier" + }, + { + "@type":"Person", + "email":"clemens.brunner@gmail.com", + "givenName":"Clemens", + "familyName": "Brunner" + }, + { + "@type":"Person", + "email":"choldgraf@gmail.com", + "givenName":"Chris", + "familyName": "Holdgraf" + }, + { + "@type":"Person", + "email":"mailsik@gmail.com", + "givenName":"Joan", + "familyName": "Massich" + }, + { + "@type":"Person", + "email":"yousra.bekhti@gmail.com", + "givenName":"Yousra", + "familyName": "Bekhti" + }, + { + "@type":"Person", + "email":"leggitta3@gmail.com", + "givenName":"Alan", + "familyName": "Leggitt" + }, + { + "@type":"Person", + "email":"andrew.r.dykstra@gmail.com", + "givenName":"Andrew", + "familyName": "Dykstra" + }, + { + "@type":"Person", + "email":"romain.trachel@inria.fr", + "givenName":"Romain", + "familyName": "Trachel" + }, + { + "@type":"Person", + "email":"desantis.lnz@gmail.com", + "givenName":"Lorenzo", + "familyName": "De Santis" + }, + { + "@type":"Person", + "email":"asishrocks95@gmail.com", + "givenName":"Asish", + "familyName": "Panda" + }, + { + "@type":"Person", + "email":"richard.hoechenberger@gmail.com", + "givenName":"Richard", + "familyName": "Höchenberger" + }, + { + "@type":"Person", + "email":"stefan.appelhoff@mailbox.org", + "givenName":"Stefan", + "familyName": "Appelhoff" + }, + { + "@type":"Person", + "email":"mmagnuski@swps.edu.pl", + "givenName":"Mikołaj", + "familyName": "Magnuski" + }, + { + "@type":"Person", + "email":"martin.billinger@tugraz.at", + "givenName":"Martin", + "familyName": "Billinger" + }, + { + "@type":"Person", + "email":"britta.wstnr@gmail.com", + "givenName":"Britta", + "familyName": "Westner" + }, + { + "@type":"Person", + "email":"dgwakeman@gmail.com", + "givenName":"Dan G", + "familyName": "Wakeman" + }, + { + "@type":"Person", + "email":"daniel.strohmeier@googlemail.com", + "givenName":"Daniel", + "familyName": "Strohmeier" + }, + { + "@type":"Person", + "email":"", + "givenName":"Robert", + "familyName": "Luke" + }, + { + "@type":"Person", + "email":"hari@nmr.mgh.harvard.edu", + "givenName":"Hari", + "familyName": "Bharadwaj" + }, + { + "@type":"Person", + "email":"tal.linzen@gmail.com", + "givenName":"Tal", + "familyName": "Linzen" + }, + { + "@type":"Person", + "email":"alexandre.barachant@gmail.com", + "givenName":"Alexandre", + "familyName": "Barachant" + }, + { + "@type":"Person", + "email":"emilyr@nmr.mgh.harvard.edu", + "givenName":"Emily", + "familyName": "Ruzich" + }, + { + "@type":"Person", + "email":"bailey.cj@gmail.com", + "givenName":"Christopher J", + "familyName": "Bailey" + }, + { + "@type":"Person", + "email":"clement.moutard@gmail.com", + "givenName":"Clément", + "familyName": "Moutard" + }, + { + "@type":"Person", + "email":"luke.bloy@gmail.com", + "givenName":"Luke", + "familyName": "Bloy" + }, + { + "@type":"Person", + "email":"federaimondo@gmail.com", + "givenName":"Fede", + "familyName": "Raimondo" + }, + { + "@type":"Person", + "email":"jnu@iki.fi", + "givenName":"Jussi", + "familyName": "Nurminen" + }, + { + "@type":"Person", + "email":"montoya.jair.m@gmail.com", + "givenName":"Jair", + "familyName": 
"Montoya" + }, + { + "@type":"Person", + "email":"mmwoodman@gmail.com", + "givenName":"Marmaduke", + "familyName": "Woodman" + }, + { + "@type":"Person", + "email":"dlsrnsladlek@naver.com", + "givenName":"Ingoo", + "familyName": "Lee" + }, + { + "@type":"Person", + "email":"nfoti01@gmail.com", + "givenName":"Nick", + "familyName": "Foti" + }, + { + "@type":"Person", + "email":"cnangini@gmail.com", + "givenName":"Cathy", + "familyName": "Nangini" + }, + { + "@type":"Person", + "email":"joialanisson@gmail.com", + "givenName":"José C", + "familyName": "García Alanis" + }, + { + "@type":"Person", + "email":"aestrivex@gmail.com", + "givenName":"Roan", + "familyName": "LaPlante" + }, + { + "@type":"Person", + "email":"rkmaddox@uw.edu", + "givenName":"Ross", + "familyName": "Maddox" + }, + { + "@type":"Person", + "email":"chdinh@nmr.mgh.harvard.edu", + "givenName":"Christoph", + "familyName": "Dinh" + }, + { + "@type":"Person", + "email":"olaf.hauk@mrc-cbu.cam.ac.uk", + "givenName":"Olaf", + "familyName": "Hauk" + }, + { + "@type":"Person", + "email":"deep@introspection.eu", + "givenName":"Guillaume", + "familyName": "Dumas" + }, + { + "@type":"Person", + "email":"adam2392@gmail.com", + "givenName":"Adam", + "familyName": "Li" + }, + { + "@type":"Person", + "email":"paul@ppasler.de", + "givenName":"Paul", + "familyName": "Pasler" + }, + { + "@type":"Person", + "email":"stefan.repplinger@posteo.net", + "givenName":"Stefan", + "familyName": "Repplinger" + }, + { + "@type":"Person", + "email":"thomas.hartmann@th-ht.de", + "givenName":"Thomas", + "familyName": "Hartmann" + }, + { + "@type":"Person", + "email":"alxanderr@gmail.com", + "givenName":"Alexander", + "familyName": "Rudiuk" + }, + { + "@type":"Person", + "email":"bburan@galenea.com", + "givenName":"Brad", + "familyName": "Buran" + }, + { + "@type":"Person", + "email":"mathurin.massias@gmail.com", + "givenName":"Mathurin", + "familyName": "Massias" + }, + { + "@type":"Person", + "email":"msh@nmr.mgh.harvard.edu", + "givenName":"Matti", + "familyName": "Hämäläinen" + }, + { + "@type":"Person", + "email":"pravsripad@gmail.com", + "givenName":"Praveen", + "familyName": "Sripad" + }, + { + "@type":"Person", + "email":"christopherrmullins@gmail.com", + "givenName":"Christopher", + "familyName": "Mullins" + }, + { + "@type":"Person", + "email":"gamaz3ps@gmail.com", + "givenName":"Félix", + "familyName": "Raimundo" + }, + { + "@type":"Person", + "email":"phillip.alday@mpi.nl", + "givenName":"Phillip", + "familyName": "Alday" + }, + { + "@type":"Person", + "email":"simon@simonster.com", + "givenName":"Simon", + "familyName": "Kornblith" + }, + { + "@type":"Person", + "email":"debian@onerussian.com", + "givenName":"Yaroslav", + "familyName": "Halchenko" + }, + { + "@type":"Person", + "email":"jeythekey@tutanota.com", + "givenName":"Johannes", + "familyName": "Kasper" + }, + { + "@type":"Person", + "email":"kd889@nyu.edu", + "givenName":"Keith", + "familyName": "Doelling" + }, + { + "@type":"Person", + "email":"mje.mads@gmail.com", + "givenName":"Mads", + "familyName": "Jensen" + }, + { + "@type":"Person", + "email":"tanaygahlot@gmail.com", + "givenName":"Tanay", + "familyName": "Gahlot" + }, + { + "@type":"Person", + "email":"adonay.s.nunes@gmail.com", + "givenName":"Adonay", + "familyName": "Nunes" + }, + { + "@type":"Person", + "email":"", + "givenName":"Dirk", + "familyName": "Gütlin" + }, + { + "@type":"Person", + "email":"yuhanluo1994@gmail.com", + "givenName":"Yu-Han", + "familyName": "Luo" + }, + { + "@type":"Person", + "email":"kjs@llama", + 
"givenName":"", + "familyName": "kjs" + }, + { + "@type":"Person", + "email":"alejandro.weinstein@gmail.com", + "givenName":"Alejandro", + "familyName": "Weinstein" + }, + { + "@type":"Person", + "email":"camilo@neurostat.mit.edu", + "givenName":"Camilo", + "familyName": "Lamus" + }, + { + "@type":"Person", + "email":"cmmoenne@gmail.com", + "givenName":"Cristóbal", + "familyName": "Moënne-Loccoz" + }, + { + "@type":"Person", + "email":"neklein@andrew.cmu.edu", + "givenName":"Natalie", + "familyName": "Klein" + }, + { + "@type":"Person", + "email":"aprockhill206@gmail.com", + "givenName":"Alex", + "familyName": "Rockhill" + }, + { + "@type":"Person", + "email":"antti.rantala90@gmail.com", + "givenName":"Antti", + "familyName": "Rantala" + }, + { + "@type":"Person", + "email":"burkhard.maess@arcor.de", + "givenName":"Burkhard", + "familyName": "Maess" + }, + { + "@type":"Person", + "email":"erkkahe@gmail.com", + "givenName":"Erkka", + "familyName": "Heinila" + }, + { + "@type":"Person", + "email":"", + "givenName":"Henrich", + "familyName": "Kolkhorst" + }, + { + "@type":"Person", + "email":"jeff.hanna@gmail.com", + "givenName":"Jeff", + "familyName": "Hanna" + }, + { + "@type":"Person", + "email":"jon.houck@gmail.com", + "givenName":"Jon", + "familyName": "Houck" + }, + { + "@type":"Person", + "email":"saketkc@gmail.com", + "givenName":"Saket", + "familyName": "Choudhary" + }, + { + "@type":"Person", + "email":"zuxfoucault@gmail.com", + "givenName":"Fu-Te", + "familyName": "Wong" + }, + { + "@type":"Person", + "email":"hubert.jbanville@gmail.com", + "givenName":"Hubert", + "familyName": "Banville" + }, + { + "@type":"Person", + "email":"", + "givenName":"Ivana", + "familyName": "Kojcic" + }, + { + "@type":"Person", + "email":"jdue@dtu.dk", + "givenName":"Jesper Duemose", + "familyName": "Nielsen" + }, + { + "@type":"Person", + "email":"", + "givenName":"Kaisu", + "familyName": "Lankinen" + }, + { + "@type":"Person", + "email":"ktavabi@gmail.com", + "givenName":"Kambiz", + "familyName": "Tabavi" + }, + { + "@type":"Person", + "email":"makkostya@ukr.net", + "givenName":"Kostiantyn", + "familyName": "Maksymenko" + }, + { + "@type":"Person", + "email":"louist87@gmail.com", + "givenName":"Louis", + "familyName": "Thibault" + }, + { + "@type":"Person", + "email":"nathalie.gayraud@inria.fr", + "givenName":"Nathalie", + "familyName": "Gayraud" + }, + { + "@type":"Person", + "email":"ward.nickjames@gmail.com", + "givenName":"Nick", + "familyName": "Ward" + }, + { + "@type":"Person", + "email":"antoine.gauthier@ensta.fr", + "givenName":"Antoine", + "familyName": "Gauthier" + }, + { + "@type":"Person", + "email":"basile.pinsard@umontreal.ca", + "givenName":"Basile", + "familyName": "Pinsard" + }, + { + "@type":"Person", + "email":"christian.oreilly@gmail.com", + "givenName":"Christian", + "familyName": "O'Reilly" + }, + { + "@type":"Person", + "email":"emilyps14@gmail.com", + "givenName":"Emily", + "familyName": "Stephen" + }, + { + "@type":"Person", + "email":"erik.hornberger@shi-g.com", + "givenName":"Erik", + "familyName": "Hornberger" + }, + { + "@type":"Person", + "email":"e.kalenkovich@gmail.com", + "givenName":"Evgenii", + "familyName": "Kalenkovich" + }, + { + "@type":"Person", + "email":"", + "givenName":"Fahimeh", + "familyName": "Mamashli" + }, + { + "@type":"Person", + "email":"hafiza.taj@gmail.com", + "givenName":"Hafeza", + "familyName": "Anevar" + }, + { + "@type":"Person", + "email":"johann.benerradi@gmail.com", + "givenName":"Johann", + "familyName": "Benerradi" + }, + { + 
"@type":"Person", + "email":"leisenman@wustl.edu", + "givenName":"Larry", + "familyName": "Eisenman" + }, + { + "@type":"Person", + "email":"Lorenz.Esch@tu-ilmenau.de", + "givenName":"Lorenz", + "familyName": "Esch" + }, + { + "@type":"Person", + "email":"", + "givenName":"Nicolas", + "familyName": "Barascud" + }, + { + "@type":"Person", + "email":"legrand@cyceron.fr", + "givenName":"Nicolas", + "familyName": "Legrand" + }, + { + "@type":"Person", + "email":"sam.deslauriers@gmail.com", + "givenName":"Samuel", + "familyName": "Deslauriers-Gauthier" + }, + { + "@type":"Person", + "email":"simon.kern@online.de", + "givenName":"Simon", + "familyName": "Kern" + }, + { + "@type":"Person", + "email":"victor.ferat@live.Fr", + "givenName":"Victor", + "familyName": "Férat" + }, + { + "@type":"Person", + "email":"alexander.kovrig@gmail.com", + "givenName":"Alexander", + "familyName": "Kovrig" + }, + { + "@type":"Person", + "email":"a.pascarella@iac.cnr.it", + "givenName":"Annalisa", + "familyName": "Pascarella" + }, + { + "@type":"Person", + "email":"raymon92@gmail.com", + "givenName":"Dominik", + "familyName": "Krzemiński" + }, + { + "@type":"Person", + "email":"", + "givenName":"Ezequiel", + "familyName": "Mikulan" + }, + { + "@type":"Person", + "email":"jean.baptiste.schiratti@gmail.com", + "givenName":"Jean-Baptiste", + "familyName": "Schiratti" + }, + { + "@type":"Person", + "email":"", + "givenName":"Jen", + "familyName": "Evans" + }, + { + "@type":"Person", + "email":"kylemath@gmail.com", + "givenName":"Kyle", + "familyName": "Mathewson" + }, + { + "@type":"Person", + "email":"lgwilliams90@gmail.com", + "givenName":"Laura", + "familyName": "Gwilliams" + }, + { + "@type":"Person", + "email":"", + "givenName":"Lenny", + "familyName": "Varghese" + }, + { + "@type":"Person", + "email":"capmanip@DESKTOP-TLIFEG1.localdomain", + "givenName":"", + "familyName": "Lx37" + }, + { + "@type":"Person", + "email":"", + "givenName":"Martin", + "familyName": "Schulz" + }, + { + "@type":"Person", + "email":"", + "givenName":"Matt", + "familyName": "Boggess" + }, + { + "@type":"Person", + "email":"molpsychistb@gmail.com", + "givenName":"Mohamed", + "familyName": "Sherif" + }, + { + "@type":"Person", + "email":"natakozh22@gmail.com", + "givenName":"Nataliia", + "familyName": "Kozhemiako" + }, + { + "@type":"Person", + "email":"niklas.wilming@gmail.com", + "givenName":"Niklas", + "familyName": "Wilming" + }, + { + "@type":"Person", + "email":"", + "givenName":"Oleh", + "familyName": "Kozynets" + }, + { + "@type":"Person", + "email":"pierreablin@gmail.com", + "givenName":"Pierre", + "familyName": "Ablin" + }, + { + "@type":"Person", + "email":"quentinbertrand54@gmail.com", + "givenName":"Quentin", + "familyName": "Bertrand" + }, + { + "@type":"Person", + "email":"rhubner@gmail.com", + "givenName":"Rodrigo", + "familyName": "Hübner" + }, + { + "@type":"Person", + "email":"sommariva@dima.unige.it", + "givenName":"Sara", + "familyName": "Sommariva" + }, + { + "@type":"Person", + "email":"sheraz@nmr.mgh.harvard.edu", + "givenName":"Sheraz", + "familyName": "Khan" + }, + { + "@type":"Person", + "email":"ksherbst@gmail.com", + "givenName":"Sophie", + "familyName": "Herbst" + }, + { + "@type":"Person", + "email":"", + "givenName":"Thomas", + "familyName": "Jochmann" + }, + { + "@type":"Person", + "email":"", + "givenName":"Tod", + "familyName": "Flak" + }, + { + "@type":"Person", + "email":"tom.dupre-la-tour@m4x.org", + "givenName":"Tom", + "familyName": "Dupré la Tour" + }, + { + "@type":"Person", + 
"email":"akshay0724@gmail.com", + "givenName":"", + "familyName": "akshay0724" + }, + { + "@type":"Person", + "email":"sviter33@gmail.com", + "givenName":"", + "familyName": "sviter" + }, + { + "@type":"Person", + "email":"abram.hindle@softwareprocess.es", + "givenName":"Abram", + "familyName": "Hindle" + }, + { + "@type":"Person", + "email":"achilleas.k@gmail.com", + "givenName":"Achilleas", + "familyName": "Koutsou" + }, + { + "@type":"Person", + "email":"aniket17133@iiitd.ac.in", + "givenName":"Aniket", + "familyName": "Pradhan" + }, + { + "@type":"Person", + "email":"as_dub@hotmail.com", + "givenName":"Anne-Sophie", + "familyName": "Dubarry" + }, + { + "@type":"Person", + "email":"", + "givenName":"Anton Nikolas", + "familyName": "Waniek" + }, + { + "@type":"Person", + "email":"arokem@gmail.com", + "givenName":"Ariel", + "familyName": "Rokem" + }, + { + "@type":"Person", + "email":"mynameisaustinhurst@gmail.com", + "givenName":"Austin", + "familyName": "Hurst" + }, + { + "@type":"Person", + "email":"bruno.nicenboim@gmail.com", + "givenName":"Bruno", + "familyName": "Nicenboim" + }, + { + "@type":"Person", + "email":"ctorre@mailbox.org", + "givenName":"Carlos", + "familyName": "de la Torre" + }, + { + "@type":"Person", + "email":"cclauss@me.com", + "givenName":"Christian", + "familyName": "Clauss" + }, + { + "@type":"Person", + "email":"", + "givenName":"Chun-Hui", + "familyName": "Li" + }, + { + "@type":"Person", + "email":"claire@guakamole.org", + "givenName":"Claire", + "familyName": "Braboszcz" + }, + { + "@type":"Person", + "email":"haslacherdavid@gmail.com", + "givenName":"David", + "familyName": "Haslacher" + }, + { + "@type":"Person", + "email":"dav.sabbagh@gmail.com", + "givenName":"David", + "familyName": "Sabbagh" + }, + { + "@type":"Person", + "email":"", + "givenName":"Demetres", + "familyName": "Kostas" + }, + { + "@type":"Person", + "email":"desislavka@gmail.com", + "givenName":"Desislava", + "familyName": "Petkova" + }, + { + "@type":"Person", + "email":"dm.altukhov@ya.ru", + "givenName":"Dmitrii", + "familyName": "Altukhov" + }, + { + "@type":"Person", + "email":"dominik.welke@ae.mpg.de", + "givenName":"Dominik", + "familyName": "Welke" + }, + { + "@type":"Person", + "email":"e.eich@fz-juelich.de", + "givenName":"Eberhard", + "familyName": "Eich" + }, + { + "@type":"Person", + "email":"edaurdxort@gmail.com", + "givenName":"Eduard", + "familyName": "Ort" + }, + { + "@type":"Person", + "email":"emd222@cornell.edu", + "givenName":"Elizabeth", + "familyName": "DuPre" + }, + { + "@type":"Person", + "email":"ellenlau@umd.edu", + "givenName":"Ellen", + "familyName": "Lau" + }, + { + "@type":"Person", + "email":"emanuele@relativita.com", + "givenName":"Emanuele", + "familyName": "Olivetti" + }, + { + "@type":"Person", + "email":"", + "givenName":"Evan", + "familyName": "Hathaway" + }, + { + "@type":"Person", + "email":"Geoff.Brookshire@gmail.com", + "givenName":"Geoff", + "familyName": "Brookshire" + }, + { + "@type":"Person", + "email":"hermann.sonntag@gmail.com", + "givenName":"Hermann", + "familyName": "Sonntag" + }, + { + "@type":"Person", + "email":"rubyyhj@gmail.com", + "givenName":"Hongjiang", + "familyName": "Ye" + }, + { + "@type":"Person", + "email":"", + "givenName":"Jakub", + "familyName": "Kaczmarzyk" + }, + { + "@type":"Person", + "email":"japsai@gmail.com", + "givenName":"Jasper J F", + "familyName": "van den Bosch" + }, + { + "@type":"Person", + "email":"stoutjd@nih.gov", + "givenName":"Jeff", + "familyName": "Stout" + }, + { + "@type":"Person", + "email":"", + 
"givenName":"Jeroen", + "familyName": "Van Der Donckt" + }, + { + "@type":"Person", + "email":"johanvandermeer@gmail.com", + "givenName":"Johan", + "familyName": "van der Meer" + }, + { + "@type":"Person", + "email":"", + "givenName":"Johannes", + "familyName": "Niediek" + }, + { + "@type":"Person", + "email":"", + "givenName":"Joshua J", + "familyName": "Bear" + }, + { + "@type":"Person", + "email":"j.dammers@fz-juelich.de", + "givenName":"Juergen", + "familyName": "Dammers" + }, + { + "@type":"Person", + "email":"slama@berkeley.edu", + "givenName":"Katarina", + "familyName": "Slama" + }, + { + "@type":"Person", + "email":"", + "givenName":"Katrin", + "familyName": "Leinweber" + }, + { + "@type":"Person", + "email":"laetitia.grabot@gmail.com", + "givenName":"Laetitia", + "familyName": "Grabot" + }, + { + "@type":"Person", + "email":"ualsbombe@protonmail.com", + "givenName":"Lau Møller", + "familyName": "Andersen" + }, + { + "@type":"Person", + "email":"lsbarbosa@gmail.com", + "givenName":"Leonardo S", + "familyName": "Barbosa" + }, + { + "@type":"Person", + "email":"", + "givenName":"Liberty", + "familyName": "Hamilton" + }, + { + "@type":"Person", + "email":"lorenzo.alfine@gmail.com", + "givenName":"Lorenzo", + "familyName": "Alfine" + }, + { + "@type":"Person", + "email":"hejtmy@gmail.com", + "givenName":"Lukáš", + "familyName": "Hejtmánek" + }, + { + "@type":"Person", + "email":"manfredg@nmr.mgh.harvard.edu", + "givenName":"Manfred", + "familyName": "Kitzbichler" + }, + { + "@type":"Person", + "email":"manojkumarsivaraj334@gmail.com", + "givenName":"Manoj", + "familyName": "Kumar" + }, + { + "@type":"Person", + "email":"manu.sutela@gmail.com", + "givenName":"Manu", + "familyName": "Sutela" + }, + { + "@type":"Person", + "email":"koculak.marcin@gmail.com", + "givenName":"Marcin", + "familyName": "Koculak" + }, + { + "@type":"Person", + "email":"mdovgialo@fabrizzio.zfb.fuw.edu.pl", + "givenName":"Marian", + "familyName": "Dovgialo" + }, + { + "@type":"Person", + "email":"", + "givenName":"Martin", + "familyName": "van Harmelen" + }, + { + "@type":"Person", + "email":"Martinb.nmb@gmail.com", + "givenName":"", + "familyName": "MartinBaBer" + }, + { + "@type":"Person", + "email":"matt.tucker@nyu.edu", + "givenName":"Matt", + "familyName": "Tucker" + }, + { + "@type":"Person", + "email":"matteo.visconti.gr@dartmouth.edu", + "givenName":"Matteo", + "familyName": "Visconti di Oleggio Castello" + }, + { + "@type":"Person", + "email":"krause@mpib-berlin.mpg.de", + "givenName":"Michael", + "familyName": "Krause" + }, + { + "@type":"Person", + "email":"kontakt@milanrybar.cz", + "givenName":"Milan", + "familyName": "Rybář" + }, + { + "@type":"Person", + "email":"", + "givenName":"Mohammad", + "familyName": "Daneshzand" + }, + { + "@type":"Person", + "email":"nh.proulx@gmail.com", + "givenName":"Nicole", + "familyName": "Proulx" + }, + { + "@type":"Person", + "email":"nikos.ch01@gmail.com", + "givenName":"Nikolas", + "familyName": "Chalas" + }, + { + "@type":"Person", + "email":"tottochan@gmail.com", + "givenName":"Padma", + "familyName": "Sundaram" + }, + { + "@type":"Person", + "email":"paul@roujansky.eu", + "givenName":"Paul", + "familyName": "Roujansky" + }, + { + "@type":"Person", + "email":"pedrobnsilva@gmail.com", + "givenName":"Pedro", + "familyName": "Silva" + }, + { + "@type":"Person", + "email":"pmolfese@gmail.com", + "givenName":"Peter J", + "familyName": "Molfese" + }, + { + "@type":"Person", + "email":"glia@dtu.dk", + "givenName":"Quanliang", + "familyName": "Li" + }, + { + 
"@type":"Person", + "email":"rahuln@cs.washington.edu", + "givenName":"Rahul", + "familyName": "Nadkarni" + }, + { + "@type":"Person", + "email":"rmrgatti@gmail.com", + "givenName":"Ramiro", + "familyName": "Gatti" + }, + { + "@type":"Person", + "email":"moncho_apa@hotmail.com", + "givenName":"Ramonapariciog", + "familyName": "Apariciogarcia" + }, + { + "@type":"Person", + "email":"r.oostenveld@gmail.com", + "givenName":"Robert", + "familyName": "Oostenveld" + }, + { + "@type":"Person", + "email":"robbyseymour@gmail.com", + "givenName":"Robert", + "familyName": "Seymour" + }, + { + "@type":"Person", + "email":"robintibor@gmail.com", + "givenName":"Robin Tibor", + "familyName": "Schirrmeister" + }, + { + "@type":"Person", + "email":"sagung.pai@gmail.com", + "givenName":"Sagun", + "familyName": "Pai" + }, + { + "@type":"Person", + "email":"u1265119@unimail.hud.ac.uk", + "givenName":"Sam", + "familyName": "Perry" + }, + { + "@type":"Person", + "email":"", + "givenName":"Sebastian", + "familyName": "Major" + }, + { + "@type":"Person", + "email":"sebastian.castano@blbt.uni-freiburg.de", + "givenName":"Sebastián", + "familyName": "Castaño" + }, + { + "@type":"Person", + "email":"s.antopolsky@gmail.com", + "givenName":"Sergey", + "familyName": "Antopolskiy" + }, + { + "@type":"Person", + "email":"", + "givenName":"Simeon", + "familyName": "Wong" + }, + { + "@type":"Person", + "email":"", + "givenName":"Simon-Shlomo", + "familyName": "Poil" + }, + { + "@type":"Person", + "email":"", + "givenName":"Sourav", + "familyName": "Singh" + }, + { + "@type":"Person", + "email":"stan.chambon@gmail.com", + "givenName":"Stanislas", + "familyName": "Chambon" + }, + { + "@type":"Person", + "email":"stevematindi@gmail.com", + "givenName":"Steve", + "familyName": "Matindi" + }, + { + "@type":"Person", + "email":"bethard@email.arizona.edu", + "givenName":"Steven", + "familyName": "Bethard" + }, + { + "@type":"Person", + "email":"", + "givenName":"Steven", + "familyName": "Bierer" + }, + { + "@type":"Person", + "email":"s.m.gutstein@gmail.com", + "givenName":"Steven M", + "familyName": "Gutstein" + }, + { + "@type":"Person", + "email":"", + "givenName":"Svea Marie", + "familyName": "Meyer" + }, + { + "@type":"Person", + "email":"Theodore.Papadopoulo@inria.fr", + "givenName":"Theodore", + "familyName": "Papadopoulo" + }, + { + "@type":"Person", + "email":"tdonoghue.research@gmail.com", + "givenName":"Thomas", + "familyName": "Donoghue" + }, + { + "@type":"Person", + "email":"radman.thomas@gmail.com", + "givenName":"Thomas", + "familyName": "Radman" + }, + { + "@type":"Person", + "email":"", + "givenName":"Tommy", + "familyName": "Clausner" + }, + { + "@type":"Person", + "email":"derstenner@gmail.com", + "givenName":"Tristan", + "familyName": "Stenner" + }, + { + "@type":"Person", + "email":"", + "givenName":"", + "familyName": "buildqa" + }, + { + "@type":"Person", + "email":"", + "givenName":"", + "familyName": "chapochn" + }, + { + "@type":"Person", + "email":"", + "givenName":"", + "familyName": "mshader" + } + ] +} diff --git a/doc/_includes/bem_model.rst b/doc/_includes/bem_model.rst index 00450fb3c52..1985d85fc31 100644 --- a/doc/_includes/bem_model.rst +++ b/doc/_includes/bem_model.rst @@ -3,10 +3,6 @@ Creating the BEM meshes ======================= -.. contents:: Page contents - :local: - :depth: 2 - .. NOTE: part of this file is included in doc/overview/implementation.rst. Changes here are reflected there. 
 If you want to link to this content, link to :ref:`bem-model` to link to that section of the implementation.rst page.
@@ -54,8 +50,7 @@
 reconstructions but it is strongly recommended that they are collected at the same time with the MPRAGEs or at least with the same scanner. For easy co-registration, the images should have FOV, matrix, slice thickness, gap, and slice orientation as the MPRAGE data. For information on suitable pulse
-sequences, see reference [B. Fischl *et al.* and J. Jovicich *et al.*, 2006] in
-:ref:`CEGEGDEI`.
+sequences, see :footcite:`FischlEtAl2004`.
 Creation of the BEM meshes using this method involves the following steps:
diff --git a/doc/_includes/channel_interpolation.rst b/doc/_includes/channel_interpolation.rst
index 3eec4312130..da2ae1dc12f 100644
--- a/doc/_includes/channel_interpolation.rst
+++ b/doc/_includes/channel_interpolation.rst
@@ -68,10 +68,4 @@
 and the bad channel will be fixed.
 .. topic:: Examples:
- * :ref:`sphx_glr_auto_examples_preprocessing_plot_interpolate_bad_channels.py`
-
-
-References
-~~~~~~~~~~
-
-.. footbibliography::
+ * :ref:`ex-interpolate-bad-channels`
diff --git a/doc/_includes/channel_types.rst b/doc/_includes/channel_types.rst
index fc370fb36bb..be2abe2a50c 100644
--- a/doc/_includes/channel_types.rst
+++ b/doc/_includes/channel_types.rst
@@ -36,6 +36,8 @@
 ecg          Electrocardiography (ECG)               Volts
 seeg         Stereotactic EEG channels               Volts
+dbs          Deep brain stimulation (DBS)            Volts
+
 ecog         Electrocorticography (ECoG)             Volts
 fnirs (hbo)  Functional near-infrared spectroscopy   Moles/liter
diff --git a/doc/_includes/forward.rst b/doc/_includes/forward.rst
index 7070b426dcd..4a4f03fb785 100644
--- a/doc/_includes/forward.rst
+++ b/doc/_includes/forward.rst
@@ -7,10 +7,6 @@
 This page covers the definitions of different coordinate systems employed in MNE software and FreeSurfer, the details of the computation of the forward solutions, and the associated low-level utilities.
-.. contents:: Page contents
- :local:
- :depth: 2
-
 .. NOTE: part of this file is included in doc/overview/implementation.rst. Changes here are reflected there. If you want to link to this content, link to :ref:`ch_forward` to link to that section of the implementation.rst page.
@@ -30,7 +26,7 @@ MEG/EEG and MRI coordinate systems
 :class:`~mne.SourceSpaces`, etc), information about the coordinate frame is encoded as a constant integer value. The meaning of those integers is determined `in the source code
- `__.
+ `__.
 The coordinate systems used in MNE software (and FreeSurfer) and their relationships are depicted in :ref:`coordinate_system_figure`. Except for the
@@ -726,8 +722,3 @@
 solutions. Usually the EEG forward solution is identical across runs because the electrode locations do not change.
 .. target for :end-before: forward-end-content
-
-References
-~~~~~~~~~~
-
-.. footbibliography::
diff --git a/doc/_includes/inverse.rst b/doc/_includes/inverse.rst
index 12adbf308f0..bb5dd274d98 100644
--- a/doc/_includes/inverse.rst
+++ b/doc/_includes/inverse.rst
@@ -3,10 +3,6 @@
 The minimum-norm current estimates
 ==================================
-.. contents:: Page contents
- :local:
- :depth: 2
-
 .. NOTE: part of this file is included in doc/overview/implementation.rst. Changes here are reflected there. If you want to link to this content, link to :ref:`ch_mne` to link to that section of the implementation.rst page.
@@ -606,8 +602,3 @@
 Generalizing, for any combination of sums and differences, where :math:`w_i =
 .. math:: 1 / L_{eff} = \sum_{i = 1}^n {1/{L_i}}
 .. target for :end-before: inverse-end-content
-
-References
-~~~~~~~~~~
-
-.. footbibliography::
diff --git a/doc/_includes/morph.rst b/doc/_includes/morph.rst
index 99aaed321f9..b41769eff15 100644
--- a/doc/_includes/morph.rst
+++ b/doc/_includes/morph.rst
@@ -12,11 +12,6 @@
 anatomically analogous labels in another. :meth:`mne.SourceMorph.apply` offers the capability to transform all subject data to the same space and, e.g., compute averages of data across subjects.
-.. contents:: Page contents
- :local:
- :depth: 2
-
-
 .. NOTE: part of this file is included in doc/overview/implementation.rst. Changes here are reflected there. If you want to link to this content, link to :ref:`ch_morph` to link to that section of the implementation.rst page.
diff --git a/doc/_includes/ssp.rst b/doc/_includes/ssp.rst
index d34741c1a41..ccfaeb3041a 100644
--- a/doc/_includes/ssp.rst
+++ b/doc/_includes/ssp.rst
@@ -80,7 +80,7 @@
 the brain, it is necessary to apply the projection to the forward solution in the course of inverse computations. For more information on SSP, please consult the references listed in
-:ref:`CEGIEEBB`.
+:footcite:`TescheEtAl1995,UusitaloIlmoniemi1997`.
 Estimation of the noise subspace
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/_includes/units.rst b/doc/_includes/units.rst
index f1319c3d281..f37f03ebaf4 100644
--- a/doc/_includes/units.rst
+++ b/doc/_includes/units.rst
@@ -14,7 +14,7 @@
 Irrespective of the units used in your manufacturer's format, when importing data, MNE-Python will always convert measurements to the same standard units. Thus the in-memory representation of data are always in:
-- Volts (eeg, eog, seeg, emg, ecg, bio, ecog)
+- Volts (eeg, eog, seeg, emg, ecg, bio, ecog, dbs)
 - Teslas (magnetometers)
 - Teslas/meter (gradiometers)
 - Amperes*meter (dipole fits, minimum-norm estimates, etc.)
diff --git a/doc/_static/copybutton.js b/doc/_static/copybutton.js
deleted file mode 100644
index 01ee2bab36b..00000000000
--- a/doc/_static/copybutton.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 PSF. Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
-// File originates from the cpython source found in Doc/tools/sphinxext/static/copybutton.js
-
-$(document).ready(function() {
- /* Add a [>>>] button on the top-right corner of code samples to hide
- * the >>> and ... prompts and the output and thus make the code
- * copyable.
*/ - var div = $('.highlight-python .highlight,' + - '.highlight-default .highlight,' + - '.highlight-python3 .highlight') - var pre = div.find('pre'); - - // get the styles from the current theme - pre.parent().parent().css('position', 'relative'); - var hide_text = 'Hide the prompts and output'; - var show_text = 'Show the prompts and output'; - var border_width = pre.css('border-top-width'); - var border_style = pre.css('border-top-style'); - var border_color = pre.css('border-top-color'); - var button_styles = { - 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', - 'border-color': border_color, 'border-style': border_style, - 'border-width': border_width, 'color': border_color, 'text-size': '75%', - 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em', - 'border-radius': '0 3px 0 0' - } - - // create and add the button to all the code blocks that contain >>> - div.each(function(index) { - var jthis = $(this); - if (jthis.find('.gp').length > 0) { - var button = $('>>>'); - button.css(button_styles) - button.attr('title', hide_text); - button.data('hidden', 'false'); - jthis.prepend(button); - } - // tracebacks (.gt) contain bare text elements that need to be - // wrapped in a span to work with .nextUntil() (see later) - jthis.find('pre:has(.gt)').contents().filter(function() { - return ((this.nodeType == 3) && (this.data.trim().length > 0)); - }).wrap(''); - }); - - // define the behavior of the button when it's clicked - $('.copybutton').click(function(e){ - e.preventDefault(); - var button = $(this); - if (button.data('hidden') === 'false') { - // hide the code output - button.parent().find('.go, .gp, .gt').hide(); - button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); - button.css('text-decoration', 'line-through'); - button.attr('title', show_text); - button.data('hidden', 'true'); - } else { - // show the code output - button.parent().find('.go, .gp, .gt').show(); - button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); - button.css('text-decoration', 'none'); - button.attr('title', hide_text); - button.data('hidden', 'false'); - } - }); -}); - diff --git a/doc/_static/flag-icon.css b/doc/_static/flag-icon.css deleted file mode 100644 index c2b4e5b4cec..00000000000 --- a/doc/_static/flag-icon.css +++ /dev/null @@ -1,23 +0,0 @@ -.flag-icon-background { - background-size: contain; - background-position: 50%; - background-repeat: no-repeat; -} -.flag-icon { - background-size: contain; - background-position: 50%; - background-repeat: no-repeat; - position: relative; - display: inline-block; - width: 1.33333333em; - line-height: 1em; -} -.flag-icon:before { - content: "\00a0"; -} -.flag-icon-fr { - background-image: url(fr.svg); -} -.flag-icon-us { - background-image: url(us.svg); -} diff --git a/doc/_static/font-awesome.css b/doc/_static/font-awesome.css deleted file mode 100644 index 5414e96d3cf..00000000000 --- a/doc/_static/font-awesome.css +++ /dev/null @@ -1,2337 +0,0 @@ -/*! 
- * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */ -/* FONT PATH - * -------------------------- */ -@font-face { - font-family: 'FontAwesome'; - src: url('./fonts/fontawesome/fontawesome-webfont.eot?v=4.7.0'); - src: url('./fonts/fontawesome/fontawesome-webfont.eot?#iefix&v=4.7.0') format('embedded-opentype'), url('./fonts/fontawesome/fontawesome-webfont.woff2?v=4.7.0') format('woff2'), url('./fonts/fontawesome/fontawesome-webfont.woff?v=4.7.0') format('woff'), url('./fonts/fontawesome/fontawesome-webfont.ttf?v=4.7.0') format('truetype'); - font-weight: normal; - font-style: normal; -} -.fa { - display: inline-block; - font: normal normal normal 14px/1 FontAwesome; - font-size: inherit; - text-rendering: auto; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} -/* makes the font 33% larger relative to the icon container */ -.fa-lg { - font-size: 1.33333333em; - line-height: 0.75em; - vertical-align: -15%; -} -.fa-2x { - font-size: 2em; -} -.fa-3x { - font-size: 3em; -} -.fa-4x { - font-size: 4em; -} -.fa-5x { - font-size: 5em; -} -.fa-fw { - width: 1.28571429em; - text-align: center; -} -.fa-ul { - padding-left: 0; - margin-left: 2.14285714em; - list-style-type: none; -} -.fa-ul > li { - position: relative; -} -.fa-li { - position: absolute; - left: -2.14285714em; - width: 2.14285714em; - top: 0.14285714em; - text-align: center; -} -.fa-li.fa-lg { - left: -1.85714286em; -} -.fa-border { - padding: .2em .25em .15em; - border: solid 0.08em #eeeeee; - border-radius: .1em; -} -.fa-pull-left { - float: left; -} -.fa-pull-right { - float: right; -} -.fa.fa-pull-left { - margin-right: .3em; -} -.fa.fa-pull-right { - margin-left: .3em; -} -/* Deprecated as of 4.4.0 */ -.pull-right { - float: right; -} -.pull-left { - float: left; -} -.fa.pull-left { - margin-right: .3em; -} -.fa.pull-right { - margin-left: .3em; -} -.fa-spin { - -webkit-animation: fa-spin 2s infinite linear; - animation: fa-spin 2s infinite linear; -} -.fa-pulse { - -webkit-animation: fa-spin 1s infinite steps(8); - animation: fa-spin 1s infinite steps(8); -} -@-webkit-keyframes fa-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} -@keyframes fa-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} -.fa-rotate-90 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)"; - -webkit-transform: rotate(90deg); - -ms-transform: rotate(90deg); - transform: rotate(90deg); -} -.fa-rotate-180 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)"; - -webkit-transform: rotate(180deg); - -ms-transform: rotate(180deg); - transform: rotate(180deg); -} -.fa-rotate-270 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)"; - -webkit-transform: rotate(270deg); - -ms-transform: rotate(270deg); - transform: rotate(270deg); -} -.fa-flip-horizontal { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)"; - -webkit-transform: scale(-1, 1); - -ms-transform: scale(-1, 1); - transform: scale(-1, 1); -} -.fa-flip-vertical { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"; - -webkit-transform: scale(1, -1); - -ms-transform: scale(1, -1); - transform: scale(1, -1); -} -:root 
.fa-rotate-90, -:root .fa-rotate-180, -:root .fa-rotate-270, -:root .fa-flip-horizontal, -:root .fa-flip-vertical { - filter: none; -} -.fa-stack { - position: relative; - display: inline-block; - width: 2em; - height: 2em; - line-height: 2em; - vertical-align: middle; -} -.fa-stack-1x, -.fa-stack-2x { - position: absolute; - left: 0; - width: 100%; - text-align: center; -} -.fa-stack-1x { - line-height: inherit; -} -.fa-stack-2x { - font-size: 2em; -} -.fa-inverse { - color: #ffffff; -} -/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen - readers do not read off random characters that represent icons */ -.fa-glass:before { - content: "\f000"; -} -.fa-music:before { - content: "\f001"; -} -.fa-search:before { - content: "\f002"; -} -.fa-envelope-o:before { - content: "\f003"; -} -.fa-heart:before { - content: "\f004"; -} -.fa-star:before { - content: "\f005"; -} -.fa-star-o:before { - content: "\f006"; -} -.fa-user:before { - content: "\f007"; -} -.fa-film:before { - content: "\f008"; -} -.fa-th-large:before { - content: "\f009"; -} -.fa-th:before { - content: "\f00a"; -} -.fa-th-list:before { - content: "\f00b"; -} -.fa-check:before { - content: "\f00c"; -} -.fa-remove:before, -.fa-close:before, -.fa-times:before { - content: "\f00d"; -} -.fa-search-plus:before { - content: "\f00e"; -} -.fa-search-minus:before { - content: "\f010"; -} -.fa-power-off:before { - content: "\f011"; -} -.fa-signal:before { - content: "\f012"; -} -.fa-gear:before, -.fa-cog:before { - content: "\f013"; -} -.fa-trash-o:before { - content: "\f014"; -} -.fa-home:before { - content: "\f015"; -} -.fa-file-o:before { - content: "\f016"; -} -.fa-clock-o:before { - content: "\f017"; -} -.fa-road:before { - content: "\f018"; -} -.fa-download:before { - content: "\f019"; -} -.fa-arrow-circle-o-down:before { - content: "\f01a"; -} -.fa-arrow-circle-o-up:before { - content: "\f01b"; -} -.fa-inbox:before { - content: "\f01c"; -} -.fa-play-circle-o:before { - content: "\f01d"; -} -.fa-rotate-right:before, -.fa-repeat:before { - content: "\f01e"; -} -.fa-refresh:before { - content: "\f021"; -} -.fa-list-alt:before { - content: "\f022"; -} -.fa-lock:before { - content: "\f023"; -} -.fa-flag:before { - content: "\f024"; -} -.fa-headphones:before { - content: "\f025"; -} -.fa-volume-off:before { - content: "\f026"; -} -.fa-volume-down:before { - content: "\f027"; -} -.fa-volume-up:before { - content: "\f028"; -} -.fa-qrcode:before { - content: "\f029"; -} -.fa-barcode:before { - content: "\f02a"; -} -.fa-tag:before { - content: "\f02b"; -} -.fa-tags:before { - content: "\f02c"; -} -.fa-book:before { - content: "\f02d"; -} -.fa-bookmark:before { - content: "\f02e"; -} -.fa-print:before { - content: "\f02f"; -} -.fa-camera:before { - content: "\f030"; -} -.fa-font:before { - content: "\f031"; -} -.fa-bold:before { - content: "\f032"; -} -.fa-italic:before { - content: "\f033"; -} -.fa-text-height:before { - content: "\f034"; -} -.fa-text-width:before { - content: "\f035"; -} -.fa-align-left:before { - content: "\f036"; -} -.fa-align-center:before { - content: "\f037"; -} -.fa-align-right:before { - content: "\f038"; -} -.fa-align-justify:before { - content: "\f039"; -} -.fa-list:before { - content: "\f03a"; -} -.fa-dedent:before, -.fa-outdent:before { - content: "\f03b"; -} -.fa-indent:before { - content: "\f03c"; -} -.fa-video-camera:before { - content: "\f03d"; -} -.fa-photo:before, -.fa-image:before, -.fa-picture-o:before { - content: "\f03e"; -} -.fa-pencil:before { - content: "\f040"; -} 
-.fa-map-marker:before { - content: "\f041"; -} -.fa-adjust:before { - content: "\f042"; -} -.fa-tint:before { - content: "\f043"; -} -.fa-edit:before, -.fa-pencil-square-o:before { - content: "\f044"; -} -.fa-share-square-o:before { - content: "\f045"; -} -.fa-check-square-o:before { - content: "\f046"; -} -.fa-arrows:before { - content: "\f047"; -} -.fa-step-backward:before { - content: "\f048"; -} -.fa-fast-backward:before { - content: "\f049"; -} -.fa-backward:before { - content: "\f04a"; -} -.fa-play:before { - content: "\f04b"; -} -.fa-pause:before { - content: "\f04c"; -} -.fa-stop:before { - content: "\f04d"; -} -.fa-forward:before { - content: "\f04e"; -} -.fa-fast-forward:before { - content: "\f050"; -} -.fa-step-forward:before { - content: "\f051"; -} -.fa-eject:before { - content: "\f052"; -} -.fa-chevron-left:before { - content: "\f053"; -} -.fa-chevron-right:before { - content: "\f054"; -} -.fa-plus-circle:before { - content: "\f055"; -} -.fa-minus-circle:before { - content: "\f056"; -} -.fa-times-circle:before { - content: "\f057"; -} -.fa-check-circle:before { - content: "\f058"; -} -.fa-question-circle:before { - content: "\f059"; -} -.fa-info-circle:before { - content: "\f05a"; -} -.fa-crosshairs:before { - content: "\f05b"; -} -.fa-times-circle-o:before { - content: "\f05c"; -} -.fa-check-circle-o:before { - content: "\f05d"; -} -.fa-ban:before { - content: "\f05e"; -} -.fa-arrow-left:before { - content: "\f060"; -} -.fa-arrow-right:before { - content: "\f061"; -} -.fa-arrow-up:before { - content: "\f062"; -} -.fa-arrow-down:before { - content: "\f063"; -} -.fa-mail-forward:before, -.fa-share:before { - content: "\f064"; -} -.fa-expand:before { - content: "\f065"; -} -.fa-compress:before { - content: "\f066"; -} -.fa-plus:before { - content: "\f067"; -} -.fa-minus:before { - content: "\f068"; -} -.fa-asterisk:before { - content: "\f069"; -} -.fa-exclamation-circle:before { - content: "\f06a"; -} -.fa-gift:before { - content: "\f06b"; -} -.fa-leaf:before { - content: "\f06c"; -} -.fa-fire:before { - content: "\f06d"; -} -.fa-eye:before { - content: "\f06e"; -} -.fa-eye-slash:before { - content: "\f070"; -} -.fa-warning:before, -.fa-exclamation-triangle:before { - content: "\f071"; -} -.fa-plane:before { - content: "\f072"; -} -.fa-calendar:before { - content: "\f073"; -} -.fa-random:before { - content: "\f074"; -} -.fa-comment:before { - content: "\f075"; -} -.fa-magnet:before { - content: "\f076"; -} -.fa-chevron-up:before { - content: "\f077"; -} -.fa-chevron-down:before { - content: "\f078"; -} -.fa-retweet:before { - content: "\f079"; -} -.fa-shopping-cart:before { - content: "\f07a"; -} -.fa-folder:before { - content: "\f07b"; -} -.fa-folder-open:before { - content: "\f07c"; -} -.fa-arrows-v:before { - content: "\f07d"; -} -.fa-arrows-h:before { - content: "\f07e"; -} -.fa-bar-chart-o:before, -.fa-bar-chart:before { - content: "\f080"; -} -.fa-twitter-square:before { - content: "\f081"; -} -.fa-facebook-square:before { - content: "\f082"; -} -.fa-camera-retro:before { - content: "\f083"; -} -.fa-key:before { - content: "\f084"; -} -.fa-gears:before, -.fa-cogs:before { - content: "\f085"; -} -.fa-comments:before { - content: "\f086"; -} -.fa-thumbs-o-up:before { - content: "\f087"; -} -.fa-thumbs-o-down:before { - content: "\f088"; -} -.fa-star-half:before { - content: "\f089"; -} -.fa-heart-o:before { - content: "\f08a"; -} -.fa-sign-out:before { - content: "\f08b"; -} -.fa-linkedin-square:before { - content: "\f08c"; -} -.fa-thumb-tack:before { - content: "\f08d"; 
-} -.fa-external-link:before { - content: "\f08e"; -} -.fa-sign-in:before { - content: "\f090"; -} -.fa-trophy:before { - content: "\f091"; -} -.fa-github-square:before { - content: "\f092"; -} -.fa-upload:before { - content: "\f093"; -} -.fa-lemon-o:before { - content: "\f094"; -} -.fa-phone:before { - content: "\f095"; -} -.fa-square-o:before { - content: "\f096"; -} -.fa-bookmark-o:before { - content: "\f097"; -} -.fa-phone-square:before { - content: "\f098"; -} -.fa-twitter:before { - content: "\f099"; -} -.fa-facebook-f:before, -.fa-facebook:before { - content: "\f09a"; -} -.fa-github:before { - content: "\f09b"; -} -.fa-unlock:before { - content: "\f09c"; -} -.fa-credit-card:before { - content: "\f09d"; -} -.fa-feed:before, -.fa-rss:before { - content: "\f09e"; -} -.fa-hdd-o:before { - content: "\f0a0"; -} -.fa-bullhorn:before { - content: "\f0a1"; -} -.fa-bell:before { - content: "\f0f3"; -} -.fa-certificate:before { - content: "\f0a3"; -} -.fa-hand-o-right:before { - content: "\f0a4"; -} -.fa-hand-o-left:before { - content: "\f0a5"; -} -.fa-hand-o-up:before { - content: "\f0a6"; -} -.fa-hand-o-down:before { - content: "\f0a7"; -} -.fa-arrow-circle-left:before { - content: "\f0a8"; -} -.fa-arrow-circle-right:before { - content: "\f0a9"; -} -.fa-arrow-circle-up:before { - content: "\f0aa"; -} -.fa-arrow-circle-down:before { - content: "\f0ab"; -} -.fa-globe:before { - content: "\f0ac"; -} -.fa-wrench:before { - content: "\f0ad"; -} -.fa-tasks:before { - content: "\f0ae"; -} -.fa-filter:before { - content: "\f0b0"; -} -.fa-briefcase:before { - content: "\f0b1"; -} -.fa-arrows-alt:before { - content: "\f0b2"; -} -.fa-group:before, -.fa-users:before { - content: "\f0c0"; -} -.fa-chain:before, -.fa-link:before { - content: "\f0c1"; -} -.fa-cloud:before { - content: "\f0c2"; -} -.fa-flask:before { - content: "\f0c3"; -} -.fa-cut:before, -.fa-scissors:before { - content: "\f0c4"; -} -.fa-copy:before, -.fa-files-o:before { - content: "\f0c5"; -} -.fa-paperclip:before { - content: "\f0c6"; -} -.fa-save:before, -.fa-floppy-o:before { - content: "\f0c7"; -} -.fa-square:before { - content: "\f0c8"; -} -.fa-navicon:before, -.fa-reorder:before, -.fa-bars:before { - content: "\f0c9"; -} -.fa-list-ul:before { - content: "\f0ca"; -} -.fa-list-ol:before { - content: "\f0cb"; -} -.fa-strikethrough:before { - content: "\f0cc"; -} -.fa-underline:before { - content: "\f0cd"; -} -.fa-table:before { - content: "\f0ce"; -} -.fa-magic:before { - content: "\f0d0"; -} -.fa-truck:before { - content: "\f0d1"; -} -.fa-pinterest:before { - content: "\f0d2"; -} -.fa-pinterest-square:before { - content: "\f0d3"; -} -.fa-google-plus-square:before { - content: "\f0d4"; -} -.fa-google-plus:before { - content: "\f0d5"; -} -.fa-money:before { - content: "\f0d6"; -} -.fa-caret-down:before { - content: "\f0d7"; -} -.fa-caret-up:before { - content: "\f0d8"; -} -.fa-caret-left:before { - content: "\f0d9"; -} -.fa-caret-right:before { - content: "\f0da"; -} -.fa-columns:before { - content: "\f0db"; -} -.fa-unsorted:before, -.fa-sort:before { - content: "\f0dc"; -} -.fa-sort-down:before, -.fa-sort-desc:before { - content: "\f0dd"; -} -.fa-sort-up:before, -.fa-sort-asc:before { - content: "\f0de"; -} -.fa-envelope:before { - content: "\f0e0"; -} -.fa-linkedin:before { - content: "\f0e1"; -} -.fa-rotate-left:before, -.fa-undo:before { - content: "\f0e2"; -} -.fa-legal:before, -.fa-gavel:before { - content: "\f0e3"; -} -.fa-dashboard:before, -.fa-tachometer:before { - content: "\f0e4"; -} -.fa-comment-o:before { - content: 
"\f0e5"; -} -.fa-comments-o:before { - content: "\f0e6"; -} -.fa-flash:before, -.fa-bolt:before { - content: "\f0e7"; -} -.fa-sitemap:before { - content: "\f0e8"; -} -.fa-umbrella:before { - content: "\f0e9"; -} -.fa-paste:before, -.fa-clipboard:before { - content: "\f0ea"; -} -.fa-lightbulb-o:before { - content: "\f0eb"; -} -.fa-exchange:before { - content: "\f0ec"; -} -.fa-cloud-download:before { - content: "\f0ed"; -} -.fa-cloud-upload:before { - content: "\f0ee"; -} -.fa-user-md:before { - content: "\f0f0"; -} -.fa-stethoscope:before { - content: "\f0f1"; -} -.fa-suitcase:before { - content: "\f0f2"; -} -.fa-bell-o:before { - content: "\f0a2"; -} -.fa-coffee:before { - content: "\f0f4"; -} -.fa-cutlery:before { - content: "\f0f5"; -} -.fa-file-text-o:before { - content: "\f0f6"; -} -.fa-building-o:before { - content: "\f0f7"; -} -.fa-hospital-o:before { - content: "\f0f8"; -} -.fa-ambulance:before { - content: "\f0f9"; -} -.fa-medkit:before { - content: "\f0fa"; -} -.fa-fighter-jet:before { - content: "\f0fb"; -} -.fa-beer:before { - content: "\f0fc"; -} -.fa-h-square:before { - content: "\f0fd"; -} -.fa-plus-square:before { - content: "\f0fe"; -} -.fa-angle-double-left:before { - content: "\f100"; -} -.fa-angle-double-right:before { - content: "\f101"; -} -.fa-angle-double-up:before { - content: "\f102"; -} -.fa-angle-double-down:before { - content: "\f103"; -} -.fa-angle-left:before { - content: "\f104"; -} -.fa-angle-right:before { - content: "\f105"; -} -.fa-angle-up:before { - content: "\f106"; -} -.fa-angle-down:before { - content: "\f107"; -} -.fa-desktop:before { - content: "\f108"; -} -.fa-laptop:before { - content: "\f109"; -} -.fa-tablet:before { - content: "\f10a"; -} -.fa-mobile-phone:before, -.fa-mobile:before { - content: "\f10b"; -} -.fa-circle-o:before { - content: "\f10c"; -} -.fa-quote-left:before { - content: "\f10d"; -} -.fa-quote-right:before { - content: "\f10e"; -} -.fa-spinner:before { - content: "\f110"; -} -.fa-circle:before { - content: "\f111"; -} -.fa-mail-reply:before, -.fa-reply:before { - content: "\f112"; -} -.fa-github-alt:before { - content: "\f113"; -} -.fa-folder-o:before { - content: "\f114"; -} -.fa-folder-open-o:before { - content: "\f115"; -} -.fa-smile-o:before { - content: "\f118"; -} -.fa-frown-o:before { - content: "\f119"; -} -.fa-meh-o:before { - content: "\f11a"; -} -.fa-gamepad:before { - content: "\f11b"; -} -.fa-keyboard-o:before { - content: "\f11c"; -} -.fa-flag-o:before { - content: "\f11d"; -} -.fa-flag-checkered:before { - content: "\f11e"; -} -.fa-terminal:before { - content: "\f120"; -} -.fa-code:before { - content: "\f121"; -} -.fa-mail-reply-all:before, -.fa-reply-all:before { - content: "\f122"; -} -.fa-star-half-empty:before, -.fa-star-half-full:before, -.fa-star-half-o:before { - content: "\f123"; -} -.fa-location-arrow:before { - content: "\f124"; -} -.fa-crop:before { - content: "\f125"; -} -.fa-code-fork:before { - content: "\f126"; -} -.fa-unlink:before, -.fa-chain-broken:before { - content: "\f127"; -} -.fa-question:before { - content: "\f128"; -} -.fa-info:before { - content: "\f129"; -} -.fa-exclamation:before { - content: "\f12a"; -} -.fa-superscript:before { - content: "\f12b"; -} -.fa-subscript:before { - content: "\f12c"; -} -.fa-eraser:before { - content: "\f12d"; -} -.fa-puzzle-piece:before { - content: "\f12e"; -} -.fa-microphone:before { - content: "\f130"; -} -.fa-microphone-slash:before { - content: "\f131"; -} -.fa-shield:before { - content: "\f132"; -} -.fa-calendar-o:before { - content: "\f133"; -} 
-.fa-fire-extinguisher:before { - content: "\f134"; -} -.fa-rocket:before { - content: "\f135"; -} -.fa-maxcdn:before { - content: "\f136"; -} -.fa-chevron-circle-left:before { - content: "\f137"; -} -.fa-chevron-circle-right:before { - content: "\f138"; -} -.fa-chevron-circle-up:before { - content: "\f139"; -} -.fa-chevron-circle-down:before { - content: "\f13a"; -} -.fa-html5:before { - content: "\f13b"; -} -.fa-css3:before { - content: "\f13c"; -} -.fa-anchor:before { - content: "\f13d"; -} -.fa-unlock-alt:before { - content: "\f13e"; -} -.fa-bullseye:before { - content: "\f140"; -} -.fa-ellipsis-h:before { - content: "\f141"; -} -.fa-ellipsis-v:before { - content: "\f142"; -} -.fa-rss-square:before { - content: "\f143"; -} -.fa-play-circle:before { - content: "\f144"; -} -.fa-ticket:before { - content: "\f145"; -} -.fa-minus-square:before { - content: "\f146"; -} -.fa-minus-square-o:before { - content: "\f147"; -} -.fa-level-up:before { - content: "\f148"; -} -.fa-level-down:before { - content: "\f149"; -} -.fa-check-square:before { - content: "\f14a"; -} -.fa-pencil-square:before { - content: "\f14b"; -} -.fa-external-link-square:before { - content: "\f14c"; -} -.fa-share-square:before { - content: "\f14d"; -} -.fa-compass:before { - content: "\f14e"; -} -.fa-toggle-down:before, -.fa-caret-square-o-down:before { - content: "\f150"; -} -.fa-toggle-up:before, -.fa-caret-square-o-up:before { - content: "\f151"; -} -.fa-toggle-right:before, -.fa-caret-square-o-right:before { - content: "\f152"; -} -.fa-euro:before, -.fa-eur:before { - content: "\f153"; -} -.fa-gbp:before { - content: "\f154"; -} -.fa-dollar:before, -.fa-usd:before { - content: "\f155"; -} -.fa-rupee:before, -.fa-inr:before { - content: "\f156"; -} -.fa-cny:before, -.fa-rmb:before, -.fa-yen:before, -.fa-jpy:before { - content: "\f157"; -} -.fa-ruble:before, -.fa-rouble:before, -.fa-rub:before { - content: "\f158"; -} -.fa-won:before, -.fa-krw:before { - content: "\f159"; -} -.fa-bitcoin:before, -.fa-btc:before { - content: "\f15a"; -} -.fa-file:before { - content: "\f15b"; -} -.fa-file-text:before { - content: "\f15c"; -} -.fa-sort-alpha-asc:before { - content: "\f15d"; -} -.fa-sort-alpha-desc:before { - content: "\f15e"; -} -.fa-sort-amount-asc:before { - content: "\f160"; -} -.fa-sort-amount-desc:before { - content: "\f161"; -} -.fa-sort-numeric-asc:before { - content: "\f162"; -} -.fa-sort-numeric-desc:before { - content: "\f163"; -} -.fa-thumbs-up:before { - content: "\f164"; -} -.fa-thumbs-down:before { - content: "\f165"; -} -.fa-youtube-square:before { - content: "\f166"; -} -.fa-youtube:before { - content: "\f167"; -} -.fa-xing:before { - content: "\f168"; -} -.fa-xing-square:before { - content: "\f169"; -} -.fa-youtube-play:before { - content: "\f16a"; -} -.fa-dropbox:before { - content: "\f16b"; -} -.fa-stack-overflow:before { - content: "\f16c"; -} -.fa-instagram:before { - content: "\f16d"; -} -.fa-flickr:before { - content: "\f16e"; -} -.fa-adn:before { - content: "\f170"; -} -.fa-bitbucket:before { - content: "\f171"; -} -.fa-bitbucket-square:before { - content: "\f172"; -} -.fa-tumblr:before { - content: "\f173"; -} -.fa-tumblr-square:before { - content: "\f174"; -} -.fa-long-arrow-down:before { - content: "\f175"; -} -.fa-long-arrow-up:before { - content: "\f176"; -} -.fa-long-arrow-left:before { - content: "\f177"; -} -.fa-long-arrow-right:before { - content: "\f178"; -} -.fa-apple:before { - content: "\f179"; -} -.fa-windows:before { - content: "\f17a"; -} -.fa-android:before { - content: "\f17b"; -} 
-.fa-linux:before { - content: "\f17c"; -} -.fa-dribbble:before { - content: "\f17d"; -} -.fa-skype:before { - content: "\f17e"; -} -.fa-foursquare:before { - content: "\f180"; -} -.fa-trello:before { - content: "\f181"; -} -.fa-female:before { - content: "\f182"; -} -.fa-male:before { - content: "\f183"; -} -.fa-gittip:before, -.fa-gratipay:before { - content: "\f184"; -} -.fa-sun-o:before { - content: "\f185"; -} -.fa-moon-o:before { - content: "\f186"; -} -.fa-archive:before { - content: "\f187"; -} -.fa-bug:before { - content: "\f188"; -} -.fa-vk:before { - content: "\f189"; -} -.fa-weibo:before { - content: "\f18a"; -} -.fa-renren:before { - content: "\f18b"; -} -.fa-pagelines:before { - content: "\f18c"; -} -.fa-stack-exchange:before { - content: "\f18d"; -} -.fa-arrow-circle-o-right:before { - content: "\f18e"; -} -.fa-arrow-circle-o-left:before { - content: "\f190"; -} -.fa-toggle-left:before, -.fa-caret-square-o-left:before { - content: "\f191"; -} -.fa-dot-circle-o:before { - content: "\f192"; -} -.fa-wheelchair:before { - content: "\f193"; -} -.fa-vimeo-square:before { - content: "\f194"; -} -.fa-turkish-lira:before, -.fa-try:before { - content: "\f195"; -} -.fa-plus-square-o:before { - content: "\f196"; -} -.fa-space-shuttle:before { - content: "\f197"; -} -.fa-slack:before { - content: "\f198"; -} -.fa-envelope-square:before { - content: "\f199"; -} -.fa-wordpress:before { - content: "\f19a"; -} -.fa-openid:before { - content: "\f19b"; -} -.fa-institution:before, -.fa-bank:before, -.fa-university:before { - content: "\f19c"; -} -.fa-mortar-board:before, -.fa-graduation-cap:before { - content: "\f19d"; -} -.fa-yahoo:before { - content: "\f19e"; -} -.fa-google:before { - content: "\f1a0"; -} -.fa-reddit:before { - content: "\f1a1"; -} -.fa-reddit-square:before { - content: "\f1a2"; -} -.fa-stumbleupon-circle:before { - content: "\f1a3"; -} -.fa-stumbleupon:before { - content: "\f1a4"; -} -.fa-delicious:before { - content: "\f1a5"; -} -.fa-digg:before { - content: "\f1a6"; -} -.fa-pied-piper-pp:before { - content: "\f1a7"; -} -.fa-pied-piper-alt:before { - content: "\f1a8"; -} -.fa-drupal:before { - content: "\f1a9"; -} -.fa-joomla:before { - content: "\f1aa"; -} -.fa-language:before { - content: "\f1ab"; -} -.fa-fax:before { - content: "\f1ac"; -} -.fa-building:before { - content: "\f1ad"; -} -.fa-child:before { - content: "\f1ae"; -} -.fa-paw:before { - content: "\f1b0"; -} -.fa-spoon:before { - content: "\f1b1"; -} -.fa-cube:before { - content: "\f1b2"; -} -.fa-cubes:before { - content: "\f1b3"; -} -.fa-behance:before { - content: "\f1b4"; -} -.fa-behance-square:before { - content: "\f1b5"; -} -.fa-steam:before { - content: "\f1b6"; -} -.fa-steam-square:before { - content: "\f1b7"; -} -.fa-recycle:before { - content: "\f1b8"; -} -.fa-automobile:before, -.fa-car:before { - content: "\f1b9"; -} -.fa-cab:before, -.fa-taxi:before { - content: "\f1ba"; -} -.fa-tree:before { - content: "\f1bb"; -} -.fa-spotify:before { - content: "\f1bc"; -} -.fa-deviantart:before { - content: "\f1bd"; -} -.fa-soundcloud:before { - content: "\f1be"; -} -.fa-database:before { - content: "\f1c0"; -} -.fa-file-pdf-o:before { - content: "\f1c1"; -} -.fa-file-word-o:before { - content: "\f1c2"; -} -.fa-file-excel-o:before { - content: "\f1c3"; -} -.fa-file-powerpoint-o:before { - content: "\f1c4"; -} -.fa-file-photo-o:before, -.fa-file-picture-o:before, -.fa-file-image-o:before { - content: "\f1c5"; -} -.fa-file-zip-o:before, -.fa-file-archive-o:before { - content: "\f1c6"; -} -.fa-file-sound-o:before, 
-.fa-file-audio-o:before { - content: "\f1c7"; -} -.fa-file-movie-o:before, -.fa-file-video-o:before { - content: "\f1c8"; -} -.fa-file-code-o:before { - content: "\f1c9"; -} -.fa-vine:before { - content: "\f1ca"; -} -.fa-codepen:before { - content: "\f1cb"; -} -.fa-jsfiddle:before { - content: "\f1cc"; -} -.fa-life-bouy:before, -.fa-life-buoy:before, -.fa-life-saver:before, -.fa-support:before, -.fa-life-ring:before { - content: "\f1cd"; -} -.fa-circle-o-notch:before { - content: "\f1ce"; -} -.fa-ra:before, -.fa-resistance:before, -.fa-rebel:before { - content: "\f1d0"; -} -.fa-ge:before, -.fa-empire:before { - content: "\f1d1"; -} -.fa-git-square:before { - content: "\f1d2"; -} -.fa-git:before { - content: "\f1d3"; -} -.fa-y-combinator-square:before, -.fa-yc-square:before, -.fa-hacker-news:before { - content: "\f1d4"; -} -.fa-tencent-weibo:before { - content: "\f1d5"; -} -.fa-qq:before { - content: "\f1d6"; -} -.fa-wechat:before, -.fa-weixin:before { - content: "\f1d7"; -} -.fa-send:before, -.fa-paper-plane:before { - content: "\f1d8"; -} -.fa-send-o:before, -.fa-paper-plane-o:before { - content: "\f1d9"; -} -.fa-history:before { - content: "\f1da"; -} -.fa-circle-thin:before { - content: "\f1db"; -} -.fa-header:before { - content: "\f1dc"; -} -.fa-paragraph:before { - content: "\f1dd"; -} -.fa-sliders:before { - content: "\f1de"; -} -.fa-share-alt:before { - content: "\f1e0"; -} -.fa-share-alt-square:before { - content: "\f1e1"; -} -.fa-bomb:before { - content: "\f1e2"; -} -.fa-soccer-ball-o:before, -.fa-futbol-o:before { - content: "\f1e3"; -} -.fa-tty:before { - content: "\f1e4"; -} -.fa-binoculars:before { - content: "\f1e5"; -} -.fa-plug:before { - content: "\f1e6"; -} -.fa-slideshare:before { - content: "\f1e7"; -} -.fa-twitch:before { - content: "\f1e8"; -} -.fa-yelp:before { - content: "\f1e9"; -} -.fa-newspaper-o:before { - content: "\f1ea"; -} -.fa-wifi:before { - content: "\f1eb"; -} -.fa-calculator:before { - content: "\f1ec"; -} -.fa-paypal:before { - content: "\f1ed"; -} -.fa-google-wallet:before { - content: "\f1ee"; -} -.fa-cc-visa:before { - content: "\f1f0"; -} -.fa-cc-mastercard:before { - content: "\f1f1"; -} -.fa-cc-discover:before { - content: "\f1f2"; -} -.fa-cc-amex:before { - content: "\f1f3"; -} -.fa-cc-paypal:before { - content: "\f1f4"; -} -.fa-cc-stripe:before { - content: "\f1f5"; -} -.fa-bell-slash:before { - content: "\f1f6"; -} -.fa-bell-slash-o:before { - content: "\f1f7"; -} -.fa-trash:before { - content: "\f1f8"; -} -.fa-copyright:before { - content: "\f1f9"; -} -.fa-at:before { - content: "\f1fa"; -} -.fa-eyedropper:before { - content: "\f1fb"; -} -.fa-paint-brush:before { - content: "\f1fc"; -} -.fa-birthday-cake:before { - content: "\f1fd"; -} -.fa-area-chart:before { - content: "\f1fe"; -} -.fa-pie-chart:before { - content: "\f200"; -} -.fa-line-chart:before { - content: "\f201"; -} -.fa-lastfm:before { - content: "\f202"; -} -.fa-lastfm-square:before { - content: "\f203"; -} -.fa-toggle-off:before { - content: "\f204"; -} -.fa-toggle-on:before { - content: "\f205"; -} -.fa-bicycle:before { - content: "\f206"; -} -.fa-bus:before { - content: "\f207"; -} -.fa-ioxhost:before { - content: "\f208"; -} -.fa-angellist:before { - content: "\f209"; -} -.fa-cc:before { - content: "\f20a"; -} -.fa-shekel:before, -.fa-sheqel:before, -.fa-ils:before { - content: "\f20b"; -} -.fa-meanpath:before { - content: "\f20c"; -} -.fa-buysellads:before { - content: "\f20d"; -} -.fa-connectdevelop:before { - content: "\f20e"; -} -.fa-dashcube:before { - content: "\f210"; 
-} -.fa-forumbee:before { - content: "\f211"; -} -.fa-leanpub:before { - content: "\f212"; -} -.fa-sellsy:before { - content: "\f213"; -} -.fa-shirtsinbulk:before { - content: "\f214"; -} -.fa-simplybuilt:before { - content: "\f215"; -} -.fa-skyatlas:before { - content: "\f216"; -} -.fa-cart-plus:before { - content: "\f217"; -} -.fa-cart-arrow-down:before { - content: "\f218"; -} -.fa-diamond:before { - content: "\f219"; -} -.fa-ship:before { - content: "\f21a"; -} -.fa-user-secret:before { - content: "\f21b"; -} -.fa-motorcycle:before { - content: "\f21c"; -} -.fa-street-view:before { - content: "\f21d"; -} -.fa-heartbeat:before { - content: "\f21e"; -} -.fa-venus:before { - content: "\f221"; -} -.fa-mars:before { - content: "\f222"; -} -.fa-mercury:before { - content: "\f223"; -} -.fa-intersex:before, -.fa-transgender:before { - content: "\f224"; -} -.fa-transgender-alt:before { - content: "\f225"; -} -.fa-venus-double:before { - content: "\f226"; -} -.fa-mars-double:before { - content: "\f227"; -} -.fa-venus-mars:before { - content: "\f228"; -} -.fa-mars-stroke:before { - content: "\f229"; -} -.fa-mars-stroke-v:before { - content: "\f22a"; -} -.fa-mars-stroke-h:before { - content: "\f22b"; -} -.fa-neuter:before { - content: "\f22c"; -} -.fa-genderless:before { - content: "\f22d"; -} -.fa-facebook-official:before { - content: "\f230"; -} -.fa-pinterest-p:before { - content: "\f231"; -} -.fa-whatsapp:before { - content: "\f232"; -} -.fa-server:before { - content: "\f233"; -} -.fa-user-plus:before { - content: "\f234"; -} -.fa-user-times:before { - content: "\f235"; -} -.fa-hotel:before, -.fa-bed:before { - content: "\f236"; -} -.fa-viacoin:before { - content: "\f237"; -} -.fa-train:before { - content: "\f238"; -} -.fa-subway:before { - content: "\f239"; -} -.fa-medium:before { - content: "\f23a"; -} -.fa-yc:before, -.fa-y-combinator:before { - content: "\f23b"; -} -.fa-optin-monster:before { - content: "\f23c"; -} -.fa-opencart:before { - content: "\f23d"; -} -.fa-expeditedssl:before { - content: "\f23e"; -} -.fa-battery-4:before, -.fa-battery:before, -.fa-battery-full:before { - content: "\f240"; -} -.fa-battery-3:before, -.fa-battery-three-quarters:before { - content: "\f241"; -} -.fa-battery-2:before, -.fa-battery-half:before { - content: "\f242"; -} -.fa-battery-1:before, -.fa-battery-quarter:before { - content: "\f243"; -} -.fa-battery-0:before, -.fa-battery-empty:before { - content: "\f244"; -} -.fa-mouse-pointer:before { - content: "\f245"; -} -.fa-i-cursor:before { - content: "\f246"; -} -.fa-object-group:before { - content: "\f247"; -} -.fa-object-ungroup:before { - content: "\f248"; -} -.fa-sticky-note:before { - content: "\f249"; -} -.fa-sticky-note-o:before { - content: "\f24a"; -} -.fa-cc-jcb:before { - content: "\f24b"; -} -.fa-cc-diners-club:before { - content: "\f24c"; -} -.fa-clone:before { - content: "\f24d"; -} -.fa-balance-scale:before { - content: "\f24e"; -} -.fa-hourglass-o:before { - content: "\f250"; -} -.fa-hourglass-1:before, -.fa-hourglass-start:before { - content: "\f251"; -} -.fa-hourglass-2:before, -.fa-hourglass-half:before { - content: "\f252"; -} -.fa-hourglass-3:before, -.fa-hourglass-end:before { - content: "\f253"; -} -.fa-hourglass:before { - content: "\f254"; -} -.fa-hand-grab-o:before, -.fa-hand-rock-o:before { - content: "\f255"; -} -.fa-hand-stop-o:before, -.fa-hand-paper-o:before { - content: "\f256"; -} -.fa-hand-scissors-o:before { - content: "\f257"; -} -.fa-hand-lizard-o:before { - content: "\f258"; -} -.fa-hand-spock-o:before { - content: 
"\f259"; -} -.fa-hand-pointer-o:before { - content: "\f25a"; -} -.fa-hand-peace-o:before { - content: "\f25b"; -} -.fa-trademark:before { - content: "\f25c"; -} -.fa-registered:before { - content: "\f25d"; -} -.fa-creative-commons:before { - content: "\f25e"; -} -.fa-gg:before { - content: "\f260"; -} -.fa-gg-circle:before { - content: "\f261"; -} -.fa-tripadvisor:before { - content: "\f262"; -} -.fa-odnoklassniki:before { - content: "\f263"; -} -.fa-odnoklassniki-square:before { - content: "\f264"; -} -.fa-get-pocket:before { - content: "\f265"; -} -.fa-wikipedia-w:before { - content: "\f266"; -} -.fa-safari:before { - content: "\f267"; -} -.fa-chrome:before { - content: "\f268"; -} -.fa-firefox:before { - content: "\f269"; -} -.fa-opera:before { - content: "\f26a"; -} -.fa-internet-explorer:before { - content: "\f26b"; -} -.fa-tv:before, -.fa-television:before { - content: "\f26c"; -} -.fa-contao:before { - content: "\f26d"; -} -.fa-500px:before { - content: "\f26e"; -} -.fa-amazon:before { - content: "\f270"; -} -.fa-calendar-plus-o:before { - content: "\f271"; -} -.fa-calendar-minus-o:before { - content: "\f272"; -} -.fa-calendar-times-o:before { - content: "\f273"; -} -.fa-calendar-check-o:before { - content: "\f274"; -} -.fa-industry:before { - content: "\f275"; -} -.fa-map-pin:before { - content: "\f276"; -} -.fa-map-signs:before { - content: "\f277"; -} -.fa-map-o:before { - content: "\f278"; -} -.fa-map:before { - content: "\f279"; -} -.fa-commenting:before { - content: "\f27a"; -} -.fa-commenting-o:before { - content: "\f27b"; -} -.fa-houzz:before { - content: "\f27c"; -} -.fa-vimeo:before { - content: "\f27d"; -} -.fa-black-tie:before { - content: "\f27e"; -} -.fa-fonticons:before { - content: "\f280"; -} -.fa-reddit-alien:before { - content: "\f281"; -} -.fa-edge:before { - content: "\f282"; -} -.fa-credit-card-alt:before { - content: "\f283"; -} -.fa-codiepie:before { - content: "\f284"; -} -.fa-modx:before { - content: "\f285"; -} -.fa-fort-awesome:before { - content: "\f286"; -} -.fa-usb:before { - content: "\f287"; -} -.fa-product-hunt:before { - content: "\f288"; -} -.fa-mixcloud:before { - content: "\f289"; -} -.fa-scribd:before { - content: "\f28a"; -} -.fa-pause-circle:before { - content: "\f28b"; -} -.fa-pause-circle-o:before { - content: "\f28c"; -} -.fa-stop-circle:before { - content: "\f28d"; -} -.fa-stop-circle-o:before { - content: "\f28e"; -} -.fa-shopping-bag:before { - content: "\f290"; -} -.fa-shopping-basket:before { - content: "\f291"; -} -.fa-hashtag:before { - content: "\f292"; -} -.fa-bluetooth:before { - content: "\f293"; -} -.fa-bluetooth-b:before { - content: "\f294"; -} -.fa-percent:before { - content: "\f295"; -} -.fa-gitlab:before { - content: "\f296"; -} -.fa-wpbeginner:before { - content: "\f297"; -} -.fa-wpforms:before { - content: "\f298"; -} -.fa-envira:before { - content: "\f299"; -} -.fa-universal-access:before { - content: "\f29a"; -} -.fa-wheelchair-alt:before { - content: "\f29b"; -} -.fa-question-circle-o:before { - content: "\f29c"; -} -.fa-blind:before { - content: "\f29d"; -} -.fa-audio-description:before { - content: "\f29e"; -} -.fa-volume-control-phone:before { - content: "\f2a0"; -} -.fa-braille:before { - content: "\f2a1"; -} -.fa-assistive-listening-systems:before { - content: "\f2a2"; -} -.fa-asl-interpreting:before, -.fa-american-sign-language-interpreting:before { - content: "\f2a3"; -} -.fa-deafness:before, -.fa-hard-of-hearing:before, -.fa-deaf:before { - content: "\f2a4"; -} -.fa-glide:before { - content: "\f2a5"; -} 
-.fa-glide-g:before { - content: "\f2a6"; -} -.fa-signing:before, -.fa-sign-language:before { - content: "\f2a7"; -} -.fa-low-vision:before { - content: "\f2a8"; -} -.fa-viadeo:before { - content: "\f2a9"; -} -.fa-viadeo-square:before { - content: "\f2aa"; -} -.fa-snapchat:before { - content: "\f2ab"; -} -.fa-snapchat-ghost:before { - content: "\f2ac"; -} -.fa-snapchat-square:before { - content: "\f2ad"; -} -.fa-pied-piper:before { - content: "\f2ae"; -} -.fa-first-order:before { - content: "\f2b0"; -} -.fa-yoast:before { - content: "\f2b1"; -} -.fa-themeisle:before { - content: "\f2b2"; -} -.fa-google-plus-circle:before, -.fa-google-plus-official:before { - content: "\f2b3"; -} -.fa-fa:before, -.fa-font-awesome:before { - content: "\f2b4"; -} -.fa-handshake-o:before { - content: "\f2b5"; -} -.fa-envelope-open:before { - content: "\f2b6"; -} -.fa-envelope-open-o:before { - content: "\f2b7"; -} -.fa-linode:before { - content: "\f2b8"; -} -.fa-address-book:before { - content: "\f2b9"; -} -.fa-address-book-o:before { - content: "\f2ba"; -} -.fa-vcard:before, -.fa-address-card:before { - content: "\f2bb"; -} -.fa-vcard-o:before, -.fa-address-card-o:before { - content: "\f2bc"; -} -.fa-user-circle:before { - content: "\f2bd"; -} -.fa-user-circle-o:before { - content: "\f2be"; -} -.fa-user-o:before { - content: "\f2c0"; -} -.fa-id-badge:before { - content: "\f2c1"; -} -.fa-drivers-license:before, -.fa-id-card:before { - content: "\f2c2"; -} -.fa-drivers-license-o:before, -.fa-id-card-o:before { - content: "\f2c3"; -} -.fa-quora:before { - content: "\f2c4"; -} -.fa-free-code-camp:before { - content: "\f2c5"; -} -.fa-telegram:before { - content: "\f2c6"; -} -.fa-thermometer-4:before, -.fa-thermometer:before, -.fa-thermometer-full:before { - content: "\f2c7"; -} -.fa-thermometer-3:before, -.fa-thermometer-three-quarters:before { - content: "\f2c8"; -} -.fa-thermometer-2:before, -.fa-thermometer-half:before { - content: "\f2c9"; -} -.fa-thermometer-1:before, -.fa-thermometer-quarter:before { - content: "\f2ca"; -} -.fa-thermometer-0:before, -.fa-thermometer-empty:before { - content: "\f2cb"; -} -.fa-shower:before { - content: "\f2cc"; -} -.fa-bathtub:before, -.fa-s15:before, -.fa-bath:before { - content: "\f2cd"; -} -.fa-podcast:before { - content: "\f2ce"; -} -.fa-window-maximize:before { - content: "\f2d0"; -} -.fa-window-minimize:before { - content: "\f2d1"; -} -.fa-window-restore:before { - content: "\f2d2"; -} -.fa-times-rectangle:before, -.fa-window-close:before { - content: "\f2d3"; -} -.fa-times-rectangle-o:before, -.fa-window-close-o:before { - content: "\f2d4"; -} -.fa-bandcamp:before { - content: "\f2d5"; -} -.fa-grav:before { - content: "\f2d6"; -} -.fa-etsy:before { - content: "\f2d7"; -} -.fa-imdb:before { - content: "\f2d8"; -} -.fa-ravelry:before { - content: "\f2d9"; -} -.fa-eercast:before { - content: "\f2da"; -} -.fa-microchip:before { - content: "\f2db"; -} -.fa-snowflake-o:before { - content: "\f2dc"; -} -.fa-superpowers:before { - content: "\f2dd"; -} -.fa-wpexplorer:before { - content: "\f2de"; -} -.fa-meetup:before { - content: "\f2e0"; -} -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - border: 0; -} -.sr-only-focusable:active, -.sr-only-focusable:focus { - position: static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto; -} diff --git a/doc/_static/font-source-code-pro.css b/doc/_static/font-source-code-pro.css deleted file mode 100755 index 
9ad812d1e20..00000000000 --- a/doc/_static/font-source-code-pro.css +++ /dev/null @@ -1,167 +0,0 @@ -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 200; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-ExtraLight.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-ExtraLight.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-ExtraLight.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-ExtraLight.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-ExtraLight.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 200; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-ExtraLightIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-ExtraLightIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-ExtraLightIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-ExtraLightIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-ExtraLightIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 300; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-Light.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Light.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-Light.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Light.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Light.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 300; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-LightIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-LightIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-LightIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-LightIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-LightIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 400; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-Regular.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Regular.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 400; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-It.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-It.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-It.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 500; - font-style: normal; - font-stretch: normal; - src: 
url('./fonts/source_code_pro/EOT/SourceCodePro-Medium.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Medium.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-Medium.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Medium.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Medium.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 500; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-MediumIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-MediumIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-MediumIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-MediumIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-MediumIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 600; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 600; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 700; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-Bold.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Bold.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 700; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 900; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-Black.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-Black.ttf.woff2') format('woff2'), - 
url('./fonts/source_code_pro/WOFF/SourceCodePro-Black.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-Black.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-Black.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Code Pro'; - font-weight: 900; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_code_pro/EOT/SourceCodePro-BlackIt.eot') format('embedded-opentype'), - url('./fonts/source_code_pro/WOFF2/SourceCodePro-BlackIt.ttf.woff2') format('woff2'), - url('./fonts/source_code_pro/WOFF/SourceCodePro-BlackIt.otf.woff') format('woff'), - url('./fonts/source_code_pro/OTF/SourceCodePro-BlackIt.otf') format('opentype'), - url('./fonts/source_code_pro/TTF/SourceCodePro-BlackIt.ttf') format('truetype'); -} diff --git a/doc/_static/font-source-sans-pro.css b/doc/_static/font-source-sans-pro.css deleted file mode 100755 index 051af7f60be..00000000000 --- a/doc/_static/font-source-sans-pro.css +++ /dev/null @@ -1,131 +0,0 @@ -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 200; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-ExtraLight.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-ExtraLight.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-ExtraLight.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-ExtraLight.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 200; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-ExtraLightIt.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-ExtraLightIt.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-ExtraLightIt.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-ExtraLightIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 300; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-Light.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-Light.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-Light.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-Light.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 300; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-LightIt.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-LightIt.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-LightIt.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-LightIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 400; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 400; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2') format('woff2'), 
- url('./fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-It.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-It.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 600; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 600; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 700; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 700; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 900; - font-style: normal; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-Black.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-Black.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-Black.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-Black.ttf') format('truetype'); -} - -@font-face{ - font-family: 'Source Sans Pro'; - font-weight: 900; - font-style: italic; - font-stretch: normal; - src: url('./fonts/source_sans_pro/WOFF2/SourceSansPro-BlackIt.ttf.woff2') format('woff2'), - url('./fonts/source_sans_pro/WOFF/SourceSansPro-BlackIt.otf.woff') format('woff'), - url('./fonts/source_sans_pro/OTF/SourceSansPro-BlackIt.otf') format('opentype'), - url('./fonts/source_sans_pro/TTF/SourceSansPro-BlackIt.ttf') format('truetype'); -} diff --git a/doc/_static/fonts/fontawesome/fontawesome-webfont.eot b/doc/_static/fonts/fontawesome/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca953f..00000000000 Binary files a/doc/_static/fonts/fontawesome/fontawesome-webfont.eot and /dev/null differ diff --git a/doc/_static/fonts/fontawesome/fontawesome-webfont.ttf b/doc/_static/fonts/fontawesome/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2fa11..00000000000 Binary files a/doc/_static/fonts/fontawesome/fontawesome-webfont.ttf and /dev/null differ diff --git 
a/doc/_static/fonts/fontawesome/fontawesome-webfont.woff b/doc/_static/fonts/fontawesome/fontawesome-webfont.woff deleted file mode 100644 index 400014a4b06..00000000000 Binary files a/doc/_static/fonts/fontawesome/fontawesome-webfont.woff and /dev/null differ diff --git a/doc/_static/fonts/fontawesome/fontawesome-webfont.woff2 b/doc/_static/fonts/fontawesome/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc60404..00000000000 Binary files a/doc/_static/fonts/fontawesome/fontawesome-webfont.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Bold.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Bold.eot deleted file mode 100644 index 783b4f518f5..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Bold.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot deleted file mode 100644 index dc0087b5b6f..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-BoldIt.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-It.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-It.eot deleted file mode 100644 index f4aa7ae9a7a..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-It.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Regular.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Regular.eot deleted file mode 100644 index 121176c4661..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Regular.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot deleted file mode 100644 index 8aa922acfbd..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-Semibold.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot b/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot deleted file mode 100644 index b887a66f48b..00000000000 Binary files a/doc/_static/fonts/source_code_pro/EOT/SourceCodePro-SemiboldIt.eot and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Bold.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Bold.otf deleted file mode 100644 index 0b273d9877e..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Bold.otf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf deleted file mode 100644 index 2acc97490ba..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-BoldIt.otf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-It.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-It.otf deleted file mode 100644 index d0f1790ed1b..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-It.otf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Regular.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Regular.otf deleted file mode 100644 index 1bae0027fff..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Regular.otf and /dev/null differ diff --git 
a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf deleted file mode 100644 index a61686ccafe..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-Semibold.otf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf b/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf deleted file mode 100644 index f419ab91731..00000000000 Binary files a/doc/_static/fonts/source_code_pro/OTF/SourceCodePro-SemiboldIt.otf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf deleted file mode 100644 index 5a5be2fd456..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Bold.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf deleted file mode 100644 index 0b6d2122e24..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-BoldIt.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-It.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-It.ttf deleted file mode 100644 index 437cbe16d60..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-It.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf deleted file mode 100644 index c58300335a7..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Regular.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf deleted file mode 100644 index f57d68cb1bc..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-Semibold.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf b/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf deleted file mode 100644 index 6c7eb455d2a..00000000000 Binary files a/doc/_static/fonts/source_code_pro/TTF/SourceCodePro-SemiboldIt.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff deleted file mode 100644 index b64f2cfc7b4..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Bold.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff deleted file mode 100644 index 69a466dbafa..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-BoldIt.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff deleted file mode 100644 index a8b8cea137f..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-It.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff deleted file mode 100644 index 68c98f10503..00000000000 Binary files 
a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Regular.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff deleted file mode 100644 index 3db105e00e4..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-Semibold.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff b/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff deleted file mode 100644 index 3822bc2ffc0..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF/SourceCodePro-SemiboldIt.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2 deleted file mode 100644 index 1331a0a465b..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Bold.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2 deleted file mode 100644 index ed564da7722..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-BoldIt.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2 deleted file mode 100644 index 443d16f4bae..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-It.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2 deleted file mode 100644 index d97cd54936a..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Regular.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2 deleted file mode 100644 index c4134ee7d8f..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-Semibold.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2 b/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2 deleted file mode 100644 index 92ebea1758b..00000000000 Binary files a/doc/_static/fonts/source_code_pro/WOFF2/SourceCodePro-SemiboldIt.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf deleted file mode 100644 index 8665855c888..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Bold.otf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf deleted file mode 100644 index cec364ebc3b..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-BoldIt.otf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-It.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-It.otf deleted file mode 100644 index 2d06c4a6c4b..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-It.otf and /dev/null differ diff --git 
a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf deleted file mode 100644 index 8d207399e5c..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Regular.otf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf deleted file mode 100644 index 4ace6296211..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-Semibold.otf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf b/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf deleted file mode 100644 index 1810d90dfb9..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/OTF/SourceSansPro-SemiboldIt.otf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf deleted file mode 100644 index 4d80fce21b2..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Bold.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf deleted file mode 100644 index 0ad479d44d7..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-BoldIt.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-It.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-It.ttf deleted file mode 100644 index b7c0fce787d..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-It.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf deleted file mode 100644 index a8eae164ea1..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Regular.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf deleted file mode 100644 index 9e6b1b52956..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-Semibold.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf b/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf deleted file mode 100644 index f55e601425f..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/TTF/SourceSansPro-SemiboldIt.ttf and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff deleted file mode 100644 index 0a086ef334b..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Bold.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff deleted file mode 100644 index 1e5fd3edd41..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-BoldIt.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff deleted file mode 100644 index cf29a4924d0..00000000000 Binary files 
a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-It.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff deleted file mode 100644 index 94659aeb739..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Regular.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff deleted file mode 100644 index 691dccfadef..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-Semibold.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff b/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff deleted file mode 100644 index 1a0ad59255a..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF/SourceSansPro-SemiboldIt.otf.woff and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2 deleted file mode 100644 index 30e01df9cfa..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Bold.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2 deleted file mode 100644 index fba3625dad6..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-BoldIt.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2 deleted file mode 100644 index 8b0348a9aae..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-It.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2 deleted file mode 100644 index df1d2115466..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Regular.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2 deleted file mode 100644 index 0fc83eda4e9..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-Semibold.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2 b/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2 deleted file mode 100644 index 5dc3d4c8e70..00000000000 Binary files a/doc/_static/fonts/source_sans_pro/WOFF2/SourceSansPro-SemiboldIt.ttf.woff2 and /dev/null differ diff --git a/doc/_static/fr.svg b/doc/_static/fr.svg deleted file mode 100644 index b17c8ad7c3c..00000000000 --- a/doc/_static/fr.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - diff --git a/doc/_static/institution_logos/Telecom_Paris_Tech.png b/doc/_static/institution_logos/Telecom_Paris_Tech.png deleted file mode 100644 index 32eb616fe43..00000000000 Binary files a/doc/_static/institution_logos/Telecom_Paris_Tech.png and /dev/null differ diff --git a/doc/_static/institution_logos/Telecom_Paris_Tech.svg b/doc/_static/institution_logos/Telecom_Paris_Tech.svg new file mode 
100644 index 00000000000..493a00d5e79 --- /dev/null +++ b/doc/_static/institution_logos/Telecom_Paris_Tech.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/_static/mne_logo.svg b/doc/_static/mne_logo.svg index c38d5586c3b..4fbd6baf3bf 100644 --- a/doc/_static/mne_logo.svg +++ b/doc/_static/mne_logo.svg @@ -1,744 +1,176 @@ - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - + + + + + + + + diff --git a/doc/_static/mne_logo_small.svg b/doc/_static/mne_logo_small.svg index 1d212edde7f..f1b7430d484 100644 --- a/doc/_static/mne_logo_small.svg +++ b/doc/_static/mne_logo_small.svg @@ -1,70 +1,82 @@ - - - - - - + + + + + + + + + - - - - - - + + + + + diff --git a/doc/_static/style.css b/doc/_static/style.css index b3855eefcdb..5b973d4de03 100644 --- a/doc/_static/style.css +++ b/doc/_static/style.css @@ -1,596 +1,197 @@ :root { - --a-color-dark: rgb(31, 92, 153); - --a-color: rgb(41, 122, 204); - --a-color-light: rgb(102, 179, 255); - --a-color-lighter: rgb(204, 230, 255); - --text-color: #2c3e50; - --carousel-radius: 12px; -} -html { - position: relative; - min-height: 100%; -} - -/* Syntax reset */ -.highlight a { - color: var(--a-color); -} -.highlight * { - color: inherit; - font-weight: inherit; - font-style: inherit; - background-color: #fafafa; -} - -/* overrides of bootstrap fonts */ -body { - max-width: unset !important; - min-width: 300px !important; - font-family: "Source Sans Pro", sans-serif; - margin-bottom: 150px; -} -div.body { - max-width: unset !important; - min-width: 300px !important; -} -.footer { - width: 100%; - background-color: #fff; - position: absolute; - bottom: 0; - height: 140px; -} -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: "Source Sans Pro", sans-serif; -} -code, -kbd, -pre, -samp { - font-family: "Source Code Pro", monospace; - font-size: 100%; -} -/* reduce horizontal padding for inline code (bootstrap defaults to 4px) */ -p code { - padding: 0 2px 0; -} -/* use semibold instead of bold for API links */ -code.xref, -a code { - font-weight: 600; -} -/* .. 
unless in a param list */ -span.classifier a.reference code.literal { - font-weight: bold; -} -/* use semibold weight for version dropdown */ -.btn { - font-weight: 600; -} -/* Modify our definition lists to be more like sklearn */ -dl.field-list dt.field-odd, -dt.field-even { - border-right-style: solid; - border-width: 1px; - border-color: var(--a-color-lighter); -} -dl.field-list dd dl dd { - padding-left: 30px; -} -dl.field-list dt, -dd { - margin-top: 10px; - margin-bottom: 10px; -} -.navbar-version { - display: none; -} + --pst-font-size-base: 16px; + --pst-font-size-milli: 13px; + --pst-font-family-base: 'Source Sans Pro', var(--pst-font-family-base-system); + --pst-font-family-heading: var(--pst-font-family-base); + --pst-font-family-monospace: 'Source Code Pro', var(--pst-font-family-monospace-system); -.navbar-collapse { - max-height: 600px !important; } -a { - color: var(--a-color); - text-decoration: none; +/* ************************************************************ Sphinx fixes */ +dl.field-list { + grid-template-columns: auto 1fr; } -a:hover { - color: var(--a-color-light); -} +/* ********************************************************** Sphinx-gallery */ -blockquote { - font-size: 100% !important; +/* backreference links: restore hover decoration that SG removes */ +a.sphx-glr-backref-instance:hover { + text-decoration: underline; } - -.devbar { - text-align: center; - padding: 5px; - margin-bottom: 5px; - border-radius: 0 0 4px 4px !important; +/* backreference links: make non-MNE func/meth calls resemble regular code */ +a[class^="sphx-glr-backref-module"] { + color: rgb(var(--pst-color-text-base)); } - -.devbar a { - color: #fff; - font-weight: bold; +/* backreference links: make MNE calls bold and colorful */ +a[class^="sphx-glr-backref-module-mne"] { + font-weight: 600; + color: rgb(var(--pst-color-link)); +} +/* suppress redundant note at top of every tutorial and signature at the end */ +div.sphx-glr-download-link-note, +p.sphx-glr-signature { + visibility: hidden; + height: 0; + margin: 0; + padding: 0; +} +/* script/notebook download buttons */ +.sphx-glr-download a.download { + background-image: none; + background-color: rgba(var(--pst-color-info), 0.1); + border-color: rgb(var(--pst-color-info)); +} +/* Report embedding */ +iframe.sg_report { + width: 95%; + height: 70vh; + margin: 20px auto; + display: block; + border-style: solid; } - -.pad-top-30 { - padding-top: 30px; +/* gallery thumbnail size */ +.sphx-glr-thumbcontainer { + min-width: 160px; + height: 250px; } -div.crop-wrapper { - width: 380px; - height: 190px; - overflow: hidden; +/* ******************************** make HTML'd pandas dataframes scrollable */ +table.dataframe { + display: block; + overflow: auto; } -a.button-front { - width: 32%; - height: 100%; - margin: 5px; - float: left; - background-color: #e7e7e7; - color: black; - border: 1px solid; - font-size: larger; - display: flex; - align-items: center; - justify-content: center; +/* ********************************* Long API titles need to wrap for mobile */ +div[id^="mne-"] h1, +div[id^="examples-using-"] h2 { + word-break: break-word; } -.anchor-doc { - position: relative; - padding-top: 200px !important; - display: none !important; +/* ******************************************* in-text sidebar callout boxes */ +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + padding: 7px; + width: 40%; + float: right; + clear: right; + overflow-x: auto; + /* above copied from div.sidebar in basic.css; below are our overrides */ + 
background-color: rgba(var(--pst-color-info), 0.1); + border: 1px solid rgb(var(--pst-color-info)); + border-radius: 4px; } -a code { - color: var(--a-color); -} -a:hover code { - color: var(--a-color-light); +/* **************************************************************** homepage */ +img.logo { + max-width: 360px; + width: 100%; } -/* Background is light */ -.alert-info a, -.alert-info a code { - color: var(--a-color-dark); -} -.alert-info a:hover, -.alert-info a:hover code { - color: var(--a-color); -} -.alert-info { - background-color: var(--a-color-lighter); - color: rgb(0, 0, 0); - border: none; +/* ************************************* homepage quick links & funders list */ +ul.quicklinks { + font-weight: 600; } -.warning { - background-color: #e74c3c; - color: white; +ul.quicklinks a:hover { + text-decoration: none; } -.warning a, -.warning a code { - color: rgb(255, 238, 0); +ul.funders li { + margin-left: 36px; + text-indent: -36px; + padding-bottom: 9px; } -.warning a:hover, -.warning a:hover code { - color: rgb(255, 200, 60); +ul.funders li img { + width: 30px; + max-height: 24px; + object-fit: contain; } -blockquote { - padding: 0 0 0 15px; - margin: 0 0 20px; - border-left: 5px solid #eeeeee; +/* these two also affect collapsible divs */ +h5.card-header { + margin-top: 0px; + margin-bottom: 0px; } - -.span.box { - width: 47%; - height: 230px; - float: left; - border-radius: 20px; - background: rgb(242, 249, 255); - border-style: solid; - border-color: rgb(143, 173, 204); - margin: 5px; - padding: 0px 0px 0px 10px; -} - -.span h2 { - background: rgb(242, 249, 255); -} -/* OVERRIDES */ -div.sphx-glr-download { - width: inherit; -} -div.sphx-glr-footer { - width: fit-content; -} -div.sphx-glr-download a { - background-color: rgb(230, 242, 255); - background-image: inherit; - border: 1px solid #000; - color: inherit; - width: fit-content; - display: inherit; - font-weight: inherit; - padding: 0px; - text-align: center; -} -div.sphx-glr-download a:hover { - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), - 0 1px 5px rgba(0, 0, 0, 0.25); - text-decoration: none; - background-image: none; - background-color: inherit; -} -/* hide the top link */ -div.sphx-glr-download-link-note { - height: 0px; - visibility: hidden; -} -dt:target, -.highlighted { - background-color: rgb(255, 238, 0); -} -dt:target code { - color: inherit !important; - background-color: inherit !important; -} -div.sidebar { - background-color: rgb(221, 236, 236) !important; - border: 1px solid rgb(164, 195, 195) !important; - border-radius: 4px; -} -.label { - /* Necessary for multiple refs, from bootstrap.min.css:7 */ - color: var(--text-color); -} -/* Override bootstrap.min.css to avoid .. 
container:: padding problems */ -.nosize { - width: auto; -} -.nopad { - padding: 0px !important; -} -.halfpad { - margin-left: 1%; - margin-right: 1%; - width: 48%; - padding: 0px; - margin: 10px; - float: left; -} -.table.midvalign td { - vertical-align: middle; - border-width: 2px; -} -.table.midvalign th { - border-width: 2px; - border-top-width: 2px; -} -/* our carousel */ -div.carousel-outer { - padding-top: 10px; -} -div.slide { - border-radius: var(--carousel-radius); - overflow: hidden; -} -div.carousel .chopper { - border-radius: var(--carousel-radius); - overflow: hidden; - min-height: 275px; - width: 100%; - background-size: cover; - background-position: center; - background-repeat: no-repeat; -} -div.jumbotron { - background-color: #fff0; - padding-top: 12px; - padding-bottom: 12px; -} -.carousel-control.left { - background-image: linear-gradient( - to right, - #aaa 0, - rgba(0, 0, 0, 0.0001) 100% - ); - border-radius: var(--carousel-radius); -} -.carousel-control.right { - background-image: linear-gradient( - to left, - #aaa 0, - rgba(0, 0, 0, 0.0001) 100% - ); - border-radius: var(--carousel-radius); +h5.card-header::before { + height: 0px; + margin-top: 0px; } -.cont { - width: 100%; - display: table; - border-spacing: 20px 5px; -} -.cont:before { - content: ""; -} -.cont:after { - content: ""; -} -.btn-cont { - display: table-cell; - width: 100%; -} -.carousel-caption { - background-color: #444a; - border-radius: var(--carousel-radius); - padding-left: 15px; - padding-right: 15px; - text-shadow: none; -} -.carousel-caption h3 { - font-size: 24px; - font-weight: 600; -} -.carousel-caption p { - font-weight: 300; - font-size: 18px; -} -.topmargin { - margin-top: 10px; -} -.bottommargin { - margin-bottom: 10px; -} -h4.list-group-item-heading { - font-size: 15px; -} -.table-like { - display: table; -} -.cell-like { - display: table-cell; - vertical-align: middle; - float: none; -} -.limitedwidth { - max-width: 1024px; - float: none; - margin: auto; -} -/* Fix collapsing */ -@media (max-width: 991px) { - .navbar-header { - float: none; - } - .navbar-left, - .navbar-right { - float: none !important; - } - .navbar-toggle { - display: block; - } - .navbar-collapse { - border-top: 1px solid transparent; - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); - } - .navbar-fixed-top { - top: 0; - border-width: 0 0 1px; - } - .navbar-collapse.collapse { - display: none !important; - } - .navbar-nav { - float: none !important; - margin-top: 7.5px; - } - .navbar-nav > li { - float: none; - } - .navbar-nav > li > a { - padding-top: 10px; - padding-bottom: 10px; - } - .collapse.in { - display: block !important; - } -} -details.example_details summary { - font-weight: bold; -} -.panel { - margin-bottom: 3px; +/* ************************************************* dev version warning bar */ +.devbar { + /* body top padding minus navbar height; */ + /*might be possible to calc from theme variables */ + margin-top: -20px; } -.skinnytable { - width: auto; -} -.skinnytable thead { - border-bottom: 1px solid black; +/* ******************************************************** version dropdown */ +.dropdown { + padding: 0 .5rem; /* match other items in the hamburger menu */ } -/* Unify color of refs and classes in parameter lists */ -span.classifier a span.xref, -span.classifier a code.docutils.literal.notranslate, -span.classifier a.reference.external { - color: var(--a-color); -} -span.classifier a:hover span.xref, -span.classifier a:hover code.docutils.literal.notranslate, -span.classifier 
a.reference.external:hover { - color: var(--a-color-light); +.dropdown-toggle { + font-weight: 600; } -/* Allow easier copy-paste in code blocks */ -div.highlight-console div.highlight span.gp { - -webkit-touch-callout: none; /* iOS Safari */ - -webkit-user-select: none; /* Safari */ - -khtml-user-select: none; /* Konqueror HTML */ - -moz-user-select: none; /* Firefox */ - -ms-user-select: none; /* Internet Explorer/Edge */ - user-select: none; /* Non-prefixed version, currently - supported by Chrome and Opera */ -} -/* Callout */ -.callout { - padding: 20px; - margin: 20px 0; - border: 1px solid #eee; - border-left-width: 5px; - border-left-color: #ddd; - /* background-color: #eee; */ -} -.callout h3:first-child { - margin-top: 0; -} -.callout p:last-child { - margin-bottom: 0; -} -dl.cmd-list span.option { - white-space: nowrap; -} -dl.cmd-list dt, -dl.cmd-list dd { - margin-top: 5px; - margin-bottom: 5px; - padding-top: 0px; - padding-bottom: 5px; - text-align: left; - border-bottom: 1px solid #ddd; -} -dl.cmd-list dt::after { - visibility: hidden; -} -/* Attributes / methods not taking up entire width ends up looking odd */ -dd table.align-center { - width: 100%; -} -/* Sphinx-gallery Report embedding */ -iframe.sg_report { - width: 95%; - height: 70vh; - margin: 20px auto; - display: block; - border-style: solid; -} -/* institutions logos in footer */ -div.institutions { - text-align: center; - margin-top: 3px; - margin-bottom: 3px; -} -div.institutions ul { - display: grid; - line-height: 48px; - grid-template-columns: repeat(auto-fill, 59px); - justify-content: center; -} -div.institutions a { - border: 1px; -} -img.institution { - vertical-align: middle; - max-height: 42px; - max-width: 52px; -} -img.logo { - max-width: 360px; - width: 100%; -} -img.hidden { - visibility: hidden; +/* ***************************************************** front page carousel */ +div.frontpage-gallery { + overflow: hidden; + height: 180px; + justify-content: center; } -div.border-top { - border-top: 1px solid #ccc; +div.frontpage-gallery a { + text-decoration: none; + color: rgb(var(--pst-color-text-base)); } -.hidden { - display: none; -} -ul.list-funding { - padding-left: 0; - list-style: none; -} -ul.list-funding li { - padding-left: 28px; - margin-top: 0px; - margin-bottom: 8px; - min-height: 24px; -} -ul.list-funding li img { - float: left; - position: relative; - left: -28px; - top: 0px; - width: 24px; - margin: 0px -24px 0px 0px; - clear: both; -} -ul.list-funding li p { - font-size: 12px; - margin-bottom: 0px; -} -/* Pandas DataFrame _repr_html_ */ -table.dataframe th, -td { - padding: 5px; +div.frontpage-gallery img.card-img { + transform: scale(1.8); + transform-origin: 40% 20%; + opacity: 0.2; + transition: 400ms ease-out; } - -/* Sphinx-gallery backreference links */ -a.sphx-glr-backref-instance:hover { - /* restore hover decoration that SG removes */ - text-decoration: underline; +div.frontpage-gallery:hover img.card-img { + transform: scale(1.2); + opacity: 1.0; + transition: 400ms ease-out; } -a[class^="sphx-glr-backref-module"] { - /* make non-MNE instances look like regular code */ - color: var(--text-color); +div.frontpage-gallery .fadeout { + opacity: 1.0; + transition: 200ms linear; } -a[class^="sphx-glr-backref-module"]:hover { - color: var(--a-color); +div.frontpage-gallery:hover .fadeout { + opacity: 0.0; + transition: 200ms linear; } -a[class^="sphx-glr-backref-module-mne"] { - /* make all MNE things bold */ - font-weight: 600; - color: var(--a-color); + +/* 
****************************************************** navbar quick links */ + +i.fa-github-square:before { + color: #000; } -a[class^="sphx-glr-backref-module-mne"]:hover { - color: var(--a-color-light); +i.fa-twitter-square:before { + color: #55acee; } -/* Long API titles need to wrap for mobile */ -div[id^="mne-"] h1 { - word-break: break-word; +i.fa-discourse:before { + color: #231e20; } -div[id^="examples-using-"] h2 { - word-break: break-word; +i.fa-discord:before { + color: #7289da; /* 99aab5 is also in-brand for discord */ } -/* Avoid awkward extra spacing in .. contents:: */ -div.contents ul li p { - margin: 0 0 0px; + +/* ************************************************* Previous / Next buttons */ +.prev-next-bottom a.left-prev:before { + content:"❮\00A0" } -/* Avoid extra spacing in our nested version */ -h3.panel-title p { - margin: 0 0 0px; +.prev-next-bottom a.right-next:after { + content:"\00A0❯" } -/* Disable hyphenation in API reference table for Webkit-based browsers - to work around alignment bug */ -#python-api-reference table p { - -webkit-hyphens: none; +.prev-next-bottom a.right-next { + text-align: right; } -.section h5 { - font-weight: bold; +/* *********************************************************** miscellaneous */ +.hidden { + display: none; +} +img.hidden { + visibility: hidden; } diff --git a/doc/_static/us.svg b/doc/_static/us.svg deleted file mode 100644 index 95e707b4106..00000000000 --- a/doc/_static/us.svg +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/doc/_templates/copyright.html b/doc/_templates/copyright.html new file mode 100644 index 00000000000..abea05537ff --- /dev/null +++ b/doc/_templates/copyright.html @@ -0,0 +1 @@ +

© Copyright {{ copyright }}
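For reference, the ``{{ copyright }}`` placeholder in this new ``copyright.html`` partial is filled from the standard ``copyright`` value that Sphinx exposes to every Jinja template. A minimal ``doc/conf.py`` sketch of where that string comes from (the wording and year range here are illustrative, not the project's actual setting, and how the partial is registered with the theme is theme-specific and not shown):

# doc/conf.py (sketch) -- placeholder values only, not the real configuration.
from datetime import date

# Sphinx passes this config value into Jinja templates as ``{{ copyright }}``,
# which is what the copyright.html partial renders.
copyright = f"2012-{date.today().year}, MNE Developers"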

diff --git a/doc/_templates/homepage.html b/doc/_templates/homepage.html new file mode 100644 index 00000000000..963906cadb8 --- /dev/null +++ b/doc/_templates/homepage.html @@ -0,0 +1,44 @@ + +
+
+ {% for item in carousel %} + + {% endfor %} +
+
+ +
+

Funders

+
+ {% for item in funders -%} +
+ + {{ item.title }} + +
+ {% endfor %} +
+
+ +
+

Supporting institutions

+
+ {% for inst in institutions -%} +
+ + {{ inst.name }} + +
+ {% endfor %} +
+
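The ``carousel``, ``funders``, and ``institutions`` variables that ``homepage.html`` iterates over have to be supplied to the page context. One common way is Sphinx's ``html_context`` in ``doc/conf.py``; only ``title`` (funders) and ``name`` (institutions) are taken from the template itself, while the extra fields such as ``img`` and ``url`` and the example entries are assumptions, and the actual changeset may populate the context differently:

# doc/conf.py (hypothetical sketch) -- entries and the ``img``/``url`` fields
# are illustrative; the template only guarantees ``item.title`` and ``inst.name``.
html_context = {
    "carousel": [
        {"title": "Source estimation",
         "url": "auto_tutorials/index.html",
         "img": "source_estimate.png"},
    ],
    "funders": [
        {"title": "National Institutes of Health",
         "img": "nih.png",
         "url": "https://www.nih.gov/"},
    ],
    "institutions": [
        {"name": "Massachusetts General Hospital",
         "img": "MGH.png",
         "url": "https://www.massgeneral.org/"},
    ],
}

Anything placed in ``html_context`` is merged into the template namespace, so the loops in ``homepage.html`` can read these lists directly.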
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 8ba519efa2e..648ff6dab90 100755 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -1,89 +1,33 @@ -{% extends "!layout.html" %} +{%- extends 'pydata_sphinx_theme/layout.html' %} {% block extrahead %} - - - -{% if use_google_analytics|tobool %} - -{% endif %} - - - - - - - -{% if use_media_buttons|tobool %} - -{% endif %} - -{{ super() }} - + {{ super() }} {% endblock %} -{% block relbar2 %}{% endblock %} - -{% block relbar1 %} -{% if build_dev_html|tobool %} -
-This is documentation for the unstable development version of MNE-Python, -available here. -Or, switch to documentation for the current stable version. - + +{% block docs_toc %} +
+ {% if meta is defined and not (meta is not none and 'notoc' in meta) %} + {% for toc_item in theme_page_sidebar_items %} +
+ {% include toc_item %} +
+ {% endfor %} + {% endif %}
-{% endif %} - -{{ super() }} {% endblock %} -{# put the sidebar before the body #} -{% block sidebar1 %}{% endblock %} -{% block sidebar2 %}{% endblock %} +{% block docs_body %} +
+ {% block body %} {% endblock %} + {% if pagename == 'index' %} + {%- include 'homepage.html' -%} + {% endif %} +
+{% endblock %} -{%- block footer %} -
-
-
-
    -
  • Massachusetts General Hospital
  • -
  • Athinoula A. Martinos Center for Biomedical Imaging
  • -
  • Harvard Medical School
  • -
  • Massachusetts Institute of Technology
  • -
  • New York University
  • -
  • Commissariat à l’énergie atomique et aux énergies alternatives

  • -
  • Aalto-yliopiston perustieteiden korkeakoulu
  • -
  • Télécom ParisTech
  • -
  • University of Washington
  • -
  • Institut du Cerveau et de la Moelle épinière
  • -
  • Boston University
  • -
  • Institut national de la santé et de la recherche médicale
  • -
  • Forschungszentrum Jülich
  • -
  • Technische Universität Ilmenau
  • -
  • Berkeley Institute for Data Science
  • -
  • Institut national de recherche en informatique et en automatique
  • -
  • Aarhus Universitet
  • -
  • Karl-Franzens-Universität Graz
  • -
-

© Copyright 2012-2020, MNE Developers. Last updated on 2020-03-27.

-
-
- +{%- block scripts_end %} + + {{ super() }} {%- endblock %} diff --git a/doc/_templates/navbar.html b/doc/_templates/navbar.html deleted file mode 100644 index 841891a9400..00000000000 --- a/doc/_templates/navbar.html +++ /dev/null @@ -1,34 +0,0 @@ -{% extends "!navbar.html" %} - -{% block navbartoc %} - - - -{% endblock %} diff --git a/doc/_templates/sidebar-quicklinks.html b/doc/_templates/sidebar-quicklinks.html new file mode 100644 index 00000000000..68377f87c8e --- /dev/null +++ b/doc/_templates/sidebar-quicklinks.html @@ -0,0 +1,12 @@ +
+
Version {{ release }}
+ +
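``{{ release }}`` in ``sidebar-quicklinks.html`` is the standard Sphinx ``release`` config value, so the partial shows the documentation version above the quick links. Wiring a custom sidebar template up is normally done through ``html_sidebars`` in ``doc/conf.py``; the page pattern below is an assumption, and the real changeset may attach the template through the theme's own sidebar settings instead:

# doc/conf.py (sketch) -- which pages get the quick-links sidebar is an
# assumption; adjust the pattern (or use "**") as needed.
html_sidebars = {
    "index": ["sidebar-quicklinks.html"],  # show the quick links on the homepage
}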
diff --git a/doc/_templates/version-switcher.html b/doc/_templates/version-switcher.html new file mode 100644 index 00000000000..235dcf4f5c8 --- /dev/null +++ b/doc/_templates/version-switcher.html @@ -0,0 +1,11 @@ + diff --git a/doc/bibliography.rst b/doc/bibliography.rst index 6463422d262..4a310352d8d 100644 --- a/doc/bibliography.rst +++ b/doc/bibliography.rst @@ -1,3 +1,7 @@ +:orphan: + +.. _general_bibliography: + General bibliography ==================== diff --git a/doc/carousel.inc b/doc/carousel.inc deleted file mode 100644 index 96e82239774..00000000000 --- a/doc/carousel.inc +++ /dev/null @@ -1,81 +0,0 @@ -.. raw:: html - - diff --git a/doc/changes/0.12.inc b/doc/changes/0.12.inc index b788cbc4a6d..39b48393d92 100644 --- a/doc/changes/0.12.inc +++ b/doc/changes/0.12.inc @@ -50,7 +50,7 @@ Changelog - Add epoch rejection based on annotated segments by `Jaakko Leppakangas`_ -- Add option to use new-style MEG channel names in :func:`mne.read_selection` by `Eric Larson`_ +- Add option to use new-style MEG channel names in ``mne.read_selection`` by `Eric Larson`_ - Add option for ``proj`` in :class:`mne.EpochsArray` by `Eric Larson`_ diff --git a/doc/changes/0.22.inc b/doc/changes/0.22.inc new file mode 100644 index 00000000000..83826080837 --- /dev/null +++ b/doc/changes/0.22.inc @@ -0,0 +1,272 @@ +.. NOTE: we are now using links to highlight new functions and classes. + Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the + whats_new page will have a link to the function/class documentation. + +.. NOTE: there are 3 separate sections for changes, based on type: + - "Enhancements" for new features + - "Bugs" for bug fixes + - "API changes" for backward-incompatible changes + +.. _changes_0_22: + +Version 0.22.0 +-------------- + +.. |Austin Hurst| replace:: **Austin Hurst** + +.. |Aniket Pradhan| replace:: **Aniket Pradhan** + +.. |Eduard Ort| replace:: **Eduard Ort** + +.. |Evan Hathaway| replace:: **Evan Hathaway** + +.. |Hongjiang Ye| replace:: **Hongjiang Ye** + +.. |Jeff Stout| replace:: **Jeff Stout** + +.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** + +.. |Qianliang Li| replace:: **Qianliang Li** + +.. |Tod Flak| replace:: **Tod Flak** + +.. 
|Victoria Peterson| replace:: **Victoria Peterson** + + +Enhancements +~~~~~~~~~~~~ +- Add :func:`mne.read_evokeds_mff` to read averaged MFFs (requires mffpy >= 0.5.7) **by new contributor** |Evan Hathaway|_ (:gh:`8354`) + +- Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) + +- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) + +- Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) + +- Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) + +- Add support for ``mri_fiducials='estimated'`` in :func:`mne.viz.plot_alignment` to allow estimating MRI fiducial locations using :func:`mne.coreg.get_mni_fiducials` (:gh:`8553` by `Eric Larson`_) + +- Update default values in :ref:`mne coreg` and :func:`mne.viz.plot_alignment` for clearer representation of MRI and digitized fiducial points (:gh:`8553` by `Alex Gramfort`_ and `Eric Larson`_) + +- Add ``n_pca_components`` argument to :func:`mne.viz.plot_ica_overlay` (:gh:`8351` by `Eric Larson`_) + +- Add :func:`mne.stc_near_sensors` to facilitate plotting ECoG data (:gh:`8190` by `Eric Larson`_) + +- Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) + +- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) + +- Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) + +- Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) + +- Add support for non-uniform ``zooms`` (e.g., when using a surrogate MRI via :func:`mne.scale_mri`) in volumetric morphing (:gh:`8642` by `Eric Larson`_) + +- In :func:`mne.compute_source_morph` ``zooms`` are no longer required to match the spacing of ``src_to``, which is useful to ensure the morphing is accurate when the ``src_to`` spacing is large (e.g., 1 cm) (:gh:`8642` by `Eric Larson`_) + +- Add volumetric source space support to :func:`mne.labels_to_stc` (:gh:`8447` by `Eric Larson`_) + +- Speed up :class:`mne.decoding.TimeDelayingRidge` with edge correction using Numba (:gh:`8323` by `Eric Larson`_) + +- Add :meth:`mne.Epochs.reset_drop_log_selection` to facilitate writing epochs with many ignored entries in their drop log (:gh:`8449` by `Eric Larson`_) + +- Add sEEG source visualization using :func:`mne.stc_near_sensors` and sEEG working tutorial (:gh:`8402` by `Eric Larson`_ and `Adam Li`_) + +- Add :meth:`mne.channels.DigMontage.get_positions`, which will return a dictionary of channel positions, coordinate frame and fiducial locations (:gh:`8460` by `Adam Li`_) + +- Add support for writing digitization points in a coordinate frame other than head in :meth:`mne.channels.DigMontage.save` (:gh:`8532` by `Eric Larson`_) + +- Add ``picks`` parameter to :func:`mne.preprocessing.fix_stim_artifact` to specify which channel needs to be fixed (:gh:`8482` by `Alex Gramfort`_) + +- Add progress bar support to :func:`mne.time_frequency.csd_morlet` 
(:gh:`8608` by `Eric Larson`_) + +- Further improved documentation building instructions and execution on Windows (:gh:`8502` by `kalenkovich`_ and `Eric Larson`_) + +- Add option to disable TQDM entirely with ``MNE_TQDM='off'`` (:gh:`8515` by `Eric Larson`_) + +- Add option ``on_header_missing`` to :func:`mne.channels.read_polhemus_fastscan` (:gh:`8622` by `Eric Larson`_) + +- Add option ``window`` to :func:`mne.time_frequency.psd_welch` and related functions (:gh:`8862` by `Eric Larson`_) + +- `mne.preprocessing.ICA.plot_sources` now displays an `mne.preprocessing.ICA.plot_properties` window when right-clicking on component names on the y-axis (:gh:`8381` by `Daniel McCloy`_) + +- :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_bdf`, and :func:`mne.io.read_raw_gdf` now detect and handle invalid highpass/lowpass filter settings (:gh:`8584` by `Clemens Brunner`_) + +- If a ``baseline`` tuple containing one or two ``None`` values – e.g. ``(None, 0)``, ``(0, None)``, or ``(None, None)`` – is passed to `~mne.Epochs` or `~mne.Epochs.apply_baseline`, the ``None`` value(s) will be replaced with the actual time (i.e., :attr:`~mne.Epochs.tmin` and :attr:`~mne.Epochs.tmax`, respectively) when populating ``Epochs.baseline`` (:gh:`8442` by `Richard Höchenberger`_) + +- `~mne.Epochs` will now retain the information about an applied baseline correction, even if the baseline period is partially or completely removed through cropping later on (:gh:`8442` by `Richard Höchenberger`_) + +- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) + +- Annotations can now be shown/hidden interactively in raw plots (:gh:`8624` by `Daniel McCloy`_) + +- Added argument ``colors`` to `mne.grow_labels` (:gh:`8519` by `Olaf Hauk`_) + +- Added `mne.SourceEstimate.apply_baseline` method for baseline-correction of source estimates (:gh:`8452` by `Olaf Hauk`_) + +- New `mne.viz.Brain.set_time` method to set the displayed time in seconds (:gh:`8415` by `Daniel McCloy`_) + +- Update the ``backend`` parameter of :func:`mne.viz.plot_source_estimates` to integrate ``pyvista`` (:gh:`8395` by `Guillaume Favelier`_) + +- Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) + +Bugs +~~~~ +- Fix orthogonalization of power envelopes in :func:`mne.connectivity.envelope_correlation` (:gh:`8658` **by new contributor** |Qianliang Li|_ and `Eric Larson`_) + +- Fix data overwrite of cascading simulation operations :`mne.simulation.simulate_raw` (:gh:`8633` **by new contributor** |Jeff Stout|_) + +- Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) + +- Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels that are listed in the labels file, but which are absent from the saved data file (e.g. 'Ref' channel). 
Also now populates info['meas_date'] if possible (:gh:`8400` **by new contributor** |Tod Flak|_) + +- Fix bug with mne.io.egi.tests/test_egi.py where it mandatorily downloaded testing data when it was not necessary (:gh:`8474` **by new contributor** |Aniket Pradhan|_) + +- Fix bug with reading split files that have dashes in the filename (:gh:`8339` **by new contributor** |Eduard Ort|_) + +- Fix bug with parsing EDF dates and date integers (:gh:`8558` **by new contributor** |Austin Hurst|_ and `Eric Larson`_) + +- Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) + +- Fix bug with :func:`mne.beamformer.make_dics` where the ``rank`` parameter was not properly handled (:gh:`8594` by `Marijn van Vliet`_ and `Eric Larson`_) + +- Fix bug with :func:`mne.beamformer.apply_dics` where the whitener was not properly applied (:gh:`8610` by `Eric Larson`_) + +- Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_ ) + +- Fix bug with :func:`mne.viz.plot_source_estimates` when using the PyVista backend where singleton time points were not handled properly (:gh:`8285` by `Eric Larson`_) + +- Fix bug when passing ``axes`` to plotting functions, :func:`matplotlib.pyplot.tight_layout` will not be called when the figure was created using a constrained layout (:gh:`8344` by `Eric Larson`_) + +- Fix bug with compensated CTF data when picking channels without preload (:gh:`8318` by `Eric Larson`_) + +- Fix bug with plotting MEG topographies where the wrong extrapolation made was used in ICA (:gh:`8637` by `Eric Larson`_) + +- Fix bug when merging fNIRS channels in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8306` by `Robert Luke`_) + +- Fix bug where events could overflow when writing to FIF (:gh:`8448` by `Eric Larson`_) + +- :func:`mne.io.read_raw_edf` now supports EDF files with invalid recording dates (:gh:`8283` by `Clemens Brunner`_) + +- Fix bug with :func:`mne.io.Raw.save` when using ``split_naming='bids'`` where non-split files would still be named ``name_split-01_meg.fif`` instead of the requested ``name_meg.fif`` (:gh:`8464` by `Alex Gramfort`_ and `Eric Larson`_) + +- Fix bug with :class:`mne.preprocessing.ICA` where ``n_pca_components`` as a :class:`python:float` would give the number of components that explained less than or equal to the given variance. It now gives greater than the given number for better usability and consistency with :class:`sklearn.decomposition.PCA`. 
Generally this will mean that one more component will be included (:gh:`8326` by `Eric Larson`_) + +- Fix bug with :class:`mne.preprocessing.ICA` where projections were not tracked properly (:gh:`8343` by `Eric Larson`_) + +- Fix bug where extrapolation points created artifacts in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8425` by `Mikołaj Magnuski`_) + +- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` where full-rank data were not handled properly (:gh:`8326` by `Eric Larson`_) + +- Fix bug with :ref:`somato-dataset` where the BEM was not included (:gh:`8317` by `Eric Larson`_) + +- Fix bug with coordinate frames when performing volumetric morphs via :func:`mne.compute_source_morph` and :meth:`mne.SourceMorph.apply` that could lead to ~5 mm bias (:gh:`8642` by `Eric Larson`_) + +- Fix bug with volumetric rendering alpha in :meth:`mne.VolSourceEstimate.plot_3d` and related functions (:gh:`8663` by `Eric Larson`_) + +- Fix missing documentation of :func:`mne.io.read_raw_nihon` in :ref:`tut-imorting-eeg-data` (:gh`8320` by `Adam Li`_) + +- Fix bug with :func:`mne.add_reference_channels` when :func:`mne.io.Raw.reorder_channels` or related methods are used afterward (:gh:`8303`, :gh:`#8484` by `Eric Larson`_) + +- Fix bug where the ``verbose`` arguments to :meth:`mne.Evoked.apply_baseline` and :meth:`mne.Epochs.apply_baseline` were not keyword-only (:gh:`8349` by `Eric Larson`_) + +- ``ICA.max_pca_components`` will not be altered by calling `~mne.preprocessing.ICA.fit` anymore. Instead, the new attribute ``ICA.max_pca_components_`` will be set (:gh:`8321` by `Richard Höchenberger`_) + +- Fix bug that `~mne.viz.plot_ica_overlay` would sometimes not create red traces (:gh:`8341` by `Richard Höchenberger`_) + +- Fix bug with :class:`~mne.preprocessing.ICA` where ``n_components=None, n_pca_components=None`` could lead to unstable unmixing matrix inversion by making ``n_components=None`` also use the lesser of ``n_components=0.999999`` and ``n_components=n_pca_components`` (:gh:`8351` by `Eric Larson`_) + +- The ``ica.n_pca_components`` property is no longer be updated during :meth:`mne.preprocessing.ICA.fit`, instead ``ica.n_components_`` will be added to the instance (:gh:`8351` by `Eric Larson`_) + +- Pass ``rank`` everyhwere in forward preparation for source imaging. 
This bug affected sparse solvers when using maxfilter data (:gh:`8368` by `Alex Gramfort`_) + +- Fix bug in :func:`mne.viz.plot_alignment` where ECoG and sEEG channels were not plotted and fNIRS channels were always plotted in the head coordinate frame (:gh:`8393` by `Eric Larson`_) + +- Fix bug in :func:`mne.set_bipolar_reference` where ``ch_info`` could contain invalid channel information keys (:gh:`8416` by `Eric Larson`_) + +- When reading BrainVision raw data, the channel units and types were sometimes not inferred correctly (:gh:`8434` by `Richard Höchenberger`_) + +- Attempting to remove baseline correction from preloaded `~mne.Epochs` will now raise an exception (:gh:`8435` by `Richard Höchenberger`_) + +- :meth:`mne.Report.parse_folder` will now correctly handle split FIFF files (:gh:`8486`, :gh:`8491` by `Richard Höchenberger`_) + +- Fix bug where BrainVision channel names, event types, and event descriptions containing commas were incorrectly parsed (:gh:`8492` by `Stefan Appelhoff`_) + +- Fix bug in :func:`mne.preprocessing.compute_fine_calibration` where the magnetometer calibration coefficients were computed incorrectly (:gh:`8522` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_eeglab` where empty event durations led to an error (:gh:`8384` by `Mikołaj Magnuski`_) + +- Fix inset sensor plots to always use equal aspect (:gh:`8545` by `Daniel McCloy`_) + +- Fix bug in `mne.viz.plot_compare_evokeds` where evokeds with identical ``comment`` attributes would not plot properly if passed as a list (:gh:`8590` by `Daniel McCloy`_) + +- Fix bug in :func:`mne.time_frequency.psd_welch` and related functions where the window default errantly changed from ``'hamming'`` to ``('tukey', 0.25)`` (:gh:`8862` by `Eric Larson`_) + +- Fix bug in :func:`mne.io.read_raw_kit` where scale factors for EEG channels could be set to zero (:gh:`8542` by `Eric Larson`_) + +- Fix reading GDF files with excluded channels in :func:`mne.io.read_raw_gdf` (:gh:`8520` by `Clemens Brunner`_) + +- Fix automatic selection of extrapolation mask type from channel type when plotting field maps (:gh:`8589` by `Daniel McCloy`_) + +- Fix bug in :func:`mne.viz.set_3d_title` where 3D plot could have multiple titles that overlap (:gh:`8564` by `Guillaume Favelier`_) + +- Fix bug in :func:`mne.viz.set_3d_view` where plotter is not updated properly causing camera issues in the doc (:gh:`8564` by `Guillaume Favelier`_) + +- :func:`mne.preprocessing.find_ecg_events` didn't take the ``tstart`` parameter value into account when calculating the average heart rate (:gh:`8605` by `Richard Höchenberger`_) + +API changes +~~~~~~~~~~~ + +- Minimum required versions were increased for core dependencies NumPy (1.15.4), SciPy (1.1.0), and Matplotlib (3.0) and for the optional dependencies scikit-learn (0.20.2) and pandas (0.23.4) (:gh:`8374` by `Eric Larson`_) + +- The parameter ``on_split_missing`` has been added to :func:`mne.io.read_raw_fif` and its default will change from ``'warn'`` to ``'raise'`` in 0.23, by (:gh:`8357` `Eric Larson`_) + +- The ``max_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_components`` during initialization and ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` instead (:gh:`8351` by `Eric Larson`_) + +- The ``n_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` (:gh:`8356` by `Eric Larson`_) + +- The ``trans`` argument of 
:func:`mne.extract_label_time_course` is deprecated and will be removed in 0.23 as it is no longer necessary (:gh:`8389` by `Eric Larson`_) + +- Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) + +Authors +~~~~~~~ + +People who contributed to this release in alphabetical order +(people with a + are first time contributors): + +* Adam Li +* Alexandre Gramfort +* Aniket Pradhan + +* Austin Hurst + +* Christian Brodbeck +* Clemens Brunner +* Daniel McCloy +* Denis A. Engemann +* Eduard Ort + +* Eric Larson +* Evan Hathaway + +* Evgenii Kalenkovich +* Fede Raimondo +* Guillaume Favelier +* Hongjiang Ye + +* Jean-Remi King +* Jeff Stout + +* Jonathan Kuziek + +* Jussi Nurminen +* Justus Schwabedal +* Keith Doelling +* Kyle Mathewson +* Mads Jensen +* Mainak Jas +* Marijn van Vliet +* Mikolaj Magnuski +* Olaf Hauk +* Qianliang Li + +* Richard Höchenberger +* Robert Luke +* Stefan Appelhoff +* Thomas Hartmann +* Tod Flak + +* Victoria Peterson + diff --git a/doc/changes/0.9.inc b/doc/changes/0.9.inc index dae8ef881d8..a7318719051 100644 --- a/doc/changes/0.9.inc +++ b/doc/changes/0.9.inc @@ -180,7 +180,7 @@ API - Deprecated ``fmin, fmid, fmax`` in stc.plot and added ``clim`` by `Mark Wronkiewicz`_ -- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consquence, ``Raw.plot_raw_psds`` has been deprecated. +- Use ``scipy.signal.welch`` instead of matplotlib.psd inside ``compute_raw_psd`` and ``compute_epochs_psd`` by `Yousra Bekhti`_ `Eric Larson`_ and `Denis Engemann`_. As a consequence, ``Raw.plot_raw_psds`` has been deprecated. - ``Raw`` instances returned by ``mne.forward.apply_forward_raw`` now always have times starting from zero to be consistent with all other ``Raw`` instances. To get the former ``start`` and ``stop`` times, diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 17c553e6858..5c93e59cfa5 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -7,195 +7,318 @@ - "Bugs" for bug fixes - "API changes" for backward-incompatible changes +.. NOTE: changes from first-time contributors should be added to the TOP of + the relevant section (Enhancements / Bugs / API changes), and should look + like this (where xxxx is the pull request number): + - description of enhancement/bugfix/API change (:gh:`xxxx` **by new contributor** |Firstname Lastname|_) + .. _current: -Current (0.22.dev0) +Current (0.23.dev0) ------------------- -.. |Eduard Ort| replace:: **Eduard Ort** +.. |New Contributor| replace:: **New Contributor** + +.. |Jack Zhang| replace:: **Jack Zhang** + +.. |Sumalyo Datta| replace:: **Sumalyo Datta** + +.. |Anna Padee| replace:: **Anna Padee** + +.. |Richard Koehler| replace:: **Richard Koehler** + +.. |Zhi Zhang| replace:: **Zhi Zhang** + +.. |Rotem Falach| replace:: **Rotem Falach** + +.. |Andres Rodriguez| replace:: **Andres Rodriguez** -.. |Aniket Pradhan| replace:: **Aniket Pradhan** +.. |Matt Sanderson| replace:: **Matt Sanderson** -.. |Tod Flak| replace:: **Tod Flak** +.. |Enrico Varano| replace:: **Enrico Varano** -.. |Victoria Peterson| replace:: **Victoria Peterson** +.. |Dominik Welke| replace:: **Dominik Welke** -.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** +.. |Judy D Zhu| replace:: **Judy D Zhu** -.. 
|Evan Hathaway| replace:: **Evan Hathaway** +.. |Valerii Chirkov| replace:: **Valerii Chirkov** -.. |Austin Hurst| replace:: **Austin Hurst** +.. |Matteo Anelli| replace:: **Matteo Anelli** -.. |Hongjiang Ye| replace:: **Hongjiang Ye** +.. |Apoorva Karekal| replace:: **Apoorva Karekal** +.. |Cora Kim| replace:: **Cora Kim** + +.. |Silvia Cotroneo| replace:: **Silvia Cotroneo** + +.. |Ram Pari| replace:: **Ram Pari** + +.. |Erica Peterson| replace:: **Erica Peterson** + +.. |Maggie Clarke| replace:: **Maggie Clarke** Enhancements ~~~~~~~~~~~~ -- Add :func:`mne.source_space.compute_distance_to_sensors` to compute distances between vertices and sensors (:gh:`8534` by `Olaf Hauk`_ and `Marijn van Vliet`_) +- Add support for exporting to EEGLAB's set format with :mod:`eeglabio` with new methods :meth:`mne.io.Raw.export` and :meth:`mne.Epochs.export`. (:gh:`9192` **by new contributor** |Jack Zhang|_) + +- Add exclude parameter to :func:`mne.viz.plot_evoked_topo` (:gh:`9278` by |Ram Pari|_) + +- Add CSV, TSV, and XYZ support to :func:`mne.channels.read_custom_montage` (:gh:`9203` **by new contributor** |Jack Zhang|_) + +- Add HTML representation for `~mne.Epochs` in Jupyter Notebooks (:gh:`9174` by |Valerii Chirkov|_) + +- Speed up :func:`mne.viz.plot_ica_properties` by refactoring (:gh:`9174` **by new contributor** |Valerii Chirkov|_) + +- Add ``apply_function`` method to epochs and evoked objects (:gh:`9088` **by new contributor** |Erica Peterson|_ and `Victoria Peterson`_) + +- New tutorial for function :func:`mne.make_fixed_length_epochs` (:gh:`9156` **by new contributor** |Erica Peterson|_) + +- Add different colors for each volume source space in :func:`mne.viz.plot_alignment` (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- Add ``overlap`` parameter to :func:`mne.make_fixed_length_epochs` to allow creating overlapping fixed length epochs (:gh:`9096` **by new contributor** |Silvia Cotroneo|_) + +- Add :meth:`mne.Dipole.to_mni` for more convenient dipole.pos to MNI conversion (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- Update citations in maxwell.py (:gh:`9043` **by new contributor** |Valerii Chirkov|_) + +- New Tutorial for analyzing frequency-tagging data (:gh:`8867` **by new contributor** |Dominik Welke|_ and `kalenkovich`_) + +- Add dbs as new channel type for deep brain stimulation (DBS) recordings (:gh:`8739` **by new contributor** |Richard Koehler|_) + +- Add some preprocessing functions to the EEGLAB migration guide (:gh:`9169` **by new contributor** |Apoorva Karekal|_) + +- Add :func:`mne.chpi.extract_chpi_locs_kit` to read cHPI coil locations from KIT/Yokogawa data (:gh:`` **by new contributor** |Matt Sanderson|_, `Robert Seymour`_, and `Eric Larson`_) + +- Add ``match_alias`` parameter to :meth:`mne.io.Raw.set_montage` and related functions to match unrecognized channel location names to known aliases (:gh`8799` **by new contributor** |Zhi Zhang|_) + +- Update the ``notebook`` 3d backend to use ``ipyvtk_simple`` for a better integration within ``Jupyter`` (:gh:`8503` by `Guillaume Favelier`_) + +- Remove the 15-character limitation for channel names when writing to FIF format. 
If you need the old 15-character names, you can use something like ``raw.rename_channels({n: n[:13] for n in raw.ch_names}, allow_duplicates=True)``, by `Eric Larson`_ (:gh:`8346`) + +- Add channel-specific annotation support to :class:`mne.Annotations` via ``ch_names`` parameter (:gh:`8896` by `Eric Larson`_) + +- Add toggle-all button to :class:`mne.Report` HTML and ``width`` argument to :meth:`mne.Report.add_bem_to_section` (:gh:`8723` by `Eric Larson`_) + +- Add infant template MRI dataset downloader :func:`mne.datasets.fetch_infant_template` (:gh:`8738` by `Eric Larson`_ and `Christian O'Reilly`_) -- Add :func:`mne.read_evokeds_mff` to read averaged MFFs (requires mffpy >= 0.5.7) **by new contributor** |Evan Hathaway|_ (:gh:`8354`) +- Add digitizer information to :func:`mne.io.read_raw_egi` (:gh:`8789` by `Christian Brodbeck`_) -- Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) +- Add support for reading some incomplete raw FIF files in :func:`mne.io.read_raw_fif` (:gh:`9268` by `Eric Larson`_) -- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) +- Allow reading digitization from files other than ``*.fif`` in the coregistration GUI (:gh:`8790` by `Christian Brodbeck`_) -- Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) +- Speed up :func:`mne.inverse_sparse.tf_mixed_norm` using STFT/ISTFT linearity (:gh:`8697` by `Eric Larson`_) -- Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) +- Reduce memory consumption of `mne.io.Raw` and speed up epoching when thousands of events are present for `mne.Epochs` (:gh:`8801` by `Eric Larson`_) -- Add ``n_pca_components`` argument to :func:`mne.viz.plot_ica_overlay` (:gh:`8351` by `Eric Larson`_) +- Speed up ``import mne`` by reducing function creation overhead (:gh:`8829` by `Eric Larson`_) -- Add :func:`mne.stc_near_sensors` to facilitate plotting ECoG data (:gh:`8190` by `Eric Larson`_) +- `mne.Report.parse_folder` now processes supported non-FIFF files by default, too (:gh:`8744` by `Richard Höchenberger`_) -- Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) +- `mne.Report` has gained the new methods `~mne.Report.add_custom_js` and `~mne.Report.add_custom_css` for adding user-defined JavaScript and styles (:gh:`8762`, :gh:`9037` by `Richard Höchenberger`_) -- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) +- Add option to control appearance of opaque inside surface of the head to :ref:`mne coreg` (:gh:`8793` by `Eric Larson`_) -- Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) +- Add option to disable projection using ``--projoff`` in :ref:`mne browse_raw` (:gh:`9262` by `Eric Larson`_) -- Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) +- Add keypress to toggle projection using ``shift+j`` in :meth:`mne.io.Raw.plot` and :ref:`mne browse_raw` 
(:gh:`9262` by `Eric Larson`_) -- Add support for non-uniform ``zooms`` (e.g., when using a surrogate MRI via :func:`mne.scale_mri`) in volumetric morphing (:gh:`8642` by `Eric Larson`_) +- Add support for non-FIF files in :ref:`mne browse_raw` using :func:`mne.io.read_raw` (:gh:`8806` by `Eric Larson`_) -- In :func:`mne.compute_source_morph` ``zooms`` are no longer required to match the spacing of ``src_to``, which is useful to ensure the morphing is accurate when the ``src_to`` spacing is large (e.g., 1 cm) (:gh:`8642` by `Eric Larson`_) +- Add :func:`mne.io.read_raw_nedf` for reading StarStim / enobio NEDF files (:gh:`8734` by `Tristan Stenner`_) -- Add volumetric source space support to :func:`mne.labels_to_stc` (:gh:`8447` by `Eric Larson`_) +- Add :meth:`raw.describe() ` to display (or return) descriptive statistics for each channel (:gh:`8760` by `Clemens Brunner`_) -- Speed up :class:`mne.decoding.TimeDelayingRidge` with edge correction using Numba (:gh:`8323` by `Eric Larson`_) +- Add :meth:`annotations.to_data_frame() ` to return annotations as a pandas dataframe (:gh:`8783` by `Robert Luke`_) -- Add :meth:`mne.Epochs.reset_drop_log_selection` to facilitate writing epochs with many ignored entries in their drop log (:gh:`8449` by `Eric Larson`_) +- Add :func:`mne.preprocessing.compute_maxwell_basis` to compute the SSS basis function (:gh:`8822` by `Eric Larson`_) -- Add sEEG source visualization using :func:`mne.stc_near_sensors` and sEEG working tutorial (:gh:`8402` by `Eric Larson`_ and `Adam Li`_) +- Add the ``silhouette`` parameter to :class:`mne.viz.Brain` to display sharp edges and improve perception (:gh:`8771` by `Guillaume Favelier`_) -- Add :meth:`mne.channels.DigMontage.get_positions`, which will return a dictionary of channel positions, coordinate frame and fiducial locations (:gh:`8460` by `Adam Li`_) +- Add warning to :func:`mne.cov.compute_whitener` when an explicit ``rank`` parameter leads to a large increase in condition number (:gh:`8805` by `Eric Larson`_) -- Add support for writing digitization points in a coordinate frame other than head in :meth:`mne.channels.DigMontage.save` (:gh:`8532` by `Eric Larson`_) +- Add parameter ``align=True`` to `mne.viz.Brain.show_view` to make views relative to the closest canonical (MNI) axes rather than the native MRI surface RAS coordinates (:gh:`8794` by `Eric Larson`_) -- Add ``picks`` parameter to :func:`mne.preprocessing.fix_stim_artifact` to specify which channel needs to be fixed (:gh:`8482` by `Alex Gramfort`_) +- Add ``auto_close`` to `mne.Report.add_figs_to_section` and `mne.Report.add_slider_to_section` to manage closing figures (:gh`8730` by `Guillaume Favelier`_) -- Further improved documentation building instructions and execution on Windows (:gh:`8502` by `kalenkovich`_ and `Eric Larson`_) +- Add :func:`mne.write_head_bem` to support writing head surface files (:gh:`8841` by `Yu-Han Luo`_) -- Add option to disable TQDM entirely with ``MNE_TQDM='off'`` (:gh:`8515` by `Eric Larson`_) +- The signal of ``resp`` (respiratory) channels is now assumed to be in the unit Volt (:gh:`8858` by `Richard Höchenberger`_) -- Add option ``on_header_missing`` to :func:`mne.channels.read_polhemus_fastscan` (:gh:`8622` by `Eric Larson`_) +- Static type checkers like Pylance (comes with VS Code) now display the parameters of many more functions correctly, largely improving overall usability for VS Code users (:gh:`8862` by `Richard Höchenberger`_) -- `mne.preprocessing.ICA.plot_sources` now displays an 
`mne.preprocessing.ICA.plot_properties` window when right-clicking on component names on the y-axis (:gh:`8381` by `Daniel McCloy`_) +- Support new EEGLAB file format (:gh:`8874` by `Clemens Brunner`_) -- :func:`mne.io.read_raw_edf`, :func:`mne.io.read_raw_bdf`, and :func:`mne.io.read_raw_gdf` now detect and handle invalid highpass/lowpass filter settings (:gh:`8584` by `Clemens Brunner`_) +- Reading and writing FIFF files whose filenames end with ``_meg.fif.gz``, ``_eeg.fif(.gz)``, and ``_ieeg.fif(.gz)`` doesn't emit a warning anymore; this improves interobaility with BIDS-formatted datasets (:gh:`8868` by `Richard Höchenberger`_) -- If a ``baseline`` tuple containing one or two ``None`` values – e.g. ``(None, 0)``, ``(0, None)``, or ``(None, None)`` – is passed to `~mne.Epochs` or `~mne.Epochs.apply_baseline`, the ``None`` value(s) will be replaced with the actual time (i.e., :attr:`~mne.Epochs.tmin` and :attr:`~mne.Epochs.tmax`, respectively) when populating ``Epochs.baseline`` (:gh:`8442` by `Richard Höchenberger`_) +- On macOS, we now set the environment variable ``QT_MAC_WANTS_LAYER`` to ``"1"`` if it hasn't been set explicitly by the user, in order to ensure that `~mne.SourceEstimate` plots work on macOS 11 with older versions of Qt and PyQt (:gh:`8959` by `Richard Höchenberger`_) -- `~mne.Epochs` will now retain the information about an applied baseline correction, even if the baseline period is partially or completely removed through cropping later on (:gh:`8442` by `Richard Höchenberger`_) +- :func:`mne.time_frequency.EpochsTFR.average` now allows different ways of averaging, such as "median", or callable functions (:gh:`8879` by `Adam Li`_) + +- `~mne.Epochs` metadata can now be generated automatically from events using `mne.epochs.make_metadata` (:gh:`8834` by `Richard Höchenberger`_) + +- Interactions with sliders in `mne.Report` will now continuously update the linked content (it was updated only on mouse button release before) (:gh:`9023` by `Richard Höchenberger`_) + +- `mne.viz.plot_drop_log` and :meth:`mne.Epochs.plot_drop_log` now omit displaying the subject name in the title if ``subject=None`` is passed (:gh:`9015` by `Richard Höchenberger`_) + +- Plot ECoG tutorial now uses a real epilepsy seizure dataset and visualizes the seizure onset (:gh:`9087` by `Eric Larson`_, `Adam Li`_, `Alex Rockhill`_ and `Liberty Hamilton`_) + +- Improve documentation of Report-Class (:gh:`9113` by `Martin Schulz`_) + +- Add :func:`mne.channels.DigMontage.add_estimated_fiducials` which will add LPA, RPA and Nasion fiducial points to the ``DigMontage`` object in ``mri`` coordinate frame (:gh:`9118` by `Adam Li`_) + +- :func:`mne.io.anonymize_info` now anonymizes also sex and hand fields when ``keep_his`` is ``False`` (:gh:`9103`, :gh:`9175` by |Rotem Falach|_ and `Richard Höchenberger`_) + +- Add parameter ``theme`` to :class:`mne.viz.Brain` for optional Dark-Mode (:gh:`9149` by `Martin Schulz`_, `Guillaume Favelier`_) + +- Add first_samp support for raw simulations with `mne.simulation.simulate_raw` and `mne.simulation.SourceSimulator` (:gh:`9166` by `Steven Bierer`_) + +- `~mne.Evoked` gained a ``baseline`` attribute that is automatically assembled based on the baseline of the averaged `~mne.Epochs` (:gh:`9210` by `Richard Höchenberger`_) + +- Add ``units`` parameter to :meth:`mne.io.Raw.get_data` to return data in the desired unit (:gh:`9136` by `Johann Benerradi`_ and `Stefan Appelhoff`_) + +- Add :func:`mne.preprocessing.equalize_bads` to interpolate bad channels in a list of 
`~mne.Evoked`, `~mne.Epochs` or `~mne.io.Raw` having different sets of bad channels (:gh:`9241` by `Alex Gramfort`_) + +- :meth:`mne.Epochs.equalize_event_counts` can now be called without providing a list of event names, and will equalize the counts of **all** event types present in the `~mne.Epochs` (:gh:`9261` by `Richard Höchenberger`_) + +- :func:`mne.preprocessing.find_eog_events` and :func:`mne.preprocessing.create_eog_epochs` now accept a list of channel names, allowing you to specify multiple EOG channels at once (:gh:`9269` by `Richard Höchenberger`_) + +- Improve performance of :func:`mne.set_bipolar_reference` (:gh:`9270` by `Martin Schulz`_) + +- Add support for setting montages on fNIRS data, with built in standard montages for Artinis OctaMon and Artinis Brite23 devices (:gh:`9141` by `Johann Benerradi`_, `Robert Luke`_ and `Eric Larson`_) Bugs ~~~~ -- Fix a transpose issue of :func:`mne.decoding.CSP.plot_filters` (:gh:`8580` **by new contributor** |Hongjiang Ye|_) +- Fix bug with :func:`mne.viz.plot_evoked_topo` where set ylim parameters gets swapped across channel types. (:gh:`9207` by |Ram Pari|_) -- Fix :func:`mne.io.read_raw_curry` to deal with Curry datasets that have channels that are listed in the labels file, but which are absent from the saved data file (e.g. 'Ref' channel). Also now populates info['meas_date'] if possible (:gh:`8400` **by new contributor** |Tod Flak|_) +- Fix bug with :func:`mne.io.read_raw_edf` where µV was not correctly recognized (:gh:`9187` **by new contributor** |Sumalyo Datta|_) -- Fix bug with mne.io.egi.tests/test_egi.py where it mandatorily downloaded testing data when it was not necessary (:gh:`8474` **by new contributor** |Aniket Pradhan|_) +- Fix bug with :func:`mne.viz.plot_compare_evokeds` did not check type of combine. 
(:gh:`9151` **by new contributor** |Matteo Anelli|_) -- Fix bug with reading split files that have dashes in the filename (:gh:`8339` **by new contributor** |Eduard Ort|_) +- Fix bug with :func:`mne.viz.plot_evoked_topo` where ``ylim`` was only being applied to the first channel in the dataset (:gh:`9162` **by new contributor** |Ram Pari|_ ) -- Fix bug with parsing EDF dates and date integers (:gh:`8558` **by new contributor** |Austin Hurst|_ and `Eric Larson`_) +- Fix bug with :func:`mne.Epochs.plot_image` allowing interactive zoom to work properly (:gh:`9152` by **by new contributor** |Maggie Clarke|_ and `Daniel McCloy`_) -- Fix bug with reading EDF and KIT files on big endian architectures such as s390x (:gh:`8618` by `Eric Larson`_) +- Fix bug with :func:`mne.Epochs.plot_image` where the ``x_label`` was different depending on the evoked parameter (:gh:`9115` **by new contributor** |Matteo Anelli|_) -- Fix bug with `~mne.viz.plot_epochs_image` when ``order`` is supplied and multiple conditions are plotted (:gh:`8377` by `Daniel McCloy`_ ) +- Fix bug with restricting :func:`mne.io.Raw.save` saving options to .fif and .fif.gz extensions (:gh:`9062` by |Valerii Chirkov|_) -- Fix bug with :func:`mne.viz.plot_source_estimates` when using the PyVista backend where singleton time points were not handled properly (:gh:`8285` by `Eric Larson`_) +- Fix bug with :func:`mne.io.read_raw_kit` where missing marker coils were not handled (:gh:`8989` **by new contributor** |Judy D Zhu|_) -- Fix bug when passing ``axes`` to plotting functions, :func:`matplotlib.pyplot.tight_layout` will not be called when the figure was created using a constrained layout (:gh:`8344` by `Eric Larson`_) +- Fix bug with `mne.connectivity.spectral_connectivity` where time axis in Epochs data object was dropped. 
(:gh:`8839` **by new contributor** |Anna Padee|_) -- Fix bug with compensated CTF data when picking channels without preload (:gh:`8318` by `Eric Larson`_) +- Fix bug with `mne.io.Raw.resample` to allow passing ``stim_picks='misc'`` (:gh:`8844` **by new contributor** |Enrico Varano|_ and `Eric Larson`_) -- Fix bug when merging fNIRS channels in :func:`mne.viz.plot_evoked_topomap` and related functions (:gh:`8306` by `Robert Luke`_) +- Fix bugs with `mne.io.read_raw_persyst` where multiple ``Comments`` with the same name are allowed, and ``Comments`` with a "," character are now allowed (:gh:`8311` and :gh:`8806` **by new contributor** |Andres Rodriguez|_ and `Adam Li`_) -- Fix bug where events could overflow when writing to FIF (:gh:`8448` by `Eric Larson`_) +- Fix zen mode and scalebar toggling for :meth:`raw.plot() ` when using the ``macosx`` matplotlib backend (:gh:`8688` by `Daniel McCloy`_) -- :func:`mne.io.read_raw_edf` now supports EDF files with invalid recording dates (:gh:`8283` by `Clemens Brunner`_) +- Fix bug with :func:`mne.viz.snapshot_brain_montage` where the positions were incorrect (:gh:`8983` by `Eric Larson`_) -- Fix bug with :func:`mne.io.Raw.save` when using ``split_naming='bids'`` where non-split files would still be named ``name_split-01_meg.fif`` instead of the requested ``name_meg.fif`` (:gh:`8464` by `Alex Gramfort`_ and `Eric Larson`_) +- Fix bug with :func:`mne.preprocessing.maxwell_filter` where the eSSS basis had to exactly match the good channels instead of being a superset (:gh:`8675` by `Eric Larson`_) -- Fix bug with :class:`mne.preprocessing.ICA` where ``n_pca_components`` as a :class:`python:float` would give the number of components that explained less than or equal to the given variance. It now gives greater than the given number for better usability and consistency with :class:`sklearn.decomposition.PCA`. 
Generally this will mean that one more component will be included (:gh:`8326` by `Eric Larson`_) +- Fix bug with :meth:`mne.Report.add_bem_to_section` where ``n_jobs != 1`` would cause ``n_jobs`` subsets of MRI images in some orientations to be flipped (:gh:`8713` by `Eric Larson`_) -- Fix bug with :class:`mne.preprocessing.ICA` where projections were not tracked properly (:gh:`8343` by `Eric Larson`_) +- Fix bug with :meth:`raw.plot() ` where annotations didn't immediately appear when changing window duration (:gh:`8689` by `Daniel McCloy`_) -- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` where full-rank data were not handled properly (:gh:`8326` by `Eric Larson`_) +- Fix bug with :meth:`raw.plot() ` where ``scalings='auto'`` did not compute scalings using the full range of data (:gh:`8806` by `Eric Larson`_) -- Fix bug with :ref:`somato-dataset` where the BEM was not included (:gh:`8317` by `Eric Larson`_) +- Fix bug with :meth:`raw.plot() ` where setting a ``lowpass`` could lead to non-data-channels not plotting (:gh:`8954` by `Eric Larson`_) -- Fix bug with coordinate frames when performing volumetric morphs via :func:`mne.compute_source_morph` and :meth:`mne.SourceMorph.apply` that could lead to ~5 mm bias (:gh:`8642` by `Eric Larson`_) +- Fix bug with :meth:`mne.io.Raw.load_data` and :meth:`mne.Epochs.drop_bad` where ``verbose`` logging was not handled properly (:gh:`8884` by `Eric Larson`_) -- Fix missing documentation of :func:`mne.io.read_raw_nihon` in :ref:`tut-imorting-eeg-data` (:gh`8320` by `Adam Li`_) +- Fix bug with :func:`mne.io.read_raw_nicolet` where header type values such as num_sample and duration_in_sec where not parsed properly (:gh:`8712` by `Alex Gramfort`_) -- Fix bug with :func:`mne.add_reference_channels` when :func:`mne.io.Raw.reorder_channels` or related methods are used afterward (:gh:`8303`, :gh:`#8484` by `Eric Larson`_) +- Fix bug with :func:`mne.preprocessing.read_ica_eeglab` when reading decompositions using PCA dimensionality reduction (:gh:`8780` by `Alex Gramfort`_ and `Eric Larson`_) -- Fix bug where the ``verbose`` arguments to :meth:`mne.Evoked.apply_baseline` and :meth:`mne.Epochs.apply_baseline` were not keyword-only (:gh:`8349` by `Eric Larson`_) +- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where ``depth`` was errantly restricted to be less than or equal to 1. (:gh:`8804` by `Eric Larson`_) -- ``ICA.max_pca_components`` will not be altered by calling `~mne.preprocessing.ICA.fit` anymore. 
Instead, the new attribute ``ICA.max_pca_components_`` will be set (:gh:`8321` by `Richard Höchenberger`_) +- Fix bug with :func:`mne.stats.permutation_cluster_1samp_test` and related clustering functions when ``adjacency=None`` and ``out_type='indices'`` (:gh:`#8842` by `Eric Larson`_) -- Fix bug that `~mne.viz.plot_ica_overlay` would sometimes not create red traces (:gh:`8341` by `Richard Höchenberger`_) +- Fix bug with :func:`mne.viz.plot_alignment` where plotting a sphere model could ignore the ``brain`` argument (:gh:`8857` by `Eric Larson`_) -- Fix bug with :class:`~mne.preprocessing.ICA` where ``n_components=None, n_pca_components=None`` could lead to unstable unmixing matrix inversion by making ``n_components=None`` also use the lesser of ``n_components=0.999999`` and ``n_components=n_pca_components`` (:gh:`8351` by `Eric Larson`_) +- Fix bug with :func:`mne.SourceEstimate.plot` where flatmaps were not positioned properly when using ``hemi='both'`` (:gh:`9315` by `Eric Larson`_) -- The ``ica.n_pca_components`` property is no longer be updated during :meth:`mne.preprocessing.ICA.fit`, instead ``ica.n_components_`` will be added to the instance (:gh:`8351` by `Eric Larson`_) +- Fix bug with :meth:`mne.Annotations.save` where files could be overwritten accidentally, it can now be controlled via the ``overwrite`` argument (:gh:`8896` by `Eric Larson`_) -- Pass ``rank`` everyhwere in forward preparation for source imaging. This bug affected sparse solvers when using maxfilter data (:gh:`8368` by `Alex Gramfort`_) +- Fix bug with ``replace`` argument of :meth:`mne.Report.add_bem_to_section` and :meth:`mne.Report.add_slider_to_section` (:gh:`8723` by `Eric Larson`_) -- Fix bug in :func:`mne.viz.plot_alignment` where ECoG and sEEG channels were not plotted and fNIRS channels were always plotted in the head coordinate frame (:gh:`8393` by `Eric Larson`_) +- Fix bug with :func:`mne.chpi.compute_chpi_locs` where all cHPI coils being off would lead to an empty array of the wrong dimensionality (:gh:`8956` by `Eric Larson`_) -- Fix bug in :func:`mne.set_bipolar_reference` where ``ch_info`` could contain invalid channel information keys (:gh:`8416` by `Eric Larson`_) +- Fix bug with :func:`mne.extract_label_time_course` where labels, STCs, and the source space were not checked for compatible ``subject`` attributes (:gh:`9284` by `Eric Larson`_) -- When reading BrainVision raw data, the channel units and types were sometimes not inferred correctly (:gh:`8434` by `Richard Höchenberger`_) +- Fix bug with :func:`mne.grow_labels` where ``overlap=False`` could run forever or raise an error (:gh:`9317` by `Eric Larson`_) -- Attempting to remove baseline correction from preloaded `~mne.Epochs` will now raise an exception (:gh:`8435` by `Richard Höchenberger`_) +- Fix compatibility bugs with :mod:`mne_realtime` (:gh:`8845` by `Eric Larson`_) -- :meth:`mne.Report.parse_folder` will now correctly handle split FIFF files (:gh:`8486`, :gh:`8491` by `Richard Höchenberger`_) +- Fix bug with `mne.viz.Brain` where non-inflated surfaces had an X-offset imposed by default (:gh:`8794` by `Eric Larson`_) -- Fix bug where BrainVision channel names, event types, and event descriptions containing commas were incorrectly parsed (:gh:`8492` by `Stefan Appelhoff`_) +- Fix bug with :ref:`mne coreg` where nasion values were not updated when clicking (:gh:`8793` by `Eric Larson`_) -- Fix bug in :func:`mne.preprocessing.compute_fine_calibration` where the magnetometer calibration coefficients were computed incorrectly 
(:gh:`8522` by `Eric Larson`_) +- Fix bug with matplotlib-based 3D plotting where ``Axes3D`` were not properly initialized in :func:`mne.viz.plot_source_estimates` (:gh:`8811` by `Chris Bailey`_) -- Fix bug in :func:`mne.io.read_raw_eeglab` where empty event durations led to an error (:gh:`8384` by `Mikołaj Magnuski`_) +- Allow sEEG channel types in :meth:`mne.Evoked.plot_joint` (:gh:`8736` by `Daniel McCloy`_) -- Fix inset sensor plots to always use equal aspect (:gh:`8545` by `Daniel McCloy`_) +- Fix bug where hidden annotations could be deleted interactively in :meth:`mne.io.Raw.plot` windows (:gh:`8831` by `Daniel McCloy`_) -- Fix bug in `mne.viz.plot_compare_evokeds` where evokeds with identical ``comment`` attributes would not plot properly if passed as a list (:gh:`8590` by `Daniel McCloy`_) +- Function :func:`mne.set_bipolar_reference` was not working when passing ``Epochs`` constructed with some ``picks`` (:gh:`8728` by `Alex Gramfort`_) -- Fix bug in :func:`mne.io.read_raw_kit` where scale factors for EEG channels could be set to zero (:gh:`8542` by `Eric Larson`_) +- Fix anonymization issue of FIF files after IO round trip (:gh:`8731` by `Alex Gramfort`_) -- Fix reading GDF files with excluded channels in :func:`mne.io.read_raw_gdf` (:gh:`8520` by `Clemens Brunner`_) +- Fix bug in `mne.preprocessing.ICA.plot_sources` where right-clicking component names could yield `~mne.preprocessing.ICA.plot_properties` windows for the wrong component if ``picks`` had been specified (:gh:`8996` by `Daniel McCloy`_) -- Fix automatic selection of extrapolation mask type from channel type when plotting field maps (:gh:`8589` by `Daniel McCloy`_) +- Fix title not shown in :func:`mne.viz.plot_montage` (:gh:`8752` by `Clemens Brunner`_) -- Fix bug in :func:`mne.viz.set_3d_title` where 3D plot could have multiple titles that overlap (:gh:`8564` by `Guillaume Favelier`_) +- `mne.io.read_raw_egi` now correctly handles `pathlib.Path` filenames (:gh:`8759` by `Richard Höchenberger`_) -- Fix bug in :func:`mne.viz.set_3d_view` where plotter is not updated properly causing camera issues in the doc (:gh:`8564` by `Guillaume Favelier`_) +- `mne.viz.plot_evoked` and `mne.Evoked.plot` now correctly plot global field power (GFP) for EEG data when ``gfp=True`` or ``gfp='only'`` is passed (used to plot RMS). 
For MEG data, we continue to plot the RMS, but now label it correctly as such (:gh:`8775` by `Richard Höchenberger`_) -- :func:`mne.preprocessing.find_ecg_events` didn't take the ``tstart`` parameter value into account when calculating the average heart rate (:gh:`8605` by `Richard Höchenberger`_) +- Fix bug with :ref:`mne make_scalp_surfaces` where ``--overwrite`` was not functional (:gh:`8800` by `Yu-Han Luo`_) -API changes -~~~~~~~~~~~ +- Fix bug with :func:`mne.viz.plot_topomap` when plotting gradiometers with a missing channel in a pair (:gh:`8817` by `Alex Gramfort`_) + +- :meth:`epochs.crop() ` now also adjusts the ``reject_tmin`` and ``reject_tmax`` attributes if necessary (:gh:`8821` by `Richard Höchenberger`_) + +- When creating `~mne.Epochs`, we now ensure that ``reject_tmin`` and ``reject_tmax`` cannot fall outside of the epochs' time interval anymore (:gh:`8821` by `Richard Höchenberger`_) + +- `~mne.io.read_raw_bti` erroneously treated response channels as respiratory channels (:gh:`8855` by `Richard Höchenberger`_) + +- The RMS trace shown in the time viewer of `~mne.SourceEstimate` plots is now correctly labeled as ``RMS`` (was ``GFP`` before) (:gh:`8965` by `Richard Höchenberger`_) + +- Fix bug with :meth:`mne.SourceEstimate.plot` and related functions where the scalars were not interactively updated properly (:gh:`8985` by `Eric Larson`_) + +- Fix bug with mne.channels.find_ch_adjacency() returning wrong adjacency for Neuromag122-Data (:gh:`8891` by `Martin Schulz`_) -- Added argument ``colors`` to `mne.grow_labels` (:gh:`8519` by `Olaf Hauk`_) +- Fix :func:`mne.read_dipole` yielding :class:`mne.Dipole` objects that could not be indexed (:gh:`8963` by `Marijn van Vliet`_) -- Added `mne.SourceEstimate.apply_baseline` method for baseline-correction of source estimates (:gh:`8452` by `Olaf Hauk`_) +- Fix bug when setting n_jobs > 1 in :meth:`mne.Report.parse_folder` (:gh:`9109` by `Martin Schulz`_) -- Minimum required versions were increased for core dependencies NumPy (1.15.4), SciPy (1.1.0), and Matplotlib (3.0) and for the optional dependencies scikit-learn (0.20.2) and pandas (0.23.4) (:gh:`8374` by `Eric Larson`_) +- Fix bug with :meth:`mne.Evoked.plot_image` where an incorrect clim parameter did not raise any error (:gh:`9115` **by new contributor** |Matteo Anelli|_) -- The parameter ``on_split_missing`` has been added to :func:`mne.io.read_raw_fif` and its default will change from ``'warn'`` to ``'raise'`` in 0.23, by (:gh:`8357` `Eric Larson`_) +- Fix bug with ``mne.io.Raw.pick`` where incorrect fnirs types were returned (:gh:`9178` by `Robert Luke`_) -- The ``max_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_components`` during initialization and ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` instead (:gh:`8351` by `Eric Larson`_) +- Fix bug when passing both axes and picks to `mne.viz.plot_compare_evokeds` (:gh:`9252` by `Daniel McCloy`_) -- The ``n_pca_components`` argument of :class:`~mne.preprocessing.ICA` has been deprecated, use ``n_pca_components`` in :meth:`~mne.preprocessing.ICA.apply` (:gh:`8356` by `Eric Larson`_) +- Improved string representation of `~mne.Epochs` containing multiple event types; improved (and more mathematically correct) ``evoked.comment`` in the `mne.combine_evoked` output; and better (and often more concise) legend labels in the figures created via `~mne.viz.plot_compare_evokeds` (:gh:`9027` by `Richard Höchenberger`_) + +- :func:`mne.preprocessing.find_ecg_events` now correctly 
handles situation where no ECG activity could be detected, and correctly returns an empty array of ECG events (:gh:`9236` by `Richard Höchenberger`_) + +- Fix bug with ``picks`` attribute for `~mne.Epochs` after calling :meth:`mne.Epochs.add_channels` (:gh:`9246` by `Alex Gramfort`_) + +- Fix bug where ``backend='notebook'`` could not be used in :meth:`mne.SourceEstimate.plot` (:gh:`9305` by `Jean-Remi King`_) + +- `mne.preprocessing.compute_proj_eog` and `mne.preprocessing.compute_proj_ecg` now return empty lists if no EOG or ECG events, respectively, could be found. Previously, we'd return ``None`` in these situations, which does not match the documented behavior of returning a list of projectors (:gh:`9277` by `Richard Höchenberger`_) + +API changes +~~~~~~~~~~~ +- Introduced new ``'auto'`` settings for ``ICA.max_iter``. The old default ``max_iter=200`` will be removed in MNE-Python 0.24 (:gh:`9099` **by new contributor** |Cora Kim|_) -- The ``trans`` argument of :func:`mne.extract_label_time_course` is deprecated and will be removed in 0.23 as it is no longer necessary (:gh:`8389` by `Eric Larson`_) +- `mne.viz.plot_sensors_connectivity` now allows setting the colorbar label via the ``cbar_label`` parameter (:gh:`9248` by `Daniel McCloy`_) -- New `mne.viz.Brain.set_time` method to set the displayed time in seconds (:gh:`8415` by `Daniel McCloy`_) +- ``mne.read_selection`` has been deprecated in favor of `mne.read_vectorview_selection`. ``mne.read_selection`` will be removed in MNE-Python 0.24 (:gh:`8870` by `Richard Höchenberger`_) -- Update the ``backend`` parameter of :func:`mne.viz.plot_source_estimates` to integrate ``pyvista`` (:gh:`8395` by `Guillaume Favelier`_) +- ``mne.beamformer.tf_dics`` has been deprecated and will be removed in MNE-Python 0.24 (:gh:`9122` by `Britta Westner`_) -- Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) +- Fitting `~mne.preprocessing.ICA` on baseline-corrected `~mne.Epochs`, and / or applying it on baseline-corrected `~mne.Epochs` or `~mne.Evoked` data will now display a warning. Users are advised to only baseline correct their data after cleaning is completed (:gh:`9033` by `Richard Höchenberger`_) -- Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) +- Supplying multiple channel names to `mne.preprocessing.find_eog_events` or `mne.preprocessing.compute_proj_eog` as a string of comma-separated channel names has been deprecated; please pass a list of channel names instead. Support for comma-separated strings will be removed in MNE-Python 0.24 (:gh:`9269` by `Richard Höchenberger`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 756bdf9cbff..4cbc7cb387f 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -1,10 +1,10 @@ .. _Alex Gramfort: http://alexandre.gramfort.net -.. _Martin Luessi: https://www.martinos.org/user/8245 +.. _Martin Luessi: https://github.com/mluessi .. _Yaroslav Halchenko: http://www.onerussian.com/ -.. _Daniel Strohmeier: http://www.tu-ilmenau.de/bmti/fachgebiete/biomedizinische-technik/dipl-ing-daniel-strohmeier/ +.. _Daniel Strohmeier: https://github.com/joewalter .. _Eric Larson: http://larsoner.com @@ -38,7 +38,7 @@ .. _Qunxi Dong: https://github.com/dongqunxi -.. 
_Martin Billinger: https://github.com/kazemakase +.. _Martin Billinger: https://github.com/mbillingr .. _Federico Raimondo: https://github.com/fraimondo @@ -68,7 +68,7 @@ .. _Yousra Bekhti: https://www.linkedin.com/pub/yousra-bekhti/56/886/421 -.. _Mark Wronkiewicz: http://ilabs.washington.edu/graduate-students/bio/i-labs-mark-wronkiewicz +.. _Mark Wronkiewicz: https://ml.jpl.nasa.gov/people/wronkiewicz/wronkiewicz.html .. _Sébastien Marti: http://www.researchgate.net/profile/Sebastien_Marti @@ -88,15 +88,15 @@ .. _Jukka Nenonen: https://www.linkedin.com/pub/jukka-nenonen/28/b5a/684 -.. _Jussi Nurminen: https://scholar.google.fi/citations?user=R6CQz5wAAAAJ&hl=en +.. _Jussi Nurminen: https://github.com/jjnurminen -.. _Clemens Brunner: https://github.com/cle1109 +.. _Clemens Brunner: https://github.com/cbrnr .. _Asish Panda: https://github.com/kaichogami -.. _Natalie Klein: http://www.stat.cmu.edu/people/students/neklein +.. _Natalie Klein: http://natklein.weebly.com -.. _Jon Houck: https://scholar.google.com/citations?user=DNoS05IAAAAJ&hl=en +.. _Jon Houck: https://www.mrn.org/people/jon-m.-houck/principal-investigators .. _Pablo-Arias: https://github.com/Pablo-Arias @@ -122,7 +122,7 @@ .. _Antti Rantala: https://github.com/Odingod -.. _Keith Doelling: http://science.keithdoelling.com +.. _Keith Doelling: https://github.com/kdoelling1919 .. _Paul Pasler: https://github.com/ppasler @@ -130,13 +130,13 @@ .. _Annalisa Pascarella: http://www.iac.rm.cnr.it/~pasca/ -.. _Luke Bloy: https://scholar.google.com/citations?hl=en&user=Ad_slYcAAAAJ&view_op=list_works&sortby=pubdate +.. _Luke Bloy: https://imaging.research.chop.edu/people/dr-luke-bloy .. _Leonardo Barbosa: https://github.com/noreun .. _Erkka Heinila: https://github.com/Teekuningas -.. _Andrea Brovelli: http://www.int.univ-amu.fr/_BROVELLI-Andrea_?lang=en +.. _Andrea Brovelli: http://andrea-brovelli.net .. _Richard Höchenberger: https://github.com/hoechenberger @@ -188,7 +188,7 @@ .. _Thomas Hartmann: https://github.com/thht -.. _Steven Gutstein: http://robust.cs.utep.edu/~gutstein +.. _Steven Gutstein: http://vll.cs.utep.edu/team.html .. _Peter Molfese: https://github.com/pmolfese @@ -232,9 +232,9 @@ .. _Guillaume Favelier: https://github.com/GuillaumeFavelier -.. _Katarina Slama: https://katarinaslama.github.io +.. _Katarina Slama: https://github.com/katarinaslama -.. _Bruno Nicenboim: http://nicenboim.org +.. _Bruno Nicenboim: https://bnicenboim.github.io .. _Ivana Kojcic: https://github.com/ikojcic @@ -266,6 +266,10 @@ .. _Robert Luke: https://github.com/rob-luke +.. _Robert Seymour: https://neurofractal.github.io + +.. _Matt Sanderson: https://github.com/monkeyman192 + .. _Mohammad Daneshzand: https://github.com/mdaneshzand .. _Fahimeh Mamashli: https://github.com/fmamashli @@ -284,7 +288,7 @@ .. _Chun-Hui Li: https://github.com/iamsc -.. _Christian O'Reilly: https://scholar.google.ca/citations?user=NllRAkwAAAAJ&hl=en +.. _Christian O'Reilly: https://github.com/christian-oreilly .. _Yu-Han Luo: https://github.com/yh-luo @@ -341,3 +345,45 @@ .. _Evan Hathaway: https://github.com/ephathaway .. _Hongjiang Ye: https://github.com/rubyyhj + +.. _Jeff Stout: https://megcore.nih.gov/index.php/Staff + +.. _Qianliang Li: https://www.dtu.dk/english/service/phonebook/person?id=126774 + +.. _Richard Koehler: https://github.com/richardkoehler + +.. _Tristan Stenner: https://github.com/tstenner/ + +.. _Anna Padee: https://github.com/apadee/ + +.. _Andres Rodriguez: https://github.com/infinitejest/ + +.. _Rotem Falach: https://github.com/Falach + +.. 
_Zhi Zhang: https://github.com/tczhangzhi/ + +.. _Enrico Varano: https://github.com/enricovara/ + +.. _Dominik Welke: https://github.com/dominikwelke/ + +.. _Judy D Zhu: https://github.com/JD-Zhu + +.. _Valerii Chirkov: https://github.com/vagechirkov + +.. _Maggie Clarke: https://github.com/mdclarke + +.. _Apoorva Karekal: https://github.com/apoorva6262 + +.. _Matteo Anelli: https://github.com/matteoanelli + +.. _Cora Kim: https://github.com/kimcoco + +.. _Silvia Cotroneo: https://github.com/sfc-neuro + +.. _Ram Pari: https://github.com/ramkpari + +.. _Erica Peterson: https://github.com/nordme + +.. _Sumalyo Datta: https://github.com/Sumalyo + +.. _Jack Zhang: https://github.com/jackz314 diff --git a/doc/cited.rst b/doc/cited.rst index 9dfb7b2b409..782bb34ef87 100644 --- a/doc/cited.rst +++ b/doc/cited.rst @@ -1,11 +1,9 @@ -:orphan: - .. _cited: -Publications by users -===================== +Papers citing MNE-Python +======================== -Estimates provided by Google Scholar as of July 9th 2020: +Estimates provided by Google Scholar as of 27 January 2021: -- `MNE (838) `_ -- `MNE-Python (655) `_ +- `MNE (908) `_ +- `MNE-Python (771) `_ diff --git a/doc/conf.py b/doc/conf.py index df315811271..8e156d13f15 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,40 +1,39 @@ # -*- coding: utf-8 -*- # -# MNE documentation build configuration file, created by -# sphinx-quickstart on Fri Jun 11 10:45:48 2010. +# Configuration file for the Sphinx documentation builder. # -# This file is execfile()d with the current directory set to its containing -# dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html -from datetime import date -from distutils.version import LooseVersion +import gc import os -import os.path as op import sys import time import warnings +from datetime import datetime, timezone +from distutils.version import LooseVersion +import matplotlib +import sphinx import sphinx_gallery from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder from numpydoc import docscrape -import matplotlib + import mne -from mne.viz import Brain +from mne.tests.test_docstring_parameters import error_ignores from mne.utils import (linkcode_resolve, # noqa, analysis:ignore _assert_no_instances, sizeof_fmt) +from mne.viz import Brain # noqa if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'): raise ImportError('Must have at least version 0.2 of sphinx-gallery, got ' - '%s' % (sphinx_gallery.__version__,)) + f'{sphinx_gallery.__version__}') matplotlib.use('agg') +# -- Path setup -------------------------------------------------------------- + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
@@ -42,14 +41,38 @@ sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne'))) sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext'))) -# -- General configuration ------------------------------------------------ + +# -- Project information ----------------------------------------------------- + +project = 'MNE' +td = datetime.now(tz=timezone.utc) + +# We need to triage which date type we use so that incremental builds work +# (Sphinx looks at variable changes and rewrites all files if some change) +copyright = ( + f'2012–{td.year}, MNE Developers. Last updated \n' # noqa: E501 + '') # noqa: E501 +if os.getenv('MNE_FULL_DATE', 'false').lower() != 'true': + copyright = f'2012–{td.year}, MNE Developers. Last updated locally.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = mne.__version__ +# The full version, including alpha/beta/rc tags. +release = version + + +# -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '2.0' # Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. - +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', @@ -62,28 +85,14 @@ 'sphinx.ext.graphviz', 'numpydoc', 'sphinx_gallery.gen_gallery', - 'sphinx_fontawesome', 'gen_commands', 'gh_substitutions', 'mne_substitutions', - 'sphinx_bootstrap_theme', + 'gen_names', 'sphinx_bootstrap_divs', 'sphinxcontrib.bibtex', - 'sphinxcontrib.bibtex2', -] - -linkcheck_ignore = [ - 'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta - 'https://doi.org/10.1088/0031-9155/51/7/008', # noqa 403 Client Error: Forbidden for url: https://iopscience.iop.org/article/10.1088/0031-9155/51/7/008 - 'https://sccn.ucsd.edu/wiki/.*', # noqa HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),)) - 'https://docs.python.org/dev/howto/logging.html', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) - 'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) - 'https://hal.archives-ouvertes.fr/hal-01848442/', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/ + 'sphinx_copybutton', ] -linkcheck_anchors = False # saves a bit of time - -autosummary_generate = True -autodoc_default_options = {'inherited-members': None} # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -96,68 +105,9 @@ # The suffix of source filenames. source_suffix = '.rst' -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. +# The main toctree document. master_doc = 'index' -# General information about the project. -project = u'MNE' -td = date.today() -copyright = u'2012-%s, MNE Developers. 
Last updated on %s' % (td.year, - td.isoformat()) - -nitpicky = True -nitpick_ignore = [ - ("py:class", "None. Remove all items from D."), - ("py:class", "a set-like object providing a view on D's items"), - ("py:class", "a set-like object providing a view on D's keys"), - ("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501 - ("py:class", "None. Update D from dict/iterable E and F."), - ("py:class", "an object providing a view on D's values"), - ("py:class", "a shallow copy of D"), - ("py:class", "(k, v), remove and return some (key, value) pair as a"), -] -for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label', - 'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report', - 'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate', - 'VolSourceEstimate', 'VolVectorSourceEstimate', - 'channels.DigMontage', 'channels.Layout', - 'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator', - 'decoding.GeneralizingEstimator', 'decoding.LinearModel', - 'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD', - 'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator', - 'decoding.TemporalFilter', 'decoding.TimeDelayingRidge', - 'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter', - 'decoding.Vectorizer', - 'preprocessing.ICA', 'preprocessing.Xdawn', - 'simulation.SourceSimulator', - 'time_frequency.CrossSpectralDensity', - 'utils.deprecated', - 'viz.ClickableImage'): - nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__')) -suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = mne.__version__ -# The full version, including alpha/beta/rc tags. -release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - # List of documents that shouldn't be included in the build. unused_docs = [] @@ -169,173 +119,15 @@ # documents. default_role = "py:obj" -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'default' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['mne.'] -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'bootstrap' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-html_theme_options = { - 'navbar_title': ' ', # we replace this with an image - 'source_link_position': "nav", # default - 'bootswatch_theme': "flatly", # yeti paper lumen - 'navbar_sidebarrel': False, # Render the next/prev links in navbar? - 'navbar_pagenav': False, - 'navbar_class': "navbar", - 'bootstrap_version': "3", # default - 'navbar_links': [ - ("Install", "install/index"), - ("Overview", "overview/index"), - ("Tutorials", "auto_tutorials/index"), - ("Examples", "auto_examples/index"), - ("Glossary", "glossary"), - ("API", "python_reference"), - ], -} - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -html_logo = "_static/mne_logo_small.svg" - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -html_favicon = "_static/favicon.ico" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -html_extra_path = [ - 'contributing.html', - 'documentation.html', - 'getting_started.html', - 'install_mne_python.html', -] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False -html_copy_source = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -html_show_sphinx = False - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# variables to pass to HTML templating engine -build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False))) - -html_context = {'use_google_analytics': True, - 'use_media_buttons': True, 'build_dev_html': build_dev_html} - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'mne-doc' - - -# -- Options for LaTeX output --------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - # ('index', 'MNE.tex', u'MNE Manual', - # u'MNE Contributors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -latex_logo = "_static/logo.png" -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -latex_toplevel_sectioning = 'part' - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' +# -- Intersphinx configuration ----------------------------------------------- -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - -trim_doctests_flags = True - -# Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('https://numpy.org/devdocs', None), @@ -354,28 +146,167 @@ 'patsy': ('https://patsy.readthedocs.io/en/latest', None), 'pyvista': ('https://docs.pyvista.org', None), 'imageio': ('https://imageio.readthedocs.io/en/latest', None), - # We need to stick with 1.2.0 for now: - # https://github.com/dipy/dipy/issues/2290 - 'dipy': ('https://dipy.org/documentation/1.2.0.', None), 'mne_realtime': ('https://mne.tools/mne-realtime', None), 'picard': ('https://pierreablin.github.io/picard/', None), + 'qdarkstyle': ('https://qdarkstylesheet.readthedocs.io/en/latest', None), + 'eeglabio': ('https://eeglabio.readthedocs.io/en/latest', None) } -############################################################################## -# sphinxcontrib-bibtex +# NumPyDoc configuration ----------------------------------------------------- -bibtex_bibfiles = ['./references.bib'] -bibtex_style = 'unsrt' -bibtex_footbibliography_header = '' +# XXX This hack defines what extra methods numpydoc will document +docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members +numpydoc_class_members_toctree = False +numpydoc_attributes_as_param_list = True +numpydoc_xref_param_type = True +numpydoc_xref_aliases = { + # Python + 'file-like': ':term:`file-like `', + # Matplotlib + 'colormap': ':doc:`colormap `', + 'color': ':doc:`color `', + 'collection': ':doc:`collections `', + 'Axes': 'matplotlib.axes.Axes', + 'Figure': 'matplotlib.figure.Figure', + 'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D', + 'ColorbarBase': 'matplotlib.colorbar.ColorbarBase', + # Mayavi + 'mayavi.mlab.Figure': 'mayavi.core.api.Scene', + 'mlab.Figure': 'mayavi.core.api.Scene', + # sklearn + 'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut', + # joblib + 'joblib.Parallel': 'joblib.Parallel', + # nibabel + 'Nifti1Image': 'nibabel.nifti1.Nifti1Image', + 'Nifti2Image': 'nibabel.nifti2.Nifti2Image', + 'SpatialImage': 'nibabel.spatialimages.SpatialImage', + # MNE + 'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked', + 'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces', + 'SourceMorph': 'mne.SourceMorph', + 'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout', + 'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel', + 'AverageTFR': 'mne.time_frequency.AverageTFR', + 
'EpochsTFR': 'mne.time_frequency.EpochsTFR', + 'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA', + 'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations', + 'DigMontage': 'mne.channels.DigMontage', + 'VectorSourceEstimate': 'mne.VectorSourceEstimate', + 'VolSourceEstimate': 'mne.VolSourceEstimate', + 'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate', + 'MixedSourceEstimate': 'mne.MixedSourceEstimate', + 'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate', + 'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection', + 'ConductorModel': 'mne.bem.ConductorModel', + 'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed', + 'InverseOperator': 'mne.minimum_norm.InverseOperator', + 'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity', + 'SourceMorph': 'mne.SourceMorph', + 'Xdawn': 'mne.preprocessing.Xdawn', + 'Report': 'mne.Report', 'Forward': 'mne.Forward', + 'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge', + 'Vectorizer': 'mne.decoding.Vectorizer', + 'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter', + 'TemporalFilter': 'mne.decoding.TemporalFilter', + 'SSD': 'mne.decoding.SSD', + 'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC', + 'PSDEstimator': 'mne.decoding.PSDEstimator', + 'LinearModel': 'mne.decoding.LinearModel', + 'FilterEstimator': 'mne.decoding.FilterEstimator', + 'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP', + 'Beamformer': 'mne.beamformer.Beamformer', + 'Transform': 'mne.transforms.Transform', +} +numpydoc_xref_ignore = { + # words + 'instance', 'instances', 'of', 'default', 'shape', 'or', + 'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in', + 'dtype', 'object', 'self.verbose', + # shapes + 'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors', + 'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups', + 'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers', + 'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q', + 'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests', + 'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features', + 'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in', + 'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks', + 'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids', + 'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out', + 'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv', + 'n_dipoles_fwd', 'n_picks_ref', 'n_coords', 'n_meg', 'n_good_meg', + 'n_moments', + # Undocumented (on purpose) + 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', + 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', + 'RawPersyst', 'RawNihon', 'RawNedf', + # sklearn subclasses + 'mapping', 'to', 'any', + # unlinkable + 'mayavi.mlab.pipeline.surface', + 'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame', + # dipy has resolution problems, wait for them to be solved, e.g. 
+ # https://github.com/dipy/dipy/issues/2290 + 'dipy.align.AffineMap', + 'dipy.align.DiffeomorphicMap', +} +numpydoc_validate = True +numpydoc_validation_checks = {'all'} | set(error_ignores) +numpydoc_validation_exclude = { # set of regex + # dict subclasses + r'\.clear', r'\.get$', r'\.copy$', r'\.fromkeys', r'\.items', r'\.keys', + r'\.pop', r'\.popitem', r'\.setdefault', r'\.update', r'\.values', + # list subclasses + r'\.append', r'\.count', r'\.extend', r'\.index', r'\.insert', r'\.remove', + r'\.sort', + # we currently don't document these properly (probably okay) + r'\.__getitem__', r'\.__contains__', r'\.__hash__', r'\.__mul__', + r'\.__sub__', r'\.__add__', r'\.__iter__', r'\.__div__', r'\.__neg__', + # copied from sklearn + r'mne\.utils\.deprecated', +} + + +# -- Sphinx-gallery configuration -------------------------------------------- + +class Resetter(object): + """Simple class to make the str(obj) static for Sphinx build env hash.""" + + def __init__(self): + self.t0 = time.time() + + def __repr__(self): + return f'<{self.__class__.__name__}>' + + def __call__(self, gallery_conf, fname): + import matplotlib.pyplot as plt + try: + from pyvista import Plotter # noqa + except ImportError: + Plotter = None # noqa + reset_warnings(gallery_conf, fname) + # in case users have interactive mode turned on in matplotlibrc, + # turn it off here (otherwise the build can be very slow) + plt.ioff() + plt.rcParams['animation.embed_limit'] = 30. + gc.collect() + # _assert_no_instances(Brain, 'running') # calls gc.collect() + # if Plotter is not None: + # _assert_no_instances(Plotter, 'running') + # This will overwrite some Sphinx printing but it's useful + # for memory timestamps + if os.getenv('SG_STAMP_STARTS', '').lower() == 'true': + import psutil + process = psutil.Process(os.getpid()) + mem = sizeof_fmt(process.memory_info().rss) + print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22)) -############################################################################## -# sphinx-gallery examples_dirs = ['../tutorials', '../examples'] gallery_dirs = ['auto_tutorials', 'auto_examples'] os.environ['_MNE_BUILDING_DOC'] = 'true' - scrapers = ('matplotlib',) try: mlab = mne.utils._import_mlab() @@ -411,6 +342,60 @@ scrapers.insert(scrapers.index('pyvista'), brain_scraper) scrapers = tuple(scrapers) +sphinx_gallery_conf = { + 'doc_module': ('mne',), + 'reference_url': dict(mne=None), + 'examples_dirs': examples_dirs, + 'subsection_order': ExplicitOrder(['../examples/io/', + '../examples/simulation/', + '../examples/preprocessing/', + '../examples/visualization/', + '../examples/time_frequency/', + '../examples/stats/', + '../examples/decoding/', + '../examples/connectivity/', + '../examples/forward/', + '../examples/inverse/', + '../examples/realtime/', + '../examples/datasets/', + '../tutorials/intro/', + '../tutorials/io/', + '../tutorials/raw/', + '../tutorials/preprocessing/', + '../tutorials/epochs/', + '../tutorials/evoked/', + '../tutorials/time-freq/', + '../tutorials/forward/', + '../tutorials/inverse/', + '../tutorials/stats-sensor-space/', + '../tutorials/stats-source-space/', + '../tutorials/machine-learning/', + '../tutorials/clinical/', + '../tutorials/simulation/', + '../tutorials/sample-datasets/', + '../tutorials/misc/']), + 'gallery_dirs': gallery_dirs, + 'default_thumb_file': os.path.join('_static', 'mne_helmet.png'), + 'backreferences_dir': 'generated', + 'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning + 'thumbnail_size': (160, 112), + 
'remove_config_comments': True, + 'min_reported_time': 1., + 'abort_on_example_error': False, + 'reset_modules': ('matplotlib', Resetter()), # called w/each script + 'image_scrapers': scrapers, + 'show_memory': not sys.platform.startswith('win'), + 'line_numbers': False, # XXX currently (0.3.dev0) messes with style + 'within_subsection_order': FileNameSortKey, + 'capture_repr': ('_repr_html_',), + 'junit': os.path.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'), + 'matplotlib_animations': True, + 'compress_images': ('images', 'thumbnails'), + 'filename_pattern': '^((?!sgskip).)*$', +} +# Files were renamed from plot_* with: +# find . -type f -name 'plot_*.py' -exec sh -c 'x="{}"; xn=`basename "${x}"`; git mv "$x" `dirname "${x}"`/${xn:5}' \; # noqa + def append_attr_meth_examples(app, what, name, obj, options, lines): """Append SG examples backreferences to method and attr docstrings.""" @@ -419,8 +404,8 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # the .. include:: lines, so we need to do it. # Eventually this could perhaps live in SG. if what in ('attribute', 'method'): - size = os.path.getsize(op.join( - op.dirname(__file__), 'generated', '%s.examples' % (name,))) + size = os.path.getsize(os.path.join( + os.path.dirname(__file__), 'generated', '%s.examples' % (name,))) if size > 0: lines += """ .. _sphx_glr_backreferences_{1}: @@ -432,45 +417,321 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): """.format(name.split('.')[-1], name).split('\n') -def setup(app): - """Set up the Sphinx app.""" - app.connect('autodoc-process-docstring', append_attr_meth_examples) - if report_scraper is not None: - report_scraper.app = app - app.connect('build-finished', report_scraper.copyfiles) +# -- Other extension configuration ------------------------------------------- + +linkcheck_request_headers = dict(user_agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36') # noqa: E501 +linkcheck_ignore = [ # will be compiled to regex + r'https://datashare.is.ed.ac.uk/handle/10283/2189\?show=full', # noqa Max retries exceeded with url: /handle/10283/2189?show=full (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)'))) + 'https://doi.org/10.1002/mds.870120629', # Read timed out. + 'https://doi.org/10.1088/0031-9155/32/1/004', # noqa Read timed out. (read timeout=15) + 'https://doi.org/10.1088/0031-9155/40/3/001', # noqa Read timed out. (read timeout=15) + 'https://doi.org/10.1088/0031-9155/51/7/008', # noqa Read timed out. (read timeout=15) + 'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa Read timed out. (read timeout=15) + 'https://doi.org/10.1088/0967-3334/22/4/305', # noqa Read timed out. (read timeout=15) + 'https://doi.org/10.1088/1741-2552/aacfe4', # noqa Read timed out. 
(read timeout=15) + 'https://doi.org/10.1093/sleep/18.7.557', # noqa 403 Client Error: Forbidden for url: https://academic.oup.com/sleep/article-lookup/doi/10.1093/sleep/18.7.557 + 'https://doi.org/10.1162/089976699300016719', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/11/2/417-441/6242 + 'https://doi.org/10.1162/jocn.1993.5.2.162', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/5/2/162-176/3095 + 'https://doi.org/10.1162/neco.1995.7.6.1129', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/neco/article/7/6/1129-1159/5909 + 'https://doi.org/10.1162/jocn_a_00405', # noqa 403 Client Error: Forbidden for url: https://direct.mit.edu/jocn/article/25/9/1477-1492/27980 + 'https://doi.org/10.1167/15.6.4', # noqa 403 Client Error: Forbidden for url: https://jov.arvojournals.org/article.aspx?doi=10.1167/15.6.4 + 'https://doi.org/10.7488/ds/1556', # noqa Max retries exceeded with url: /handle/10283/2189 (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)'))) + 'https://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach', # noqa Max retries exceeded with url: /imaging/MniTalairach (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1122)'))) + 'https://www.nyu.edu/', # noqa Max retries exceeded with url: / (Caused by SSLError(SSLError(1, '[SSL: DH_KEY_TOO_SMALL] dh key too small (_ssl.c:1122)'))) + 'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) + 'https://hal.archives-ouvertes.fr/hal-01848442.*', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/ +] +linkcheck_anchors = False # saves a bit of time +linkcheck_timeout = 15 # some can be quite slow +# autodoc / autosummary +autosummary_generate = True +autodoc_default_options = {'inherited-members': None} -class Resetter(object): - """Simple class to make the str(obj) static for Sphinx build env hash.""" +# sphinxcontrib-bibtex +bibtex_bibfiles = ['./references.bib'] +bibtex_style = 'unsrt' +bibtex_footbibliography_header = '' - def __init__(self): - self.t0 = time.time() - def __repr__(self): - return '<%s>' % (self.__class__.__name__,) +# -- Nitpicky ---------------------------------------------------------------- - def __call__(self, gallery_conf, fname): - import matplotlib.pyplot as plt - try: - from pyvista import Plotter - except ImportError: - Plotter = None - reset_warnings(gallery_conf, fname) - # in case users have interactive mode turned on in matplotlibrc, - # turn it off here (otherwise the build can be very slow) - plt.ioff() - plt.rcParams['animation.embed_limit'] = 30. - _assert_no_instances(Brain, 'running') # calls gc.collect() - if Plotter is not None: - _assert_no_instances(Plotter, 'running') - # This will overwrite some Sphinx printing but it's useful - # for memory timestamps - if os.getenv('SG_STAMP_STARTS', '').lower() == 'true': - import psutil - process = psutil.Process(os.getpid()) - mem = sizeof_fmt(process.memory_info().rss) - print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22)) +nitpicky = True +nitpick_ignore = [ + ("py:class", "None. 
Remove all items from D."), + ("py:class", "a set-like object providing a view on D's items"), + ("py:class", "a set-like object providing a view on D's keys"), + ("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501 + ("py:class", "None. Update D from dict/iterable E and F."), + ("py:class", "an object providing a view on D's values"), + ("py:class", "a shallow copy of D"), + ("py:class", "(k, v), remove and return some (key, value) pair as a"), + ("py:class", "_FuncT"), # type hint used in @verbose decorator + ("py:class", "mne.utils._logging._FuncT"), +] +for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label', + 'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report', + 'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate', + 'VolSourceEstimate', 'VolVectorSourceEstimate', + 'channels.DigMontage', 'channels.Layout', + 'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator', + 'decoding.GeneralizingEstimator', 'decoding.LinearModel', + 'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD', + 'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator', + 'decoding.TemporalFilter', 'decoding.TimeDelayingRidge', + 'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter', + 'decoding.Vectorizer', + 'preprocessing.ICA', 'preprocessing.Xdawn', + 'simulation.SourceSimulator', + 'time_frequency.CrossSpectralDensity', + 'utils.deprecated', + 'viz.ClickableImage'): + nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__')) +suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pydata_sphinx_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + 'icon_links': [ + dict(name='GitHub', + url='https://github.com/mne-tools/mne-python', + icon='fab fa-github-square'), + dict(name='Twitter', + url='https://twitter.com/mne_python', + icon='fab fa-twitter-square'), + dict(name='Discourse', + url='https://mne.discourse.group/', + icon='fab fa-discourse'), + dict(name='Discord', + url='https://discord.gg/rKfvxTuATa', + icon='fab fa-discord') + ], + 'icon_links_label': 'Quick Links', # for screen reader + 'use_edit_page_button': False, + 'navigation_with_keys': False, + 'show_toc_level': 1, + 'navbar_end': ['version-switcher', 'navbar-icon-links'], + 'footer_items': ['copyright'], + 'google_analytics_id': 'UA-37225609-1', +} + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = "_static/mne_logo_small.svg" +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = "_static/favicon.ico" + + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = [ + 'style.css', +] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. 
These files are copied +# directly to the root of the documentation. +html_extra_path = [ + 'contributing.html', + 'documentation.html', + 'getting_started.html', + 'install_mne_python.html', +] + +# Custom sidebar templates, maps document names to template names. +html_sidebars = { + 'index': ['search-field.html', 'sidebar-quicklinks.html'], +} + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False +html_copy_source = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +html_show_sphinx = False + +# accommodate different logo shapes (width values in rem) +xs = '2' +sm = '2.5' +md = '3' +lg = '4.5' +xl = '5' +# variables to pass to HTML templating engine +html_context = { + 'build_dev_html': bool(int(os.environ.get('BUILD_DEV_HTML', False))), + 'versions_dropdown': { + 'dev': 'v0.23 (devel)', + 'stable': 'v0.22 (stable)', + '0.21': 'v0.21', + '0.20': 'v0.20', + '0.19': 'v0.19', + '0.18': 'v0.18', + '0.17': 'v0.17', + '0.16': 'v0.16', + '0.15': 'v0.15', + '0.14': 'v0.14', + '0.13': 'v0.13', + '0.12': 'v0.12', + '0.11': 'v0.11', + }, + 'funders': [ + dict(img='nih.png', size='3', title='National Institutes of Health'), + dict(img='nsf.png', size='3.5', + title='US National Science Foundation'), + dict(img='erc.svg', size='3.5', title='European Research Council'), + dict(img='doe.svg', size='3', title='US Department of Energy'), + dict(img='anr.svg', size='4.5', + title='Agence Nationale de la Recherche'), + dict(img='cds.png', size='2.25', + title='Paris-Saclay Center for Data Science'), + dict(img='google.svg', size='2.25', title='Google'), + dict(img='amazon.svg', size='2.5', title='Amazon'), + dict(img='czi.svg', size='2.5', title='Chan Zuckerberg Initiative'), + ], + 'institutions': [ + dict(name='Massachusetts General Hospital', + img='MGH.svg', + url='https://www.massgeneral.org/', + size=sm), + dict(name='Athinoula A. 
Martinos Center for Biomedical Imaging', + img='Martinos.png', + url='https://martinos.org/', + size=md), + dict(name='Harvard Medical School', + img='Harvard.png', + url='https://hms.harvard.edu/', + size=sm), + dict(name='Massachusetts Institute of Technology', + img='MIT.svg', + url='https://web.mit.edu/', + size=md), + dict(name='New York University', + img='NYU.png', + url='https://www.nyu.edu/', + size=xs), + dict(name='Commissariat à l´énergie atomique et aux énergies alternatives', # noqa E501 + img='CEA.png', + url='http://www.cea.fr/', + size=md), + dict(name='Aalto-yliopiston perustieteiden korkeakoulu', + img='Aalto.svg', + url='https://sci.aalto.fi/', + size=md), + dict(name='Télécom ParisTech', + img='Telecom_Paris_Tech.svg', + url='https://www.telecom-paris.fr/', + size=md), + dict(name='University of Washington', + img='Washington.png', + url='https://www.washington.edu/', + size=md), + dict(name='Institut du Cerveau et de la Moelle épinière', + img='ICM.jpg', + url='https://icm-institute.org/', + size=md), + dict(name='Boston University', + img='BU.svg', + url='https://www.bu.edu/', + size=lg), + dict(name='Institut national de la santé et de la recherche médicale', + img='Inserm.svg', + url='https://www.inserm.fr/', + size=xl), + dict(name='Forschungszentrum Jülich', + img='Julich.svg', + url='https://www.fz-juelich.de/', + size=xl), + dict(name='Technische Universität Ilmenau', + img='Ilmenau.gif', + url='https://www.tu-ilmenau.de/', + size=xl), + dict(name='Berkeley Institute for Data Science', + img='BIDS.png', + url='https://bids.berkeley.edu/', + size=lg), + dict(name='Institut national de recherche en informatique et en automatique', # noqa E501 + img='inria.png', + url='https://www.inria.fr/', + size=xl), + dict(name='Aarhus Universitet', + img='Aarhus.png', + url='https://www.au.dk/', + size=xl), + dict(name='Karl-Franzens-Universität Graz', + img='Graz.jpg', + url='https://www.uni-graz.at/', + size=md), + ], + # \u00AD is an optional hyphen (not rendered unless needed) + 'carousel': [ + dict(title='Source Estimation', + text='Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.', # noqa E501 + url='auto_tutorials/inverse/30_mne_dspm_loreta.html', + img='sphx_glr_30_mne_dspm_loreta_008.gif', + alt='dSPM'), + dict(title='Machine Learning', + text='Advanced decoding models including time general\u00ADiza\u00ADtion.', # noqa E501 + url='auto_tutorials/machine-learning/50_decoding.html', + img='sphx_glr_50_decoding_006.png', + alt='Decoding'), + dict(title='Encoding Models', + text='Receptive field estima\u00ADtion with optional smooth\u00ADness priors.', # noqa E501 + url='auto_tutorials/machine-learning/30_strf.html', + img='sphx_glr_30_strf_001.png', + alt='STRF'), + dict(title='Statistics', + text='Parametric and non-parametric, permutation tests and clustering.', # noqa E501 + url='auto_tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.html', # noqa E501 + img='sphx_glr_20_cluster_1samp_spatiotemporal_001.png', + alt='Clusters'), + dict(title='Connectivity', + text='All-to-all spectral and effective connec\u00ADtivity measures.', # noqa E501 + url='auto_examples/connectivity/mne_inverse_label_connectivity.html', # noqa E501 + img='sphx_glr_mne_inverse_label_connectivity_001.png', + alt='Connectivity'), + dict(title='Data Visualization', + text='Explore your data from multiple perspectives.', + url='auto_tutorials/evoked/20_visualize_evoked.py', + img='sphx_glr_20_visualize_evoked_007.png', + alt='Visualization'), + ] +} + +# 
Output file base name for HTML help builder. +htmlhelp_basename = 'mne-doc' + + +# -- Options for LaTeX output ------------------------------------------------ + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +latex_logo = "_static/logo.png" + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +latex_toplevel_sectioning = 'part' + + +# -- Warnings management ----------------------------------------------------- def reset_warnings(gallery_conf, fname): """Ensure we are future compatible and ignore silly warnings.""" @@ -489,6 +750,9 @@ def reset_warnings(gallery_conf, fname): warnings.filterwarnings('always', '.*cannot make axes width small.*') warnings.filterwarnings('always', '.*Axes that are not compatible.*') warnings.filterwarnings('always', '.*FastICA did not converge.*') + # ECoG BIDS spec violations: + warnings.filterwarnings('always', '.*Fiducial point nasion not found.*') + warnings.filterwarnings('always', '.*DigMontage is only a subset of.*') warnings.filterwarnings( # xhemi morph (should probably update sample) 'always', '.*does not exist, creating it and saving it.*') warnings.filterwarnings('default', module='sphinx') # internal warnings @@ -515,6 +779,7 @@ def reset_warnings(gallery_conf, fname): r'Converting `np\.character` to a dtype is deprecated', # vtk r'sphinx\.util\.smartypants is deprecated', 'is a deprecated alias for the builtin', # NumPy + 'the old name will be removed', # Jinja, via sphinx ): warnings.filterwarnings( # deal with other modules having bad imports 'ignore', message=".*%s.*" % key, category=DeprecationWarning) @@ -526,6 +791,8 @@ def reset_warnings(gallery_conf, fname): category=FutureWarning) warnings.filterwarnings( # nilearn 'ignore', message=r'The sklearn.* module is.*', category=FutureWarning) + warnings.filterwarnings( # nilearn + 'ignore', message=r'Fetchers from the nilea.*', category=FutureWarning) warnings.filterwarnings( # deal with other modules having bad imports 'ignore', message=".*ufunc size changed.*", category=RuntimeWarning) warnings.filterwarnings( # realtime @@ -539,149 +806,245 @@ def reset_warnings(gallery_conf, fname): reset_warnings(None, None) -sphinx_gallery_conf = { - 'doc_module': ('mne',), - 'reference_url': dict(mne=None), - 'examples_dirs': examples_dirs, - 'subsection_order': ExplicitOrder(['../examples/io/', - '../examples/simulation/', - '../examples/preprocessing/', - '../examples/visualization/', - '../examples/time_frequency/', - '../examples/stats/', - '../examples/decoding/', - '../examples/connectivity/', - '../examples/forward/', - '../examples/inverse/', - '../examples/realtime/', - '../examples/datasets/', - '../tutorials/intro/', - '../tutorials/io/', - '../tutorials/raw/', - '../tutorials/preprocessing/', - '../tutorials/epochs/', - '../tutorials/evoked/', - '../tutorials/time-freq/', - '../tutorials/source-modeling/', - '../tutorials/stats-sensor-space/', - '../tutorials/stats-source-space/', - '../tutorials/machine-learning/', - '../tutorials/simulation/', - '../tutorials/sample-datasets/', - '../tutorials/discussions/', - '../tutorials/misc/']), - 'gallery_dirs': gallery_dirs, - 'default_thumb_file': os.path.join('_static', 'mne_helmet.png'), - 'backreferences_dir': 'generated', - 'plot_gallery': 'True', # Avoid annoying Unicode/bool 
default warning - 'download_section_examples': False, - 'thumbnail_size': (160, 112), - 'remove_config_comments': True, - 'min_reported_time': 1., - 'abort_on_example_error': False, - 'reset_modules': ('matplotlib', Resetter()), # called w/each script - 'image_scrapers': scrapers, - 'show_memory': not sys.platform.startswith('win'), - 'line_numbers': False, # XXX currently (0.3.dev0) messes with style - 'within_subsection_order': FileNameSortKey, - 'capture_repr': ('_repr_html_',), - 'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'), - 'matplotlib_animations': True, - 'compress_images': ('images', 'thumbnails'), -} -############################################################################## -# numpydoc -# XXX This hack defines what extra methods numpydoc will document -docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members -numpydoc_class_members_toctree = False -numpydoc_attributes_as_param_list = True -numpydoc_xref_param_type = True -numpydoc_xref_aliases = { - # Python - 'file-like': ':term:`file-like `', - # Matplotlib - 'colormap': ':doc:`colormap `', - 'color': ':doc:`color `', - 'collection': ':doc:`collections `', - 'Axes': 'matplotlib.axes.Axes', - 'Figure': 'matplotlib.figure.Figure', - 'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D', - 'ColorbarBase': 'matplotlib.colorbar.ColorbarBase', - # Mayavi - 'mayavi.mlab.Figure': 'mayavi.core.api.Scene', - 'mlab.Figure': 'mayavi.core.api.Scene', - # sklearn - 'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut', - # joblib - 'joblib.Parallel': 'joblib.Parallel', - # nibabel - 'Nifti1Image': 'nibabel.nifti1.Nifti1Image', - 'Nifti2Image': 'nibabel.nifti2.Nifti2Image', - 'SpatialImage': 'nibabel.spatialimages.SpatialImage', - # MNE - 'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked', - 'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces', - 'SourceMorph': 'mne.SourceMorph', - 'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout', - 'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel', - 'AverageTFR': 'mne.time_frequency.AverageTFR', - 'EpochsTFR': 'mne.time_frequency.EpochsTFR', - 'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA', - 'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations', - 'DigMontage': 'mne.channels.DigMontage', - 'VectorSourceEstimate': 'mne.VectorSourceEstimate', - 'VolSourceEstimate': 'mne.VolSourceEstimate', - 'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate', - 'MixedSourceEstimate': 'mne.MixedSourceEstimate', - 'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate', - 'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection', - 'ConductorModel': 'mne.bem.ConductorModel', - 'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed', - 'InverseOperator': 'mne.minimum_norm.InverseOperator', - 'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity', - 'SourceMorph': 'mne.SourceMorph', - 'Xdawn': 'mne.preprocessing.Xdawn', - 'Report': 'mne.Report', 'Forward': 'mne.Forward', - 'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge', - 'Vectorizer': 'mne.decoding.Vectorizer', - 'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter', - 'TemporalFilter': 'mne.decoding.TemporalFilter', - 'SSD': 'mne.decoding.SSD', - 'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC', - 'PSDEstimator': 'mne.decoding.PSDEstimator', - 'LinearModel': 'mne.decoding.LinearModel', - 'FilterEstimator': 'mne.decoding.FilterEstimator', - 'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP', - 'Beamformer': 
'mne.beamformer.Beamformer',
-    'Transform': 'mne.transforms.Transform',
+# -- Fontawesome support -----------------------------------------------------
+
+# here the "b" and "s" refer to "brand" and "solid" (determines which font file
+# to look in). "fw-" prefix indicates fixed width.
+icons = {
+    'apple': 'b',
+    'linux': 'b',
+    'windows': 'b',
+    'hand-paper': 's',
+    'question': 's',
+    'quote-left': 's',
+    'rocket': 's',
+    'server': 's',
+    'fw-book': 's',
+    'fw-code-branch': 's',
+    'fw-newspaper': 's',
+    'fw-question-circle': 's',
+    'fw-quote-left': 's',
 }
-numpydoc_xref_ignore = {
-    # words
-    'instance', 'instances', 'of', 'default', 'shape', 'or',
-    'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
-    'dtype', 'object', 'self.verbose',
-    # shapes
-    'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
-    'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
-    'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
-    'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
-    'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
-    'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
-    'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
-    'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
-    'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
-    'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
-    'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
-    'n_dipoles_fwd', 'n_picks_ref', 'n_coords',
-    # Undocumented (on purpose)
-    'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
-    'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
-    'RawPersyst', 'RawNihon',
-    # sklearn subclasses
-    'mapping', 'to', 'any',
-    # unlinkable
-    'mayavi.mlab.pipeline.surface',
-    'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
+
+prolog = ''
+for icon, cls in icons.items():
+    fw = ' fa-fw' if icon.startswith('fw-') else ''
+    prolog += f'''
+.. |{icon}| raw:: html
+
+    <i class="fa{cls} fa-{icon.split("fw-")[-1]}{fw}"></i>
+'''
+
+# -- website redirects --------------------------------------------------------
+
+# Static list created 2021/04/13 based on what we needed to redirect,
+# since we don't need to add redirects for examples added after this date.
+needed_plot_redirects = { + # tutorials + '10_epochs_overview.py', '10_evoked_overview.py', '10_overview.py', + '10_preprocessing_overview.py', '10_raw_overview.py', + '10_reading_meg_data.py', '15_handling_bad_channels.py', + '20_event_arrays.py', '20_events_from_raw.py', '20_reading_eeg_data.py', + '20_rejecting_bad_data.py', '20_visualize_epochs.py', + '20_visualize_evoked.py', '30_annotate_raw.py', '30_epochs_metadata.py', + '30_filtering_resampling.py', '30_info.py', '30_reading_fnirs_data.py', + '35_artifact_correction_regression.py', '40_artifact_correction_ica.py', + '40_autogenerate_metadata.py', '40_sensor_locations.py', + '40_visualize_raw.py', '45_projectors_background.py', + '50_artifact_correction_ssp.py', '50_configure_mne.py', + '50_epochs_to_data_frame.py', '55_setting_eeg_reference.py', + '59_head_positions.py', '60_make_fixed_length_epochs.py', + '60_maxwell_filtering_sss.py', '70_fnirs_processing.py', + # examples + '3d_to_2d.py', 'brainstorm_data.py', 'channel_epochs_image.py', + 'cluster_stats_evoked.py', 'compute_csd.py', + 'compute_mne_inverse_epochs_in_label.py', + 'compute_mne_inverse_raw_in_label.py', 'compute_mne_inverse_volume.py', + 'compute_source_psd_epochs.py', 'covariance_whitening_dspm.py', + 'custom_inverse_solver.py', 'cwt_sensor_connectivity.py', + 'decoding_csp_eeg.py', 'decoding_csp_timefreq.py', + 'decoding_spatio_temporal_source.py', 'decoding_spoc_CMC.py', + 'decoding_time_generalization_conditions.py', + 'decoding_unsupervised_spatial_filter.py', 'decoding_xdawn_eeg.py', + 'define_target_events.py', 'dics_source_power.py', 'eeg_csd.py', + 'eeg_on_scalp.py', 'eeglab_head_sphere.py', 'elekta_epochs.py', + 'ems_filtering.py', 'eog_artifact_histogram.py', 'evoked_arrowmap.py', + 'evoked_ers_source_power.py', 'evoked_topomap.py', 'evoked_whitening.py', + 'fdr_stats_evoked.py', 'find_ref_artifacts.py', + 'fnirs_artifact_removal.py', 'forward_sensitivity_maps.py', + 'gamma_map_inverse.py', 'hf_sef_data.py', 'ica_comparison.py', + 'interpolate_bad_channels.py', 'label_activation_from_stc.py', + 'label_from_stc.py', 'label_source_activations.py', + 'left_cerebellum_volume_source.py', 'limo_data.py', + 'linear_model_patterns.py', 'linear_regression_raw.py', + 'meg_sensors.py', 'mixed_norm_inverse.py', + 'mixed_source_space_connectivity.py', 'mixed_source_space_inverse.py', + 'mne_cov_power.py', 'mne_helmet.py', 'mne_inverse_coherence_epochs.py', + 'mne_inverse_connectivity_spectrum.py', + 'mne_inverse_envelope_correlation.py', + 'mne_inverse_envelope_correlation_volume.py', + 'mne_inverse_label_connectivity.py', 'mne_inverse_psi_visual.py', + 'morph_surface_stc.py', 'morph_volume_stc.py', 'movement_compensation.py', + 'movement_detection.py', 'multidict_reweighted_tfmxne.py', + 'muscle_detection.py', 'opm_data.py', 'otp.py', 'parcellation.py', + 'psf_ctf_label_leakage.py', 'psf_ctf_vertices.py', + 'psf_ctf_vertices_lcmv.py', 'publication_figure.py', 'rap_music.py', + 'read_inverse.py', 'read_neo_format.py', 'read_noise_covariance_matrix.py', + 'read_stc.py', 'receptive_field_mtrf.py', 'resolution_metrics.py', + 'resolution_metrics_eegmeg.py', 'roi_erpimage_by_rt.py', + 'sensor_connectivity.py', 'sensor_noise_level.py', + 'sensor_permutation_test.py', 'sensor_regression.py', + 'shift_evoked.py', 'simulate_evoked_data.py', 'simulate_raw_data.py', + 'simulated_raw_data_using_subject_anatomy.py', 'snr_estimate.py', + 'source_label_time_frequency.py', 'source_power_spectrum.py', + 'source_power_spectrum_opm.py', 'source_simulator.py', + 
'source_space_morphing.py', 'source_space_snr.py', + 'source_space_time_frequency.py', 'ssd_spatial_filters.py', + 'ssp_projs_sensitivity_map.py', 'temporal_whitening.py', + 'time_frequency_erds.py', 'time_frequency_global_field_power.py', + 'time_frequency_mixed_norm_inverse.py', 'time_frequency_simulated.py', + 'topo_compare_conditions.py', 'topo_customized.py', + 'vector_mne_solution.py', 'virtual_evoked.py', 'xdawn_denoising.py', + 'xhemi.py', +} +tu = 'auto_tutorials' +di = 'discussions' +sm = 'source-modeling' +fw = 'forward' +nv = 'inverse' +sn = 'stats-sensor-space' +sr = 'stats-source-space' +sd = 'sample-datasets' +ml = 'machine-learning' +tf = 'time-freq' +si = 'simulation' +custom_redirects = { + # Custom redirects (one HTML path to another, relative to outdir) + # can be added here as fr->to key->value mappings + f'{tu}/evoked/plot_eeg_erp.html': f'{tu}/evoked/30_eeg_erp.html', + f'{tu}/evoked/plot_whitened.html': f'{tu}/evoked/40_whitened.html', + f'{tu}/misc/plot_modifying_data_inplace.html': f'{tu}/intro/15_inplace.html', # noqa E501 + f'{tu}/misc/plot_report.html': f'{tu}/intro/70_report.html', + f'{tu}/misc/plot_seeg.html': f'{tu}/clinical/20_seeg.html', + f'{tu}/misc/plot_ecog.html': f'{tu}/clinical/30_ecog.html', + f'{tu}/{ml}/plot_receptive_field.html': f'{tu}/{ml}/30_strf.html', + f'{tu}/{ml}/plot_sensors_decoding.html': f'{tu}/{ml}/50_decoding.html', + f'{tu}/{sm}/plot_background_freesurfer.html': f'{tu}/{fw}/10_background_freesurfer.html', # noqa E501 + f'{tu}/{sm}/plot_source_alignment.html': f'{tu}/{fw}/20_source_alignment.html', # noqa E501 + f'{tu}/{sm}/plot_forward.html': f'{tu}/{fw}/30_forward.html', + f'{tu}/{sm}/plot_eeg_no_mri.html': f'{tu}/{fw}/35_eeg_no_mri.html', + f'{tu}/{sm}/plot_background_freesurfer_mne.html': f'{tu}/{fw}/50_background_freesurfer_mne.html', # noqa E501 + f'{tu}/{sm}/plot_fix_bem_in_blender.html': f'{tu}/{fw}/80_fix_bem_in_blender.html', # noqa E501 + f'{tu}/{sm}/plot_compute_covariance.html': f'{tu}/{fw}/90_compute_covariance.html', # noqa E501 + f'{tu}/{sm}/plot_object_source_estimate.html': f'{tu}/{nv}/10_stc_class.html', # noqa E501 + f'{tu}/{sm}/plot_dipole_fit.html': f'{tu}/{nv}/20_dipole_fit.html', + f'{tu}/{sm}/plot_mne_dspm_source_localization.html': f'{tu}/{nv}/30_mne_dspm_loreta.html', # noqa E501 + f'{tu}/{sm}/plot_dipole_orientations.html': f'{tu}/{nv}/35_dipole_orientations.html', # noqa E501 + f'{tu}/{sm}/plot_mne_solutions.html': f'{tu}/{nv}/40_mne_fixed_free.html', + f'{tu}/{sm}/plot_beamformer_lcmv.html': f'{tu}/{nv}/50_beamformer_lcmv.html', # noqa E501 + f'{tu}/{sm}/plot_visualize_stc.html': f'{tu}/{nv}/60_visualize_stc.html', + f'{tu}/{sm}/plot_eeg_mri_coords.html': f'{tu}/{nv}/70_eeg_mri_coords.html', + f'{tu}/{sd}/plot_brainstorm_phantom_elekta.html': f'{tu}/{nv}/80_brainstorm_phantom_elekta.html', # noqa E501 + f'{tu}/{sd}/plot_brainstorm_phantom_ctf.html': f'{tu}/{nv}/85_brainstorm_phantom_ctf.html', # noqa E501 + f'{tu}/{sd}/plot_phantom_4DBTi.html': f'{tu}/{nv}/90_phantom_4DBTi.html', + f'{tu}/{sd}/plot_brainstorm_auditory.html': f'{tu}/io/60_ctf_bst_auditory.html', # noqa E501 + f'{tu}/{sd}/plot_sleep.html': f'{tu}/clinical/60_sleep.html', + f'{tu}/{di}/plot_background_filtering.html': f'{tu}/preprocessing/25_background_filtering.html', # noqa E501 + f'{tu}/{di}/plot_background_statistics.html': f'{tu}/{sn}/10_background_stats.html', # noqa E501 + f'{tu}/{sn}/plot_stats_cluster_erp.html': f'{tu}/{sn}/20_erp_stats.html', + f'{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html': 
f'{tu}/{sn}/40_cluster_1samp_time_freq.html', # noqa E501 + f'{tu}/{sn}/plot_stats_cluster_time_frequency.html': f'{tu}/{sn}/50_cluster_between_time_freq.html', # noqa E501 + f'{tu}/{sn}/plot_stats_spatio_temporal_cluster_sensors.html': f'{tu}/{sn}/75_cluster_ftest_spatiotemporal.html', # noqa E501 + f'{tu}/{sr}/plot_stats_cluster_spatio_temporal.html': f'{tu}/{sr}/20_cluster_1samp_spatiotemporal.html', # noqa E501 + f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_2samp.html': f'{tu}/{sr}/30_cluster_ftest_spatiotemporal.html', # noqa E501 + f'{tu}/{sr}/plot_stats_cluster_spatio_temporal_repeated_measures_anova.html': f'{tu}/{sr}/60_cluster_rmANOVA_spatiotemporal.html', # noqa E501 + f'{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html': f'{tu}/{sr}/70_cluster_rmANOVA_time_freq.html', # noqa E501 + f'{tu}/{tf}/plot_sensors_time_frequency.html': f'{tu}/{tf}/20_sensors_time_frequency.html', # noqa E501 + f'{tu}/{tf}/plot_ssvep.html': f'{tu}/{tf}/50_ssvep.html', + f'{tu}/{si}/plot_creating_data_structures.html': f'{tu}/{si}/10_array_objs.html', # noqa E501 + f'{tu}/{si}/plot_point_spread.html': f'{tu}/{si}/70_point_spread.html', + f'{tu}/{si}/plot_dics.html': f'{tu}/{si}/80_dics.html', } + + +def make_redirects(app, exception): + """Make HTML redirects.""" + # https://www.sphinx-doc.org/en/master/extdev/appapi.html + # Adapted from sphinxcontrib/redirects (BSD 2-clause) + if not isinstance(app.builder, sphinx.builders.html.StandaloneHTMLBuilder): + return + logger = sphinx.util.logging.getLogger('mne') + TEMPLATE = """\ + + + + + + + Page Redirection + + + If you are not redirected automatically, follow this link. + +""" # noqa: E501 + sphinx_gallery_conf = app.config['sphinx_gallery_conf'] + for src_dir, out_dir in zip(sphinx_gallery_conf['examples_dirs'], + sphinx_gallery_conf['gallery_dirs']): + root = os.path.abspath(os.path.join(app.srcdir, src_dir)) + fnames = [os.path.join(os.path.relpath(dirpath, root), fname) + for dirpath, _, fnames in os.walk(root) + for fname in fnames + if fname in needed_plot_redirects] + # plot_ redirects + for fname in fnames: + dirname = os.path.join(app.outdir, out_dir, os.path.dirname(fname)) + to_fname = os.path.splitext(os.path.basename(fname))[0] + '.html' + fr_fname = f'plot_{to_fname}' + to_path = os.path.join(dirname, to_fname) + fr_path = os.path.join(dirname, fr_fname) + assert os.path.isfile(to_path), (fname, to_path) + with open(fr_path, 'w') as fid: + fid.write(TEMPLATE.format(to=to_fname)) + logger.info( + f'Added {len(fnames):3d} HTML plot_* redirects for {out_dir}') + # custom redirects + for fr, to in custom_redirects.items(): + to_path = os.path.join(app.outdir, to) + assert os.path.isfile(to_path), to + assert to_path.endswith('html'), to_path + fr_path = os.path.join(app.outdir, fr) + assert fr_path.endswith('html'), fr_path + # allow overwrite if existing file is just a redirect + if os.path.isfile(fr_path): + with open(fr_path, 'r') as fid: + for _ in range(9): + next(fid) + line = fid.readline() + assert 'Page Redirection' in line, line + # handle folders that no longer exist + if fr_path.split(os.path.sep)[-2] in ( + 'misc', 'discussions', 'source-modeling', 'sample-datasets'): + os.makedirs(os.path.dirname(fr_path), exist_ok=True) + # handle links to sibling folders + path_parts = to.split(os.path.sep) + path_parts = ['..'] + path_parts[(path_parts.index(tu) + 1):] + with open(fr_path, 'w') as fid: + fid.write(TEMPLATE.format(to=os.path.join(*path_parts))) + logger.info( + f'Added {len(custom_redirects):3d} HTML 
custom redirects') + + +# -- Connect our handlers to the main Sphinx app --------------------------- + +def setup(app): + """Set up the Sphinx app.""" + app.connect('autodoc-process-docstring', append_attr_meth_examples) + if report_scraper is not None: + report_scraper.app = app + app.config.rst_prolog = prolog + app.connect('builder-inited', report_scraper.copyfiles) + app.connect('build-finished', make_redirects) diff --git a/doc/connectivity.rst b/doc/connectivity.rst new file mode 100644 index 00000000000..3526712d6f7 --- /dev/null +++ b/doc/connectivity.rst @@ -0,0 +1,20 @@ + +Connectivity Estimation +======================= + +:py:mod:`mne.connectivity`: + +.. automodule:: mne.connectivity + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.connectivity + +.. autosummary:: + :toctree: generated/ + + degree + envelope_correlation + phase_slope_index + seed_target_indices + spectral_connectivity diff --git a/doc/covariance.rst b/doc/covariance.rst new file mode 100644 index 00000000000..1de751f21a8 --- /dev/null +++ b/doc/covariance.rst @@ -0,0 +1,19 @@ + +Covariance computation +====================== + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + Covariance + compute_covariance + compute_raw_covariance + cov.compute_whitener + cov.prepare_noise_cov + cov.regularize + compute_rank + make_ad_hoc_cov + read_cov + write_cov diff --git a/doc/creating_from_arrays.rst b/doc/creating_from_arrays.rst new file mode 100644 index 00000000000..f580cadfc78 --- /dev/null +++ b/doc/creating_from_arrays.rst @@ -0,0 +1,13 @@ + +Creating data objects from arrays +================================= + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + EvokedArray + EpochsArray + io.RawArray + create_info diff --git a/doc/datasets.rst b/doc/datasets.rst new file mode 100644 index 00000000000..63889bb722a --- /dev/null +++ b/doc/datasets.rst @@ -0,0 +1,43 @@ + +Datasets +======== + +.. currentmodule:: mne.datasets + +:py:mod:`mne.datasets`: + +.. automodule:: mne.datasets + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + brainstorm.bst_auditory.data_path + brainstorm.bst_resting.data_path + brainstorm.bst_raw.data_path + eegbci.load_data + eegbci.standardize + fetch_aparc_sub_parcellation + fetch_fsaverage + fetch_hcp_mmp_parcellation + fetch_infant_template + fnirs_motor.data_path + hf_sef.data_path + kiloword.data_path + limo.load_data + misc.data_path + mtrf.data_path + multimodal.data_path + opm.data_path + sleep_physionet.age.fetch_data + sleep_physionet.temazepam.fetch_data + sample.data_path + somato.data_path + spm_face.data_path + visual_92_categories.data_path + phantom_4dbti.data_path + refmeg_noise.data_path + ssvep.data_path + erp_core.data_path + epilepsy_ecog.data_path \ No newline at end of file diff --git a/doc/decoding.rst b/doc/decoding.rst new file mode 100644 index 00000000000..e539629c8c0 --- /dev/null +++ b/doc/decoding.rst @@ -0,0 +1,40 @@ + +.. _api_decoding: + +Decoding +======== + +:py:mod:`mne.decoding`: + +.. automodule:: mne.decoding + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + CSP + EMS + FilterEstimator + LinearModel + PSDEstimator + Scaler + TemporalFilter + TimeFrequency + UnsupervisedSpatialFilter + Vectorizer + ReceptiveField + TimeDelayingRidge + SlidingEstimator + GeneralizingEstimator + SPoC + SSD + +Functions that assist with decoding and model fitting: + +.. 
autosummary:: + :toctree: generated/ + + compute_ems + cross_val_multiscore + get_coef diff --git a/doc/events.rst b/doc/events.rst new file mode 100644 index 00000000000..d37d034f0d1 --- /dev/null +++ b/doc/events.rst @@ -0,0 +1,56 @@ + +Events +====== + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + Annotations + AcqParserFIF + concatenate_events + find_events + find_stim_steps + make_fixed_length_events + make_fixed_length_epochs + merge_events + parse_config + pick_events + read_annotations + read_events + write_events + concatenate_epochs + events_from_annotations + annotations_from_events + +:py:mod:`mne.event`: + +.. automodule:: mne.event + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.event + +.. autosummary:: + :toctree: generated/ + + define_target_events + shift_time_events + +:py:mod:`mne.epochs`: + +.. automodule:: mne.epochs + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.epochs + +.. autosummary:: + :toctree: generated/ + + add_channels_epochs + average_movements + combine_event_ids + equalize_epoch_counts + make_metadata \ No newline at end of file diff --git a/doc/file_io.rst b/doc/file_io.rst new file mode 100644 index 00000000000..1af91b0b0d9 --- /dev/null +++ b/doc/file_io.rst @@ -0,0 +1,63 @@ +File I/O +======== + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated + + channel_type + channel_indices_by_type + get_head_surf + get_meg_helmet_surf + get_volume_labels_from_aseg + get_volume_labels_from_src + parse_config + read_labels_from_annot + read_bem_solution + read_bem_surfaces + read_cov + read_dipole + read_epochs + read_epochs_kit + read_epochs_eeglab + read_epochs_fieldtrip + read_events + read_evokeds + read_evoked_fieldtrip + read_evokeds_mff + read_freesurfer_lut + read_forward_solution + read_label + read_morph_map + read_proj + read_reject_parameters + read_selection + read_source_estimate + read_source_spaces + read_surface + read_trans + read_tri + write_labels_to_annot + write_bem_solution + write_bem_surfaces + write_head_bem + write_cov + write_events + write_evokeds + write_forward_solution + write_label + write_proj + write_source_spaces + write_surface + write_trans + what + io.read_info + io.show_fiff + +Base class: + +.. autosummary:: + :toctree: generated + + BaseEpochs diff --git a/doc/forward.rst b/doc/forward.rst new file mode 100644 index 00000000000..9f355ed9006 --- /dev/null +++ b/doc/forward.rst @@ -0,0 +1,60 @@ + +Forward Modeling +================ + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + Forward + SourceSpaces + add_source_space_distances + apply_forward + apply_forward_raw + average_forward_solutions + convert_forward_solution + decimate_surface + dig_mri_distances + forward.compute_depth_prior + forward.compute_orient_prior + forward.restrict_forward_to_label + forward.restrict_forward_to_stc + make_bem_model + make_bem_solution + make_forward_dipole + make_forward_solution + make_field_map + make_sphere_model + morph_source_spaces + read_bem_surfaces + read_forward_solution + read_trans + read_source_spaces + read_surface + sensitivity_map + setup_source_space + setup_volume_source_space + surface.complete_surface_info + surface.read_curvature + use_coil_def + write_bem_surfaces + write_trans + +:py:mod:`mne.bem`: + +.. automodule:: mne.bem + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.bem + +.. 
autosummary:: + :toctree: generated/ + + ConductorModel + fit_sphere_to_headshape + get_fitting_dig + make_watershed_bem + make_flash_bem + convert_flash_mris diff --git a/doc/funding.rst b/doc/funding.rst new file mode 100644 index 00000000000..43d4ccf3454 --- /dev/null +++ b/doc/funding.rst @@ -0,0 +1,50 @@ +:orphan: + +Funding and other support +========================= + +Development of MNE-Python has been supported by: + +.. rst-class:: list-unstyled funders + +- |nih| **National Institutes of Health:** R01-EB009048, R01-EB009048, R01-EB006385, R01-HD40712, R01-NS44319, R01-NS37462, R01-NS104585, P41-EB015896, P41-RR14075-06 +- |nsf| **US National Science Foundation:** 0958669, 1042134 +- |erc| **European Research Council:** YStG-263584, YStG-676943 +- |doe| **US Department of Energy:** DE-FG02-99ER62764 (MIND) +- |anr| **Agence Nationale de la Recherche:** `14-NEUC-0002-01 `_, **IDEX** Paris-Saclay `11-IDEX-0003-02 `_ +- |cds| **Paris-Saclay Center for Data Science:** `PARIS-SACLAY `_ +- |goo| **Google:** Summer of code (×6) +- |ama| **Amazon:** AWS Research Grants +- |czi| **Chan Zuckerberg Initiative:** `EOSS2 `_ + + +Additionally, many universities or research institutions have supported their employees’ contributions to MNE-Python as part of normal work duties. These institutions include: + +- `Massachusetts General Hospital `_ +- `Athinoula A. Martinos Center for Biomedical Imaging `_ +- `Harvard Medical School `_ +- `Massachusetts Institute of Technology `_ +- `New York University `_ +- `Commissariat à l’énergie atomique et aux énergies alternatives `_ +- `Aalto-yliopiston perustieteiden korkeakoulu `_ +- `Télécom ParisTech `_ +- `University of Washington `_ +- `Institut du Cerveau et de la Moelle épinière `_ +- `Boston University `_ +- `Institut national de la santé et de la recherche médicale `_ +- `Forschungszentrum Jülich `_ +- `Technische Universität Ilmenau `_ +- `Berkeley Institute for Data Science `_ +- `Institut national de recherche en informatique et en automatique `_ +- `Aarhus Universitet `_ +- `Karl-Franzens-Universität Graz `_ + +.. |nih| image:: _static/funding/nih.png +.. |nsf| image:: _static/funding/nsf.png +.. |erc| image:: _static/funding/erc.svg +.. |doe| image:: _static/funding/doe.svg +.. |anr| image:: _static/funding/anr.svg +.. |cds| image:: _static/funding/cds.png +.. |goo| image:: _static/funding/google.svg +.. |ama| image:: _static/funding/amazon.svg +.. |czi| image:: _static/funding/czi.svg diff --git a/doc/glossary.rst b/doc/glossary.rst index e2074c44188..d677ef3e9ea 100644 --- a/doc/glossary.rst +++ b/doc/glossary.rst @@ -1,7 +1,3 @@ -:orphan: - -.. _glossary: - Glossary ======== @@ -12,6 +8,7 @@ general neuroimaging concepts. If you think a term is missing, please consider `creating a new issue`_ or `opening a pull request`_ to add it. .. glossary:: + :sorted: annotations @@ -24,19 +21,22 @@ general neuroimaging concepts. If you think a term is missing, please consider object class and :ref:`tut-annotations` for a tutorial on how to manipulate such objects. - Beamformer + beamformer Beamformer is a popular source estimation approach that uses a set of spatial filters (beamformer weights) to compute time courses of sources - which coordinates are predefined. See :class:`mne.beamformer.Beamformer`. + at predefined coordinates. See :class:`beamformer.Beamformer`. See + also :term:`LCMV`. BEM + boundary element model + boundary element method BEM is the acronym for boundary element method or boundary element model. 
Both are related to the forward model computation and more specifically the definion of the conductor model. The boundary element model consists of surfaces such as the inner skull, outer skull and outer skin (a.k.a. scalp) that define compartments of tissues of the head. You can compute the BEM surfaces with - :func:`mne.bem.make_watershed_bem` or :func:`mne.bem.make_flash_bem`. + :func:`bem.make_watershed_bem` or :func:`bem.make_flash_bem`. See :ref:`tut-forward` for usage demo. channels @@ -57,9 +57,10 @@ general neuroimaging concepts. If you think a term is missing, please consider .. mne:: data channels list DICS + dynamic imaging of coherent sources Dynamic Imaging of Coherent Sources, a method for computing source power in different frequency bands. see :ref:`ex-inverse-source-power` - and :func:`mne.beamformer.make_dics`. + and :func:`beamformer.make_dics`. digitization Digitization is a procedure of recording the headshape of a subject and @@ -68,20 +69,32 @@ general neuroimaging concepts. If you think a term is missing, please consider See :ref:`reading-dig-montages` and :ref:`dig-formats`. dipole - See :term:`equivalent current dipole`. + ECD + equivalent current dipole + An equivalent current dipole (ECD) is an approximate representation of + post-synaptic activity in a small region of cortex. The intracellular + currents that give rise to measurable EEG/MEG signals are thought to + originate in populations of cortical pyramidal neurons aligned + perpendicularly to the cortical surface. Because the length of such + current sources is very small relative to the distance between the + cortex and the EEG/MEG sensors, the fields measured by the techniques + are well-approximated by (i.e., "equivalent" to) fields generated by + idealized point sources (dipoles) located on the cortical surface. dSPM + dynamic statistical parametric mapping Dynamic statistical parametric mapping (abbr. ``dSPM``) gives a noise- normalized minimum-norm estimate at a given source location. dSPM is calculated by dividing the activity estimate at each source location by the baseline standard deviation of the noise. - eLORETA and sLORETA + eLORETA + sLORETA eLORETA and sLORETA (exact and standardized low resolution brain electromagnetic tomography) are linear source estimation techniques, - as are dSPM or :term:`MNE `. sLORETA outputs + as are :term:`dSPM` and :term:`MNE`. sLORETA outputs standardized values (like dSPM does), while eLORETA outputs normalized - current estimates. See :func:`mne.minimum_norm.apply_inverse`, + current estimates. See :func:`minimum_norm.apply_inverse`, :ref:`tut-inverse-methods`, and :ref:`example-sLORETA`. epochs @@ -93,17 +106,6 @@ general neuroimaging concepts. If you think a term is missing, please consider API of the corresponding object class, and :ref:`tut-epochs-class` for a narrative overview. - equivalent current dipole - An equivalent current dipole (ECD) is an approximate representation of - post-synaptic activity in a small region of cortex. The intracellular - currents that give rise to measurable EEG/MEG signals are thought to - originate in populations of cortical pyramidal neurons aligned - perpendicularly to the cortical surface. Because the length of such - current sources is very small relative to the distance between the - cortex and the EEG/MEG sensors, the fields measured by the techniques - are well-approximated by (i.e., "equivalent" to) fields generated by - idealized point sources (dipoles) located on the cortical surface. 
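The dSPM, sLORETA and eLORETA entries above all resolve to the same entry
point, :func:`minimum_norm.apply_inverse`. As a minimal sketch of how the
estimator is selected in practice (assuming an ``evoked`` and an
``inverse_operator`` have already been computed; both variable names are
placeholders, not part of the glossary):

.. code-block:: python

    from mne.minimum_norm import apply_inverse

    # regularization is conventionally expressed through an assumed SNR
    snr = 3.0
    lambda2 = 1.0 / snr ** 2

    # the `method` argument selects the estimator described above
    stc_dspm = apply_inverse(evoked, inverse_operator, lambda2, method='dSPM')
    stc_eloreta = apply_inverse(evoked, inverse_operator, lambda2,
                                method='eLORETA')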
- events Events correspond to specific time points in raw data; e.g., triggers, experimental condition events, etc. MNE represents events with @@ -118,22 +120,38 @@ general neuroimaging concepts. If you think a term is missing, please consider See :class:`EvokedArray` for the API of the corresponding object class, and :ref:`tut-evoked-class` for a narrative overview. + fiducial fiducial point - There are three fiducial (a.k.a. cardinal) points: the left - preauricular point (LPA), the right preauricular point (RPA) - and the nasion. + anatomical landmark + Fiducials are objects placed in the field of view of an imaging system + to act as a known spatial reference location that is easy to localize. + In neuroimaging, fiducials are often placed on anatomical landmarks + such as the nasion (NAS) or left/right preauricular points (LPA and + RPA). + + These known reference locations are used to define a coordinate system + used for localization of sensors (hence NAS, LPA and RPA are often + called "cardinal points" because they define the cardinal directions of + the "head" coordinate system). The cardinal points are also useful when + co-registering measurements in different coordinate systems (such as + aligning EEG sensor locations to an MRI of the subject's head). + + Due to the common neuroimaging practice of placing fiducial objects on + anatomical landmarks, the terms "fiducial", "anatomical landmark" and + "cardinal point" are often (erroneously) used interchangeably. first_samp - The :attr:`~mne.io.Raw.first_samp` attribute of :class:`~mne.io.Raw` + The :attr:`~io.Raw.first_samp` attribute of :class:`~io.Raw` objects is an integer representing the number of time samples that passed between the onset of the hardware acquisition system and the time when data started to be recorded to disk. This approach to sample numbering is a peculiarity of VectorView MEG systems, but for - consistency it is present in all :class:`~mne.io.Raw` objects + consistency it is present in all :class:`~io.Raw` objects regardless of the source of the data. In other words, - :attr:`~mne.io.Raw.first_samp` will be ``0`` in :class:`~mne.io.Raw` + :attr:`~io.Raw.first_samp` will be ``0`` in :class:`~io.Raw` objects loaded from non-VectorView data files. + forward forward solution The forward solution (abbr. ``fwd``) is a linear operator capturing the relationship between each dipole location in the :term:`source space` @@ -141,17 +159,27 @@ general neuroimaging concepts. If you think a term is missing, please consider the "lead field matrix"). Calculating a forward solution requires a conductivity model of the head, encapsulating the geometry and electrical conductivity of the different tissue compartments (see - :term:`boundary element model ` and - :class:`mne.bem.ConductorModel`). + :term:`boundary element model` and :class:`bem.ConductorModel`). GFP + global field power Global Field Power (abbr. ``GFP``) is a measure of the (non-)uniformity of the electromagnetic field at the sensors. It is typically calculated as the standard deviation of the sensor values at each time point; thus it is a one-dimensional time series capturing the spatial variability of the signal across sensor locations. + HED + hierarchical event descriptors + Hierarchical event descriptors (abbr. ``HED``) are tags that use + keywords separated by '/' to describe different types of + experimental events (for example, stimulus/circle/red/left and + stimulus/circle/blue/left). 
These tags can be used to group + experimental events and select event types for analysis. + HPI + cHPI + head position indicator Head position indicators (abbr. ``HPI``, or sometimes ``cHPI`` for *continuous* head position indicators) are small coils attached to a subject's head during MEG acquisition. Each coil emits a sinusoidal @@ -162,41 +190,48 @@ general neuroimaging concepts. If you think a term is missing, please consider low-pass filtering. See :ref:`tut-head-pos`. info - Also called ``measurement info``, it is a collection of metadata regarding - a Raw, Epochs or Evoked object; e.g., - channel locations and types, sampling frequency, - preprocessing history such as filters ... + Also called ``measurement info``, it is a collection of metadata + regarding a :class:`~io.Raw`, :class:`Epochs` or :class:`Evoked` + object, containing channel locations and types, sampling frequency, + preprocessing history such as filters, etc. See :ref:`tut-info-class` for a narrative overview. + inverse inverse operator The inverse operator is an :math:`M \times N` matrix (:math:`M` source locations by :math:`N` sensors) that, when applied to the sensor signals, yields estimates of the brain activity that gave rise to the observed sensor signals. Inverse operators are available for the linear inverse methods MNE, dSPM, sLORETA and eLORETA. - See :func:`mne.minimum_norm.apply_inverse`. + See :func:`minimum_norm.apply_inverse`. label - A :class:`Label` refers to a region in the cortex, also often called - a region of interest (ROI) in the literature. + A :class:`Label` refers to a defined region in the cortex, also often called + a region of interest (ROI) in the literature. Labels can be defined + anatomically (based on physical structure of the cortex) or functionally + (based on cortical response to specific stimuli). layout - A :class:`Layout ` gives sensor positions in 2 + A :class:`~channels.Layout` gives sensor positions in 2 dimensions (defined by ``x``, ``y``, ``width``, and ``height`` values for each sensor). It is primarily used for illustrative purposes (i.e., making diagrams of approximate sensor positions in top-down diagrams of the head, so-called topographies or topomaps). + LCMV LCMV beamformer Linearly constrained minimum variance beamformer, which attempts to estimate activity for a given source while suppressing cross-talk from - other regions, see :func:`mne.beamformer.make_lcmv`. + other regions, see :func:`beamformer.make_lcmv`. See also + :term:`beamformer`. maximum intensity projection A method of displaying activity within some volume by, for each pixel, finding the maximum value along vector from the viewer to the pixel (i.e., along the vector pependicular to the view plane). + MNE + minimum-norm estimate minimum-norm estimation Minimum-norm estimation (abbr. ``MNE``) can be used to generate a distributed map of activation on a :term:`source space`, usually on a cortical surface. @@ -235,34 +270,45 @@ general neuroimaging concepts. If you think a term is missing, please consider available in ``info['chs']``. projector + SSP A projector (abbr. ``proj``), also referred to as Signal Space - Projection (SSP), defines - a linear operation applied spatially to EEG or MEG data. You can see - this as a matrix multiplication that reduces the rank of the data by - projecting it to a lower dimensional subspace. Such a projection - operator is applied to both the data and the forward operator for + Projection (SSP), defines a linear operation applied spatially to EEG + or MEG data. 
A matrix multiplication of an SSP projector with the data + will reduce the rank of the data by projecting it to a + lower-dimensional subspace. Such projections are typically applied to + both the data and the forward operator when performing source localization. Note that EEG average referencing can be done - using such a projection operator. It is stored in the measurement - info in ``info['projs']``. + using such a projection operator. Projectors are stored alongside data + in :term:`the measurement info` in the field ``info['projs']``. raw - It corresponds to continuous data (preprocessed or not). One typically + `~io.Raw` objects hold continuous data (preprocessed or not). One typically manipulates raw data when reading recordings in a file on disk. See :class:`~io.RawArray` for the API of the corresponding object class, and :ref:`tut-raw-class` for a narrative overview. - selection (abbr. sel) - A set of picks. E.g., all sensors included in a Region of Interest. + ROI + region of interest + A spatial region where an experimental effect is expected to manifest. + This can be a collection of sensors or, when performing inverse imaging, + a set of vertices on the cortical surface or within the cortical volume. + See also :term:`label`. + + selection + A selection is a set of picked channels (for example, all sensors + falling within a :term:`region of interest`). - source estimates (abbr. ``stc``) + STC + source estimate + source time course Source estimates, commonly referred to as STC (Source Time Courses), - are obtained from source localization methods, - such as dSPM, sLORETA, LCMV or MxNE. - It contains the amplitudes of the sources over time. - An STC object only stores the amplitudes of activations but - not the locations of the sources. To get access to the locations - you need to have the source space used to compute the forward - operator. + are obtained from source localization methods such as :term:`dSPM`, + :term:`sLORETA`, :term:`LCMV` or MxNE. + STCs contain the amplitudes of the neural sources over time. + In MNE-Python, :class:`SourceEstimate` objects only store the + amplitudes of activation but not the locations of the sources; the + locations are stored separately in the :class:`SourceSpaces` object + that was used to compute the forward operator. See :class:`SourceEstimate`, :class:`VolSourceEstimate` :class:`VectorSourceEstimate`, :class:`MixedSourceEstimate`, for the API of the corresponding object classes. @@ -271,15 +317,15 @@ general neuroimaging concepts. If you think a term is missing, please consider A source space (abbr. ``src``) specifies where in the brain one wants to estimate the source amplitudes. It corresponds to locations of a set of - candidate equivalent current dipoles (ECD). MNE mostly works + candidate :term:`equivalent current dipoles`. MNE mostly works with source spaces defined on the cortical surfaces estimated - by FreeSurfer from a T1-weighted MRI image. See - :ref:`tut-forward` to read on - how to compute a forward operator on a source space. + by FreeSurfer from a T1-weighted MRI image. See :ref:`tut-forward` + to read about how to compute a forward operator on a source space. See :class:`SourceSpaces` for the API of the corresponding object class. stim channel + trigger channel A stim channel, a.k.a. trigger channel, is a channel that encodes events during the recording. It is typically a channel that is usually zero and takes positive values when something happens (such as the @@ -309,4 +355,4 @@ general neuroimaging concepts. 
If you think a term is missing, please consider .. _`creating a new issue`: https://github.com/mne-tools/mne-python/issues/new?template=glossary.md .. _`opening a pull request`: - https://github.com/mne-tools/mne-python/pull/new/master + https://github.com/mne-tools/mne-python/pull/new/main diff --git a/doc/index.rst b/doc/index.rst index 50f6fa8e9c8..c3d6d9216fc 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,95 +1,35 @@ .. title:: MNE -.. title image, description -.. raw:: html - -
- -.. image:: _static/mne_logo.svg - :alt: MNE - :class: logo - :align: center - -Open-source Python package for exploring, visualizing, and -analyzing human neurophysiological data: MEG, EEG, sEEG, ECoG, NIRS, and more. - -.. raw:: html - -
- -.. carousel -.. include:: carousel.inc - -.. raw:: html - -
- -.. whats_new box (with spacer) -.. raw:: html - -
- -Version |version| - -.. raw:: html - -

- -- :ref:`whats_new` -- :ref:`Installation ` -- :ref:`Documentation ` -- :ref:`Cite ` +.. The page title must be in rST for it to show in next/prev page buttons. + Therefore we add a special style rule to only this page that hides h1 tags .. raw:: html -
+ -.. funding box -.. raw:: html +MNE-Python Homepage +=================== -
+.. LOGO -Direct financial support - -.. raw:: html - -

+.. image:: _static/mne_logo.svg + :alt: MNE-Python + :class: logo + :align: center -.. rst-class:: list-funding list-unstyled -- |nih| **National Institutes of Health:** **R01**-EB009048, EB009048, EB006385, HD40712, NS44319, NS37462, NS104585, **P41**-EB015896, RR14075-06 -- |nsf| **US National Science Foundation:** 0958669, 1042134 -- |erc| **European Research Council:** **YStG**-263584, 676943 -- |doe| **US Department of Energy:** DE-FG02-99ER62764 (MIND) -- |anr| **Agence Nationale de la Recherche:** 14-NEUC-0002-01 +.. rst-class:: h4 text-center font-weight-light my-4 - **IDEX** Paris-Saclay, 11-IDEX-0003-02 -- |cds| **Paris-Saclay Center for Data Science:** `PARIS-SACLAY `__ -- |goo| **Google:** Summer of code (×6) -- |ama| **Amazon:** AWS Research Grants -- |czi| **Chan Zuckerberg Initiative:** `Essential Open Source Software for Science `__ + Open-source Python package for exploring, visualizing, and analyzing + human neurophysiological data: MEG, EEG, sEEG, ECoG, NIRS, and more. -.. raw:: html +.. frontpage gallery is added by a conditional in _templates/layout.html -
+.. toctree:: + :hidden: -.. |nih| image:: _static/funding/nih.png -.. |nsf| image:: _static/funding/nsf.png -.. |erc| image:: _static/funding/erc.svg -.. |doe| image:: _static/funding/doe.svg -.. |anr| image:: _static/funding/anr.svg -.. |cds| image:: _static/funding/cds.png -.. |goo| image:: _static/funding/google.svg -.. |ama| image:: _static/funding/amazon.svg -.. |czi| image:: _static/funding/czi.svg + Install + Documentation + API Reference + Get help + Development diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index 98a08a168e9..42dff355aa1 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -2,15 +2,11 @@ .. _advanced_setup: -Advanced setup of MNE-Python -============================ +Advanced setup +============== -.. contents:: - :local: - :depth: 2 - -Using MNE-Python with IPython / Jupyter notebooks -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Using with IPython / Jupyter notebooks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When using MNE-Python within IPython or a Jupyter notebook, we strongly recommend using the Qt matplotlib backend for fast and correct rendering. On @@ -65,7 +61,7 @@ when choosing which notebook kernel to use. Otherwise, be sure to activate the ``mne`` environment before launching the notebook. If you use another Python setup and you encounter some difficulties please -report them on the `MNE mailing list`_ or on the `GitHub issues page`_ to get +report them on the `MNE Forum`_ or on the `GitHub issues page`_ to get assistance. It is also possible to interact with the 3D plots without installing Qt by using @@ -78,13 +74,13 @@ the notebook 3d backend: The notebook 3d backend requires PyVista to be installed along with other packages, -please follow :doc:`mne_python` +please follow :doc:`mne_python`. -.. _installing_master: +.. _installing_main: -Using the development version of MNE-Python (latest master) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Using the development version +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you want access to the latest features and bugfixes, you can easily switch from the stable version of MNE-Python to the current development version. @@ -92,23 +88,23 @@ from the stable version of MNE-Python to the current development version. .. warning:: In between releases, function and class APIs can change without warning. -For a one-time update to latest master, make sure you're in the conda +For a one-time update to latest main, make sure you're in the conda environment where MNE-Python is installed (if you followed the default install -instructions, this will be ``base``), and use ``pip`` to upgrade: +instructions, this will be ``mne``), and use ``pip`` to upgrade: .. code-block:: console $ conda activate name_of_my_mne_environment - $ pip install --upgrade --no-deps https://github.com/mne-tools/mne-python/archive/master.zip + $ pip install --upgrade --no-deps https://github.com/mne-tools/mne-python/archive/main.zip If you plan to contribute to MNE-Python, or just prefer to use git rather than -pip to make frequent updates, check out the :ref:`contributing guide -`. +pip to make frequent updates, there are instructions for installing from a +``git clone`` in the :ref:`contributing`. .. 
_other-py-distros: -Using MNE-Python with other Python distributions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Other Python distributions +^^^^^^^^^^^^^^^^^^^^^^^^^^ While the `Anaconda`_ Python distribution provides many conveniences, other distributions of Python should also work with MNE-Python. In particular, @@ -118,13 +114,13 @@ installing new packages and managing environments; unlike Anaconda, Miniconda starts off with a minimal set of around 30 packages instead of Anaconda's hundreds. See the `installation instructions for Miniconda`_ for more info. -It is also possible to use a system-level installation of Python (version 3.5 +It is also possible to use a system-level installation of Python (version 3.6 or higher) and use ``pip`` to install MNE-Python and its dependencies, using the provided `requirements file`_: .. code-block:: console - curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/requirements.txt + curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt pip install --user requirements.txt Other configurations will probably also work, but we may be unable to offer @@ -133,13 +129,13 @@ installation choices. .. _CUDA: -Using MNE-Python with CUDA (NVIDIA GPU acceleration) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +GPU acceleration with CUDA +^^^^^^^^^^^^^^^^^^^^^^^^^^ -Some operations in MNE-Python can utilize `NVIDIA CUDA GPU processing`_ to -speed up some operations (e.g. FIR filtering) by roughly an order of magnitude. -To use CUDA, first ensure that you are running the `NVIDIA proprietary -drivers`_ on your operating system, and then do: +MNE-Python can utilize `NVIDIA CUDA GPU processing`_ to speed up some +operations (e.g. FIR filtering) by roughly an order of magnitude. To use CUDA, +first ensure that you are running the `NVIDIA proprietary drivers`_ on your +operating system, and then do: .. code-block:: console @@ -164,10 +160,10 @@ that state that they allow passing ``n_jobs='cuda'``, such as and they should run faster than the CPU-based multithreading such as ``n_jobs=8``. -Off-screen rendering in MNE-Python on Linux with MESA -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Off-screen rendering with MESA +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -On remote systems, it might be possible to use MESA software rendering +On remote Linux systems, it might be possible to use MESA software rendering (such as ``llvmpipe`` or ``swr``) for 3D visualization (with some tweaks). For example, on CentOS 7.5 you might be able to use an environment variable to force MESA to use modern OpenGL by using this before executing @@ -191,15 +187,52 @@ or by doing :func:`mne.viz.set_3d_options(antialias=False) ` within a given Python session. +Another issue that may come up is that the MESA software itself may be out of date +in certain operating systems, for example CentOS. This may lead to incomplete +rendering of some 3D plots. A solution is described in this `Github comment `_. +It boils down to building a newer version (e.g., 18.3.6) +locally following a variant of `these instructions `_. +If you have CentOS 7 or newer, you can also try some `prebuilt binaries `_ we made. +After downloading the files, untar them and add them to the appropriate library paths +using the following commands: + +.. 
code-block:: console + + $ tar xzvf mesa_18.3.6_centos_lib.tgz + $ export LIBGL_DRIVERS_PATH="${PWD}/lib" + $ export LD_LIBRARY_PATH="${PWD}/lib" + +To check that everything went well, type the following: + +.. code-block:: console + + $ glxinfo | grep "OpenGL core profile version" + +which should give:: + + OpenGL core profile version string: 3.3 (Core Profile) Mesa 18.3.6 + +Another way to check is to type: + +.. code-block:: console + + $ mne sys_info + +and it should show the right version of MESA:: + + ... + pyvista: 0.27.4 {pyvistaqt=0.2.0, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 3.4, 256 bits)} + ... + .. _troubleshoot_3d: -Troubleshooting 3D plots in MNE-Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Troubleshooting 3D plots +^^^^^^^^^^^^^^^^^^^^^^^^ -3D plotting trouble after version 0.20 upgrade on macOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +3D plotting trouble after upgrade on macOS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When upgrading MNE-Python to version 0.20, some macOS users may end up with +When upgrading MNE-Python from version 0.19 or lower, some macOS users may end up with conflicting versions of some of the 3D plotting dependencies. If you plot using the pyvista 3D backend and find that you can click-drag to rotate the brain, but cannot adjust any of the settings sliders, it is likely that your versions @@ -216,8 +249,8 @@ If you installed VTK using ``pip`` rather than ``conda``, substitute the first line for ``pip uninstall -y vtk``. -3D plotting trouble using mayavi 3D backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Trouble using mayavi backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you run into trouble when visualizing source estimates (or anything else) using mayavi, you can try setting a couple of environment variables at the diff --git a/doc/install/contributing.rst b/doc/install/contributing.rst index 9cb15b8f16f..fe66c83eb78 100644 --- a/doc/install/contributing.rst +++ b/doc/install/contributing.rst @@ -1,31 +1,10 @@ -:orphan: - .. _contributing: -Contributing to MNE-Python -========================== - -.. contents:: Page contents - :local: - :depth: 3 +Contributing guide +================== .. highlight:: console -.. NOTE: this first section (up until "overview of contribution process") is - basically a copy/paste of CONTRIBUTING.rst from the repository root, with - one sentence deleted to avoid self-referential linking. Changes made here - should be mirrored there, and vice-versa. - -MNE-Python is maintained by a community of scientists and research labs, and -accepts contributions in the form of bug reports, fixes, feature additions, and -documentation improvements (even just typo corrections). The best way to start -contributing is by `opening an issue`_ on our GitHub page to discuss your ideas -for changes or enhancements, or to tell us about behavior that you think might -be a bug in MNE-Python. *For general troubleshooting of scripts that use -MNE-Python*, you should instead write to the `MNE mailing list`_ or chat with -developers on the `MNE gitter channel`_. Users and contributors to MNE-Python -are expected to follow our `code of conduct`_. - This page has details on the preferred contribution workflow and how best to configure your system for a smooth experience contributing to MNE-Python. @@ -33,16 +12,11 @@ MNE-Python. .. collapse:: |rocket| Want an example to work through? 
:class: success - A great way to learn to contribute is to work through an actual example + A great way to learn to contribute is to work through an actual example. We recommend that you take a look at the `GitHub issues marked "easy"`_, pick one that looks interesting, and work through it while reading this guide! -.. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose -.. _`MNE mailing list`: http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis -.. _`MNE gitter channel`: https://gitter.im/mne-tools/mne-python - -.. _`code of conduct`: https://github.com/mne-tools/.github/blob/master/CODE_OF_CONDUCT.md .. _`GitHub issues marked "easy"`: https://github.com/mne-tools/mne-python/issues?q=is%3Aissue+is%3Aopen+label%3AEASY Overview of contribution process @@ -50,9 +24,9 @@ Overview of contribution process Changes to MNE-Python are typically made by `forking`_ the MNE-Python repository, making changes to your fork (usually by `cloning`_ it to your -personal computer, making the changes, and then `pushing`_ the local changes up -to your fork), and finally creating a `pull request`_ to incorporate your -changes back into the shared "upstream" version of the codebase. +personal computer, making the changes locally, and then `pushing`_ the local +changes up to your fork), and finally creating a `pull request`_ to incorporate +your changes back into the shared "upstream" version of the codebase. In general you'll be working with three different copies of the MNE-Python codebase: the official remote copy at https://github.com/mne-tools/mne-python @@ -73,8 +47,8 @@ The sections :ref:`basic-git` and :ref:`github-workflow` (below) describe this process in more detail. -Setting up your local environment for MNE-Python development -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Setting up your local development environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configuring git ~~~~~~~~~~~~~~~ @@ -110,7 +84,7 @@ identifying yourself and your contact info:: Make sure that the same email address is associated with your GitHub account and with your local git configuration. It is possible to associate multiple emails with a GitHub account, so if you initially set them up with different -emails, just add the local email to the GitHub account. +emails, you can add the local email to the GitHub account. Sooner or later, git is going to ask you what text editor you want it to use when writing commit messages, so you might as well configure that now too:: @@ -123,9 +97,12 @@ for more information. GNU Make ~~~~~~~~ -GNU Make facilitates deploying a package by executing corresponding commands -from the ``Makefile``. For MNE-Python we have two Makefiles, one in the parent -directory mainly for testing and one in ``/doc`` for building the documentation. + +We use `GNU Make`_ to organize commands or short scripts that are often needed +in development. These are stored in files with the name :file:`Makefile`. +MNE-Python has two Makefiles, one in the package's root directory (containing +mainly testing commands) and one in :file:`doc/` (containing recipes for +building our documentation pages in different ways). To check if make is already installed type :: @@ -137,25 +114,26 @@ into a terminal and you should see :: If you don't see this or something similar: -.. sidebar:: - If you get: +.. 
sidebar:: If you get: *bash: conda: command not found* you need to add - - ``(Anaconda-Path)`` - - ``(Anaconda-Path)\Scripts`` + - :file:`{path_to_Anaconda}` + - :file:`{path_to_Anaconda}\\Scripts` to Windows-PATH. - For Linux/MacOS, get `GNU Make`_ - For Windows, you can install make for git BASH (which comes with `git for Windows`_): - 1. Download ``make-(newest version)-without-guile-w32-bin.zip`` from `ezwinports`_ + 1. Download :file:`make-{newest.version}-without-guile-w32-bin.zip` from `ezwinports`_ 2. Extract zip-folder - 3. Copy the contents into ``(git-path)\mingw64\`` (e.g. by merging the folders with the equivalent ones already inside) - 4. For the first time using git BASH, you need to run once (to be able to activate your mnedev-environment): :: + 3. Copy the contents into :file:`{path_to_git}\\mingw64\\` (e.g. by merging the + folders with the equivalent ones already inside) + 4. For the first time using git BASH, you need to run once (to be able to + activate your mnedev-environment): :: $ conda init bash @@ -181,8 +159,8 @@ of how that structure is set up is given here: :align: left -Setting up the development environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Creating the virtual environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. sidebar:: Supported Python environments @@ -205,11 +183,11 @@ version of MNE-Python, you should now repeat that process to create a new, separate environment for MNE-Python development (here we'll give it the name ``mnedev``):: - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env create --file environment.yml --name mnedev $ conda activate mnedev -Now you'll have *two* MNE-Python environments: ``base`` (or whatever custom +Now you'll have *two* MNE-Python environments: ``mne`` (or whatever custom name you used when installing the stable version of MNE-Python) and ``mnedev`` that we just created. At this point ``mnedev`` also has the stable version of MNE-Python (that's what the :file:`environment.yml` file installs), but we're @@ -252,15 +230,15 @@ Finally, set up a link between your local clone and the official repository Now we'll remove the *stable* version of MNE-Python and replace it with the *development* version (the clone we just created with git). Make sure you're in -the correct environment first (:samp:`conda activate mnedev`), and then do:: +the correct environment first (``conda activate mnedev``), and then do:: $ cd $INSTALL_LOCATION/mne-python # make sure we're in the right folder $ pip uninstall -y mne $ pip install -e . The command ``pip install -e .`` installs a python module into the current -environment by creating a link to the source code directory (instead of -copying the code to pip's ``site_packages`` directory, which is what normally +environment by creating a link to the source code directory (instead of copying +the code to pip's :file:`site_packages` directory, which is what normally happens). This means that any edits you make to the MNE-Python source code will be reflected the next time you open a Python interpreter and ``import mne`` (the ``-e`` flag of ``pip`` stands for an "editable" installation). @@ -290,7 +268,8 @@ To build documentation, you will also require `optipng`_: - On MacOS, optipng can be installed using Homebrew. -- On Windows, unzip optipng.exe from the `optipng for Windows`_ archive into the ``doc`` folder. 
+- On Windows, unzip :file:`optipng.exe` from the `optipng for Windows`_ archive + into the :file:`doc/` folder. You can also choose to install some optional linters for reStructuredText:: @@ -319,22 +298,22 @@ Other commands that you will undoubtedly need relate to `branches`_. Branches represent multiple copies of the codebase *within a local clone or remote repo*. Branches are typically used to experiment with new features while still keeping a clean, working copy of the original codebase that you can switch back -to at any time. The default branch of any repo is always called ``master``, and -it is recommended that you reserve the ``master`` branch to be that clean copy +to at any time. The default branch of any repo is called ``main``, and +it is recommended that you reserve the ``main`` branch to be that clean copy of the working ``upstream`` codebase. Therefore, if you want to add a new -feature, you should first synchronize your local ``master`` branch with the -``upstream`` repository, then create a new branch based off of ``master`` and +feature, you should first synchronize your local ``main`` branch with the +``upstream`` repository, then create a new branch based off of ``main`` and `check it out`_ so that any changes you make will exist on that new branch -(instead of on ``master``):: +(instead of on ``main``):: - $ git checkout master # switch to local master branch + $ git checkout main # switch to local main branch $ git fetch upstream # get the current state of the remote upstream repo - $ git merge upstream/master # synchronize local master branch with remote upstream master branch + $ git merge upstream/main # synchronize local main branch with remote upstream main branch $ git checkout -b new-feature-x # create local branch "new-feature-x" and check it out .. sidebar:: Alternative - You can save some typing by using ``git pull upstream/master`` to replace + You can save some typing by using ``git pull upstream/main`` to replace the ``fetch`` and ``merge`` lines above. Now that you're on a new branch, you can fix a bug or add a new feature, add a @@ -446,7 +425,7 @@ General requirements All new functionality must have test coverage --------------------------------------------- -For example, a new :class:`mne.Evoked` method in :file:`mne/evoked.py` should +For example, a new `mne.Evoked` method in :file:`mne/evoked.py` should have a corresponding test in :file:`mne/tests/test_evoked.py`. @@ -455,7 +434,7 @@ All new functionality must be documented This includes thorough docstring descriptions for all public API changes, as well as how-to examples or longer tutorials for major contributions. Docstrings -for private functions may be more sparse, but should not be omitted. +for private functions may be more sparse, but should usually not be omitted. Avoid API changes when possible @@ -465,11 +444,11 @@ Changes to the public API (e.g., class/function/method names and signatures) should not be made lightly, as they can break existing user scripts. Changes to the API require a deprecation cycle (with warnings) so that users have time to adapt their code before API changes become default behavior. See :ref:`the -deprecation section ` and :class:`mne.utils.deprecated` for +deprecation section ` and `mne.utils.deprecated` for instructions. Bug fixes (when something isn't doing what it says it will do) do not require a deprecation cycle. 
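The deprecation cycle mentioned above is typically implemented with the
``deprecated`` helper from ``mne.utils``. A minimal sketch, assuming a
hypothetical function being renamed (the function names and message are
illustrative only; check ``mne/utils`` for the helper's exact signature):

.. code-block:: python

    from mne.utils import deprecated


    def smooth_signal(x):
        """Smooth a signal (hypothetical replacement API)."""
        return x


    @deprecated('smooth is deprecated and will be removed in a future '
                'release; use smooth_signal instead.')
    def smooth(x):
        """Smooth a signal (deprecated alias kept during the cycle)."""
        return smooth_signal(x)

This way, calling the old name keeps working but warns users for at least one
release before the alias is removed.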
-Note that any new API elements should be added to the master reference; +Note that any new API elements should be added to the main reference; classes, functions, methods, and attributes cannot be cross-referenced unless they are included in the :doc:`python_reference` (:file:`doc/python_reference.rst`). @@ -574,7 +553,7 @@ new feature etc.): .. |Your Name| replace:: **Your Name** - Short description of the changes (:gh:`0000` **by new contributor** |Your Name|_) - + - ... where ``0000`` must be replaced with the respective GitHub pull request (PR) @@ -635,11 +614,11 @@ Make tests fast and thorough Whenever possible, use the testing dataset rather than one of the sample datasets when writing tests; it includes small versions of most MNE-Python -objects (e.g., :class:`~mne.io.Raw` objects with short durations and few +objects (e.g., `~mne.io.Raw` objects with short durations and few channels). You can also check which lines are missed by the tests, then modify existing tests (or write new ones) to target the missed lines. Here's an example that reports which lines within ``mne.viz`` are missed when running -``test_evoked.py`` and ``test_topo.py``:: +:file:`test_evoked.py` and :file:`test_topo.py`:: $ pytest --cov=mne.viz --cov-report=term-missing mne/viz/tests/test_evoked.py mne/viz/tests/test_topo.py @@ -678,12 +657,33 @@ single-character variable names, unless inside a :term:`comprehension ` or :ref:`generator `. -Follow NumPy style for docstrings ---------------------------------- +We (mostly) follow NumPy style for docstrings +--------------------------------------------- + +In most cases you can look at existing MNE-Python docstrings to figure out how +yours should be formatted. If you can't find a relevant example, consult the +`Numpy docstring style guidelines`_ for examples of more complicated formatting +such as embedding example code, citing references, or including rendered +mathematics. Note that we diverge from the NumPy docstring standard in a few +ways: + +1. We use a module called ``sphinxcontrib-bibtex`` to render citations. Search + our source code (``git grep footcite`` and ``git grep footbibliography``) to + see examples of how to add in-text citations and formatted references to + your docstrings, examples, or tutorials. The structured bibliographic data + lives in :file:`doc/references.bib`; please follow the existing key scheme + when adding new references (e.g., ``Singleauthor2019``, + ``AuthoroneAuthortwo2020``, ``FirstauthorEtAl2021a``, + ``FirstauthorEtAl2021b``). +2. We don't explicitly say "optional" for optional keyword parameters (because + it's clear from the function or method signature which parameters have + default values). +3. For parameters that may take multiple types, we use pipe characters instead + of the word "or", like this: ``param_name : str | None``. +4. We don't include a ``Raises`` or ``Warns`` section describing + errors/warnings that might occur. + -In most cases imitating existing docstrings will be sufficient, but consult the -`Numpy docstring style guidelines`_ for more complicated formatting such as -embedding example code, citing references, or including rendered mathematics. Private function/method docstrings may be brief for simple functions/methods, but complete docstrings are appropriate when private functions/methods are relatively complex. 
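Taken together, these conventions look roughly like the docstring sketch
below: no "optional" keyword for defaulted parameters, pipe characters
between alternative types, a ``footcite`` reference rather than a
hand-written citation, and no ``Raises``/``Warns`` sections. The function,
its parameters, and the citation key are purely illustrative; real keys must
exist in :file:`doc/references.bib`:

.. code-block:: python

    def _band_power(data, sfreq, band=(8., 12.), average=True):
        """Compute power in a frequency band (illustrative helper only).

        Parameters
        ----------
        data : array, shape (n_channels, n_times)
            The data.
        sfreq : float
            The sampling frequency in Hz.
        band : tuple of float
            The lower and upper band edges in Hz.
        average : bool | str
            If True, average over time. Can also be a string naming an
            averaging method.

        Returns
        -------
        power : array, shape (n_channels,)
            The band power per channel.

        Notes
        -----
        The approach is described in :footcite:`PlaceholderEtAl2021`
        (a placeholder key).

        References
        ----------
        .. footbibliography::
        """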
To run some basic tests on documentation, you can use:: @@ -698,7 +698,7 @@ Cross-reference everywhere Both the docstrings and dedicated documentation pages (tutorials, how-to examples, discussions, and glossary) should include cross-references to any mentioned module, class, function, method, attribute, or documentation page. -There are sphinx directives for all of these (``:mod:``, ``:class:``, +There are sphinx roles for all of these (``:mod:``, ``:class:``, ``:func:``, ``:meth:``, ``:attr:``, ``:doc:``) as well as a generic cross-reference directive (``:ref:``) for linking to specific sections of a documentation page. @@ -720,6 +720,12 @@ dumped to file with commands like:: $ python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv > python.txt +Note that anything surrounded by single backticks that is *not* preceded by one +of the API roles (``:class:``, ``:func:``, etc) will be assumed to be +in the MNE-Python namespace. This can save some typing especially in +tutorials; instead of ``see :func:`mne.io.Raw.plot_psd` for details`` you can +instead type ``see `mne.io.Raw.plot_psd` for details``. + Other style guidance -------------------- @@ -746,17 +752,17 @@ Code organization Importing --------- -Import modules in this order: +Import modules in this order, preferably alphabetized within each subsection: -1. Python built-in (``os``, ``copy``, ``functools``, etc) -2. standard scientific (``numpy as np``, ``scipy.signal``, etc) -3. others -4. MNE-Python imports (e.g., ``from .pick import pick_types``) +1. Python built-in (``copy``, ``functools``, ``os``, etc.) +2. NumPy (``numpy as np``) and, in test files, pytest (``pytest``) +3. MNE-Python imports (e.g., ``from .pick import pick_types``) When importing from other parts of MNE-Python, use relative imports in the main codebase and absolute imports in tests, tutorials, and how-to examples. Imports -for ``matplotlib`` and optional modules (``sklearn``, ``pandas``, etc.) should -be nested (i.e., within a function or method, not at the top of a file). +for ``matplotlib``, ``scipy``, and optional modules (``sklearn``, ``pandas``, +etc.) should be nested (i.e., within a function or method, not at the top of a +file). This helps reduce import time and limit hard requirements for using MNE. Return types @@ -875,7 +881,7 @@ These are typically used with a call like:: $ mne browse_raw ~/mne_data/MNE-sample-data/MEG/sample/sample_audvis_raw.fif These are generally available for convenience, and can be useful for quick -debugging (in this case, for :class:`mne.io.Raw.plot`). +debugging (in this case, for `mne.io.Raw.plot`). If a given command-line function fails, they can also be executed as part of the ``mne`` module with ``python -m``. For example:: @@ -915,13 +921,13 @@ down the road. Here are the guidelines: doing this. Avoid purely cosmetic changes to the code; they make PRs harder to review. -- It is usually better to make PRs *from* branches other than your master - branch, so that you can use your master branch to easily get back to a +- It is usually better to make PRs *from* branches other than your main + branch, so that you can use your main branch to easily get back to a working state of the code if needed (e.g., if you're working on multiple changes at once, or need to pull in recent changes from someone else to get your new feature to work properly). 
-- In most cases you should make PRs *into* the upstream's master branch, unless +- In most cases you should make PRs *into* the upstream's main branch, unless you are specifically asked by a maintainer to PR into another branch (e.g., for backports or maintenance bugfixes to the current stable version). @@ -948,16 +954,16 @@ down the road. Here are the guidelines: `continuous integration`_ (CI) providers. Use them judiciously; *do not skip tests simply because they are failing*: - - ``[skip circle]`` Skip `circle`_, which tests successful building of our - documentation. + - ``[skip circle]`` Skip `CircleCI`_, which tests successful building of + our documentation. - - ``[skip travis]`` Skip `travis`_, which tests installation and execution - on Linux and macOS systems. + - ``[skip github]`` Skip our `GitHub Actions`_, which test installation + and execution on Linux and macOS systems. - ``[skip azp]`` Skip `azure`_ which tests installation and execution on Windows systems. - - ``[ci skip]`` is an alias for ``[skip travis][skip azp][skip circle]``. + - ``[ci skip]`` is an alias for ``[skip github][skip azp][skip circle]``. Notice that ``[skip ci]`` is not a valid tag. - ``[circle full]`` triggers a "full" documentation build, i.e., all code @@ -967,8 +973,8 @@ down the road. Here are the guidelines: `This sample pull request`_ exemplifies many of the conventions listed above: it addresses only one problem; it started with an issue to discuss the problem -and some possible solutions; it is a PR from the user's non-master branch into -the upstream master branch; it separates different kinds of changes into +and some possible solutions; it is a PR from the user's non-main branch into +the upstream main branch; it separates different kinds of changes into separate commits and uses labels like ``DOC``, ``FIX``, and ``STY`` to make it easier for maintainers to review the changeset; etc. If you are new to GitHub it can serve as a useful example of what to expect from the PR review process. @@ -1046,9 +1052,9 @@ it can serve as a useful example of what to expect from the PR review process. .. _Spyder: https://www.spyder-ide.org/ .. _continuous integration: https://en.wikipedia.org/wiki/Continuous_integration .. _matplotlib: https://matplotlib.org/ -.. _travis: https://travis-ci.org/mne-tools/mne-python/branches -.. _azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master -.. _circle: https://circleci.com/gh/mne-tools/mne-python +.. _github actions: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions +.. _azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=main +.. _CircleCI: https://circleci.com/gh/mne-tools/mne-python .. optipng diff --git a/doc/install/freesurfer.rst b/doc/install/freesurfer.rst index 6eb08176f7e..1de6fc82c55 100644 --- a/doc/install/freesurfer.rst +++ b/doc/install/freesurfer.rst @@ -19,8 +19,6 @@ need to install ``tcsh`` for FreeSurfer to work; ``tcsh`` is usually pre-installed with macOS, and is available in the package repositories for Linux-based systems (e.g., ``sudo apt install tcsh`` on Ubuntu-like systems). -**Next:** :doc:`advanced` - .. LINKS .. 
_fs-wiki: https://surfer.nmr.mgh.harvard.edu/fswiki/ diff --git a/doc/install/index.rst b/doc/install/index.rst index 60f1e414c2a..a619860c75b 100644 --- a/doc/install/index.rst +++ b/doc/install/index.rst @@ -1,26 +1,42 @@ -:orphan: +Quick start +=========== -Installation — contents -======================= +MNE-Python requires Python version 3.6 or higher. +For users already familiar with Python: -MNE-Python is an open-source Python module for processing, analysis, and -visualization of functional neuroimaging data (EEG, MEG, sEEG, ECoG, and -fNIRS). Pages describing the installation procedure are listed below. The -:ref:`contributing guide ` has advanced installation instructions -for (future) contributors to MNE-Python. +- If you only need 2D plotting capabilities with MNE-Python (i.e., most EEG/ERP + or other sensor-level analyses), you can install MNE-Python using ``pip``: + + .. code-block:: console + + $ pip install mne # dependencies are numpy, scipy, matplotlib + +- If you need MNE-Python's 3D plotting capabilities (e.g., plotting estimated + source activity on a cortical surface) it is a good idea to install + MNE-Python into its own virtual environment. To do this with ``conda`` (this + will create a conda environment called ``mne``): + + .. code-block:: console + + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ conda env update --file environment.yml + + If you need to convert structural MRI scans into models of the scalp, + inner/outer skull, and cortical surfaces you also need + :doc:`FreeSurfer `. + +The :ref:`install_python_and_mne_python` page has more detailed instructions +for different operating systems (including instructions for installing Python +if you don't already have it). The :ref:`advanced_setup` page has additional +tips and tricks for special situations (servers, notebooks, CUDA, installing +the development version, etc). The :ref:`contributing` has additional +installation instructions for (future) contributors to MNE-Python (extra +dependencies, etc). .. toctree:: - :maxdepth: 2 + :hidden: pre_install mne_python freesurfer advanced - -Installation instructions are also provided for MNE-C (the Unix command-line -tools originally developed by Matti Hämäläinen). - -.. toctree:: - :maxdepth: 2 - - mne_c diff --git a/doc/install/mne_c.rst b/doc/install/mne_c.rst index be0682f87b5..a5512930af4 100644 --- a/doc/install/mne_c.rst +++ b/doc/install/mne_c.rst @@ -1,3 +1,5 @@ +:orphan: + .. include:: ../links.inc .. _install_mne_c: @@ -5,10 +7,6 @@ Installing MNE-C ================ -.. contents:: - :local: - :depth: 1 - System requirements ^^^^^^^^^^^^^^^^^^^ @@ -163,8 +161,8 @@ effect or you need a faster graphics adapter. Troubleshooting MNE-C installation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If MNE-C can't find ``libxp.so.6``, download libxp6 from debian_ or `pkgs.org`_ -and install with ``dpkg`` or similar: +If MNE-C can't find ``libxp.so.6``, download libxp6 from debian_ or similar and +install it: .. code-block:: console @@ -179,8 +177,8 @@ a typical 64-bit Ubuntu-like system this would be accomplished by: $ cd /usr/lib/x86_64-linux-gnu $ sudo ln -s libgfortran.so.1 $(find . -maxdepth 1 -type f -name libgfortran.so*) -If you encounter other errors installing MNE-C, please send a message to the -`MNE mailing list`_. +If you encounter other errors installing MNE-C, please post a message to the +`MNE Forum`_. .. 
links @@ -192,4 +190,3 @@ If you encounter other errors installing MNE-C, please send a message to the .. _XCode developer tools: https://developer.apple.com/xcode/ .. _xquartz: https://www.xquartz.org/ .. _debian: https://packages.debian.org/jessie/amd64/libxp6/download -.. _pkgs.org: https://pkgs.org/download/libxp6 diff --git a/doc/install/mne_python.rst b/doc/install/mne_python.rst index a2fce6d880f..3f9517e05a1 100644 --- a/doc/install/mne_python.rst +++ b/doc/install/mne_python.rst @@ -5,10 +5,6 @@ Installing MNE-Python ===================== -.. contents:: Page contents - :local: - :depth: 1 - .. highlight:: console .. _install-python: @@ -48,7 +44,7 @@ conda to ``/home/user/anaconda3``):: $ which pip /home/user/anaconda3/bin/pip -.. collapse:: |hand-stop-o| If you get an error or these look incorrect... +.. collapse:: |hand-paper| If you get an error or these look incorrect... :class: danger .. rubric:: If you are on a |windows| Windows command prompt: @@ -122,8 +118,8 @@ Installing MNE-Python and its dependencies Once you have Python/Anaconda installed, you have a few choices for how to install MNE-Python. -For sensor-level analysis -~~~~~~~~~~~~~~~~~~~~~~~~~ +2D plotting and sensor-level analysis +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you only need 2D plotting capabilities with MNE-Python (i.e., most EEG/ERP or other sensor-level analyses), you can install all you need by running @@ -133,8 +129,8 @@ GUI). This will install MNE-Python into the "base" conda environment, which should be active by default and should already have the necessary dependencies (``numpy``, ``scipy``, and ``matplotlib``). -For 3D plotting and source analysis -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +3D plotting and source analysis +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you need MNE-Python's 3D plotting capabilities (e.g., plotting estimated source activity on a cortical surface) it is a good idea to install @@ -148,10 +144,10 @@ your operating system. Download the MNE-Python `environment file`_ (done here with ``curl``) and use it to create a new environment (named ``mne`` by default):: - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env update --file environment.yml - .. collapse:: |hand-stop-o| If you get errors building mayavi... + .. collapse:: |hand-paper| If you get errors building mayavi... :class: danger Installing `mayavi`_ needs OpenGL support. On debian-like systems this @@ -165,15 +161,15 @@ your operating system. with ``curl``) and use it to create a new environment (named ``mne`` by default):: - $ conda install --name base nb_conda_kernels - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml + $ conda install --name base nb_conda_kernels "spyder>=4.2.1" + $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml $ conda env update --file environment.yml .. collapse:: |windows| Windows - Download the `environment file`_ - Open an Anaconda command prompt - - Run :samp:`conda install --name base nb_conda_kernels` + - Run :samp:`conda install --name base nb_conda_kernels "spyder>=4.2.1"` - :samp:`cd` to the directory where you downloaded the file - Run :samp:`conda env update --file environment.yml` @@ -191,7 +187,7 @@ your operating system. 
if (navigator.userAgent.indexOf("Mac")!=-1) OSName="apple-macos"; $(document).ready(function(){ var element = document.getElementById("collapse_" + OSName); - element.className += " in"; + element.className += " show"; element.setAttribute("aria-expanded", "true"); }); @@ -219,8 +215,8 @@ Installing to a headless server need administrator privileges to install it. -Testing MNE-Python installation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Testing your installation +^^^^^^^^^^^^^^^^^^^^^^^^^ To make sure MNE-Python installed itself and its dependencies correctly, type the following command in a terminal:: @@ -253,7 +249,7 @@ MNE-Python and its dependencies. Typical output looks like this:: PyQt5: 5.14.1 -.. collapse:: |hand-stop-o| If you get an error... +.. collapse:: |hand-paper| If you get an error... :class: danger .. rubric:: If you see an error like: @@ -276,12 +272,12 @@ MNE-Python and its dependencies. Typical output looks like this:: If something else went wrong during installation and you can't figure it out, check out the :doc:`advanced` page to see if your problem is discussed there. -If not, the `MNE mailing list`_ and `MNE gitter channel`_ are -good resources for troubleshooting installation problems. +If not, the `MNE Forum`_ is a good resource for troubleshooting installation +problems. -Installing a Python IDE -^^^^^^^^^^^^^^^^^^^^^^^ +Python IDEs +^^^^^^^^^^^ Most users find it convenient to write and run their code in an `Integrated Development Environment`_ (IDE). Some popular choices for scientific @@ -291,16 +287,17 @@ Python development are: use Python. It is included by default in the ``base`` environment when you install Anaconda, and can be started from a terminal with the command ``spyder`` (or on Windows or macOS, launched from the Anaconda Navigator GUI). - If you installed MNE-Python into a separate ``mne`` environment (not the - ``base`` Anaconda environment), you can set up Spyder to use the ``mne`` - environment automatically, by opening Spyder and navigating to + If you use Spyder, you should *not* install it into the ``mne`` environment; + instead, launch Spyder from the ``base`` environment and set it to use the + ``mne`` environment automatically, by opening Spyder and navigating to :samp:`Tools > Preferences > Python Interpreter > Use the following interpreter`. - There, paste the output of the following terminal command:: + There, paste the output of the following terminal commands:: - $ conda activate mne && python -c "import sys; print(sys.executable)" + $ conda activate mne + $ python -c "import sys; print(sys.executable)" It should be something like ``C:\Users\user\anaconda3\envs\mne\python.exe`` - (Windows) or ``/Users/user/anaconda3/envs/mne/bin/python`` (macOS). + (Windows) or ``/Users/user/opt/anaconda3/envs/mne/bin/python`` (macOS). - `Visual Studio Code`_ (often shortened to "VS Code" or "vscode") is a development-focused text editor that supports many programming languages in addition to Python, includes an integrated terminal console, and has a rich @@ -329,13 +326,10 @@ Python development are: .. highlight:: python -**Next:** :doc:`freesurfer` - - .. LINKS -.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml -.. _server environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/server_environment.yml +.. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/environment.yml +.. 
_server environment file: https://raw.githubusercontent.com/mne-tools/mne-python/main/server_environment.yml .. _`mayavi`: https://docs.enthought.com/mayavi/mayavi/ .. _`pyvista`: https://docs.pyvista.org/ .. _`X server`: https://en.wikipedia.org/wiki/X_Window_System diff --git a/doc/install/pre_install.rst b/doc/install/pre_install.rst index d275b40b7a9..518a604f0cc 100644 --- a/doc/install/pre_install.rst +++ b/doc/install/pre_install.rst @@ -1,32 +1,35 @@ .. include:: ../links.inc -Before you install -================== +Overview of the MNE tools suite +=============================== -.. contents:: - :local: - :depth: 1 +MNE-Python is an open-source Python module for processing, analysis, and +visualization of functional neuroimaging data (EEG, MEG, sEEG, ECoG, and +fNIRS). There are several related or interoperable software packages that you +may also want to install, depending on your analysis needs. -Overview of the MNE tools suite -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Related software +^^^^^^^^^^^^^^^^ - MNE-C was the initial stage of this project, providing a set of interrelated command-line and GUI programs focused on computing cortically constrained Minimum Norm Estimates from MEG and EEG data. These tools were written in C by Matti Hämäläinen, and are - documented `here `_. + documented `here `_. See :ref:`install_mne_c` for installation + instructions. -- :doc:`MNE-Python <../python_reference>` reimplements the functionality of - MNE-C, and extends considerably the analysis and visualization capabilities. - MNE-Python is collaboratively developed and has more than 200 contributors. +- MNE-Python reimplements the functionality of MNE-C, extends considerably the + analysis and visualization capabilities, and adds support for additional data + types like functional near-infrared spectroscopy (fNIRS). MNE-Python is + collaboratively developed and has more than 200 contributors. -- The :ref:`mne_matlab` provides a MATLAB interface to the .fif file +- :ref:`MNE MATLAB ` provides a MATLAB interface to the .fif file format and other MNE data structures, and provides example MATLAB implementations of some of the core analysis functionality of MNE-C. It is distributed alongside MNE-C, and can also be downloaded from the `MNE-MATLAB git repository`_. -- :doc:`MNE-CPP <../mne_cpp>` provides core MNE functionality implemented in +- :ref:`MNE-CPP ` provides core MNE functionality implemented in C++ and is primarily intended for embedded and real-time applications. There is also a growing ecosystem of other Python packages that work alongside @@ -77,19 +80,14 @@ activity, you will need MNE-Python, plus :doc:`FreeSurfer ` to convert structural MRI scans into models of the scalp, inner/outer skull, and cortical surfaces (specifically, for command-line functions :ref:`mne flash_bem`, :ref:`mne watershed_bem`, and -:ref:`mne make_scalp_surfaces`). If you follow the recommended installation -procedure using anaconda, you don't need to do anything extra — Freesurfer will -automatically be installed. +:ref:`mne make_scalp_surfaces`). Getting help ^^^^^^^^^^^^ -Help with installation is available through the `MNE mailing list`_ and -`MNE gitter channel`_. See the :ref:`help` page for more information. - - -**Next:** :doc:`mne_python` +Help with installation is available through the `MNE Forum`_. See the +:ref:`help` page for more information. .. 
LINKS: diff --git a/doc/inverse.rst b/doc/inverse.rst new file mode 100644 index 00000000000..6405bf04bc5 --- /dev/null +++ b/doc/inverse.rst @@ -0,0 +1,98 @@ + +Inverse Solutions +================= + +:py:mod:`mne.minimum_norm`: + +.. automodule:: mne.minimum_norm + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.minimum_norm + +.. autosummary:: + :toctree: generated/ + + InverseOperator + apply_inverse + apply_inverse_cov + apply_inverse_epochs + apply_inverse_raw + compute_source_psd + compute_source_psd_epochs + compute_rank_inverse + estimate_snr + make_inverse_operator + prepare_inverse_operator + read_inverse_operator + source_band_induced_power + source_induced_power + write_inverse_operator + make_inverse_resolution_matrix + resolution_metrics + get_cross_talk + get_point_spread + +:py:mod:`mne.inverse_sparse`: + +.. automodule:: mne.inverse_sparse + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.inverse_sparse + +.. autosummary:: + :toctree: generated/ + + mixed_norm + tf_mixed_norm + gamma_map + make_stc_from_dipoles + +:py:mod:`mne.beamformer`: + +.. automodule:: mne.beamformer + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.beamformer + +.. autosummary:: + :toctree: generated/ + + Beamformer + read_beamformer + make_lcmv + apply_lcmv + apply_lcmv_epochs + apply_lcmv_raw + apply_lcmv_cov + make_dics + apply_dics + apply_dics_csd + apply_dics_epochs + rap_music + tf_dics + make_lcmv_resolution_matrix + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + Dipole + DipoleFixed + fit_dipole + +:py:mod:`mne.dipole`: + +.. automodule:: mne.dipole + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.dipole + +.. autosummary:: + :toctree: generated/ + + get_phantom_dipoles diff --git a/doc/links.inc b/doc/links.inc index 4a9e6ed6e54..6103dc4df1b 100644 --- a/doc/links.inc +++ b/doc/links.inc @@ -17,9 +17,8 @@ .. _`mne command line utilities`: http://www.nmr.mgh.harvard.edu/martinos/userInfo/data/MNE_register/ .. _`mne-scripts`: https://github.com/mne-tools/mne-scripts/ .. _`MNE-C manual`: https://mne.tools/mne-c-manual/MNE-manual-2.7.3.pdf -.. _`MNE mailing list`: http://mail.nmr.mgh.harvard.edu/mailman/listinfo/mne_analysis .. _`GitHub issues page`: https://github.com/mne-tools/mne-python/issues/ -.. _`MNE gitter channel`: https://gitter.im/mne-tools/mne-python +.. _`MNE Forum`: https://mne.discourse.group .. _`MNE-BIDS`: https://mne-tools.github.io/mne-bids/ .. _`MNE-HCP`: http://mne-tools.github.io/mne-hcp/ .. _`MNE-Realtime`: https://github.com/mne-tools/mne-realtime @@ -207,7 +206,7 @@ .. installation links -.. _requirements file: https://raw.githubusercontent.com/mne-tools/mne-python/master/requirements.txt +.. _requirements file: https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt .. _NVIDIA CUDA GPU processing: https://developer.nvidia.com/cuda-zone .. _NVIDIA proprietary drivers: https://www.geforce.com/drivers diff --git a/doc/logging.rst b/doc/logging.rst new file mode 100644 index 00000000000..110cd1bede4 --- /dev/null +++ b/doc/logging.rst @@ -0,0 +1,47 @@ + +Logging and Configuration +========================= + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + get_config_path + get_config + open_docs + set_log_level + set_log_file + set_config + set_cache_dir + sys_info + verbose + +:py:mod:`mne.utils`: + +.. currentmodule:: mne.utils + +.. automodule:: mne.utils + :no-members: + :no-inherited-members: + +.. 
autosummary:: + :toctree: generated/ + + deprecated + warn + +:py:mod:`mne.cuda`: + +.. currentmodule:: mne.cuda + +.. automodule:: mne.cuda + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + get_cuda_memory + init_cuda + set_cuda_device diff --git a/doc/most_used_classes.rst b/doc/most_used_classes.rst new file mode 100644 index 00000000000..2e4bc1d7dd0 --- /dev/null +++ b/doc/most_used_classes.rst @@ -0,0 +1,12 @@ +Most-used classes +================= + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + io.Raw + Epochs + Evoked + Info diff --git a/doc/mri.rst b/doc/mri.rst new file mode 100644 index 00000000000..393f9f0dd7b --- /dev/null +++ b/doc/mri.rst @@ -0,0 +1,24 @@ + +MRI Processing +============== + +.. currentmodule:: mne + +Step by step instructions for using :func:`gui.coregistration`: + + - `Coregistration for subjects with structural MRI + `_ + - `Scaling a template MRI for subjects for which no MRI is available + `_ + +.. autosummary:: + :toctree: generated/ + + coreg.get_mni_fiducials + gui.coregistration + gui.fiducials + create_default_subject + scale_mri + scale_bem + scale_labels + scale_source_space diff --git a/doc/overview/cite.rst b/doc/overview/cite.rst index f49d4c2dcab..00ad59520c1 100644 --- a/doc/overview/cite.rst +++ b/doc/overview/cite.rst @@ -3,29 +3,47 @@ How to cite MNE-Python ====================== +Citing the software +------------------- + +To cite specific version numbers of the software, you can use the DOIs provided +by `Zenodo `_. Additionally, we ask that +when citing the MNE-Python package, you cite the canonical journal article +reference :footcite:`GramfortEtAl2013a`: + +.. footbibliography:: + +.. collapse:: |quote-left| BibTeX for MNE-Python + :class: info + + .. include:: ../references.bib + :code: bibtex + :start-after: % MNE-Python reference + :end-before: % everything else + + +Citing the inverse imaging algorithms +------------------------------------- + To cite MNE-C or the inverse imaging implementations provided by the MNE -software, please use: +software, please use :footcite:`GramfortEtAl2014`: - A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, - L. Parkkonen, M. Hämäläinen, `MNE software for processing MEG and EEG data - `_, *NeuroImage*, Volume 86, - 1 February 2014, Pages 446-460, ISSN 1053-8119, - `[DOI] `__ +.. footbibliography:: -To cite the MNE-Python package, please use: +.. collapse:: |quote-left| BibTeX for inverse algorithms / MNE-C + :class: info - A. Gramfort, M. Luessi, E. Larson, D. Engemann, D. Strohmeier, C. Brodbeck, - R. Goj, M. Jas, T. Brooks, L. Parkkonen, M. Hämäläinen, `MEG and EEG data - analysis with MNE-Python - `_, - *Frontiers in Neuroscience*, Volume 7, 2013, ISSN 1662-453X, - `[DOI] `__ + .. include:: ../references.bib + :code: bibtex + :start-after: % MNE-C reference + :end-before: % MNE-Python reference -To cite specific version numbers of the software, you can use the DOIs provided -by `Zenodo `_. +Citing other algorithms +----------------------- Depending on your research topic, it may also be appropriate to cite related -method papers, some of which are listed in :ref:`ch_reading` and in the -documentation strings of the relevant functions or methods. +method papers, some of which are listed in the documentation strings of the +relevant functions or methods. All references cited in the MNE-Python codebase +and documentation are collected in the :ref:`general_bibliography`. 
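A minimal sketch of how the most-used classes listed in ``doc/most_used_classes.rst`` above fit together in the typical M/EEG workflow that ``doc/overview/cookbook.rst`` below describes. This sketch is illustrative only and is not part of the patch; ``fname`` is a hypothetical path to a raw FIF recording that contains a stimulus channel, and the event code ``1`` is likewise assumed::

    import mne

    fname = "path/to/raw.fif"                        # hypothetical file path (assumption)
    raw = mne.io.read_raw_fif(fname, preload=True)   # mne.io.Raw
    raw.filter(l_freq=1.0, h_freq=40.0)              # band-pass filter in place
    events = mne.find_events(raw)                    # triggers from the stim channel
    epochs = mne.Epochs(raw, events, event_id=dict(cond1=1),
                        tmin=-0.2, tmax=0.5)         # mne.Epochs
    evoked = epochs["cond1"].average()               # mne.Evoked
    print(evoked.info)                               # mne.Info (measurement metadata)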
diff --git a/doc/overview/cookbook.rst b/doc/overview/cookbook.rst index 3e090bafede..f46239006b7 100644 --- a/doc/overview/cookbook.rst +++ b/doc/overview/cookbook.rst @@ -4,10 +4,6 @@ The typical M/EEG workflow ========================== -.. contents:: Contents - :local: - :depth: 2 - Overview ======== @@ -271,13 +267,11 @@ inside the outer skull). This step assigns the conductivity values to the BEM compartments. For the scalp and the brain compartments, the default is 0.3 S/m. The default skull conductivity is 50 times smaller, *i.e.*, -0.006 S/m. Recent publications, see :ref:`CEGEGDEI`, report -a range of skull conductivity ratios ranging from 1:15 (Oostendorp *et -al.*, 2000) to 1:25 - 1:50 (Slew *et al.*, -2009, Conçalves *et al.*, 2003). The -MNE default ratio 1:50 is based on the typical values reported in -(Conçalves *et al.*, 2003), since their -approach is based comparison of SEF/SEP measurements in a BEM model. +0.006 S/m. Recent publications report a range of skull conductivity ratios +ranging from 1:15 :footcite:`OostendorpEtAl2000` to 1:25 - 1:50 +:footcite:`GoncalvesEtAl2003,LewEtAl2009`. The MNE default ratio 1:50 is based +on the typical values reported in :footcite:`GoncalvesEtAl2003`, since their +approach is based on comparison of SEF/SEP measurements in a BEM model. The variability across publications may depend on individual variations but, more importantly, on the precision of the skull compartment segmentation. @@ -427,3 +421,9 @@ done *e.g.*, to ``subject='fsaverage'`` as:: >>> stc_fsaverage = morph.apply(stc) # doctest: +SKIP See :ref:`ch_morph` for more information. + + +References +========== + +.. footbibliography:: diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index 7f5cdfc8857..691aa267024 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -6,8 +6,8 @@ Datasets Overview .. sidebar:: Contributing datasets to MNE-Python Do not hesitate to contact MNE-Python developers on the - `MNE mailing list `_ - to discuss the possibility of adding more publicly available datasets. + `MNE Forum `_ to discuss the possibility of + adding more publicly available datasets. All the dataset fetchers are available in :mod:`mne.datasets`. To download any of the datasets, use the ``data_path`` (fetches full dataset) or the ``load_data`` (fetches dataset partially) functions. @@ -17,11 +17,6 @@ is already on your computer, and only download it if necessary. The default download location is also configurable; see the documentation of any of the ``data_path`` functions for more information. -.. contents:: Available datasets - :local: - :depth: 2 - - .. _sample-dataset: Sample @@ -333,6 +328,19 @@ data please cite :footcite:`KempEtAl2000` and :footcite:`GoldbergerEtAl2000`. * :ref:`tut-sleep-stage-classif` +Reference channel noise MEG data set +==================================== +:func:`mne.datasets.refmeg_noise.data_path`. + +This dataset was obtained with a 4D Neuroimaging / BTi system at +the University Clinic - Erlangen, Germany. There are powerful bursts of +external magnetic noise throughout the recording, which make it a good +example for automatic noise removal techniques. + +.. 
topic:: Examples + + * :ref:`ex-megnoise_processing` + Miscellaneous Datasets ====================== These datasets are used for specific purposes in the documentation and in @@ -351,6 +359,12 @@ For convenience, we provide a function to separately download and extract the :ref:`tut-eeg-fsaverage-source-modeling` +Infant template MRIs +^^^^^^^^^^^^^^^^^^^^ +:func:`mne.datasets.fetch_infant_template` + +This function will download an infant template MRI from +:footcite:`OReillyEtAl2021` along with MNE-specific files. ECoG Dataset ^^^^^^^^^^^^ @@ -395,6 +409,49 @@ discriminate. and demonstrates how to fit a single trial linear regression using the information contained in the metadata of the individual datasets. +.. _erp-core-dataset: + +ERP CORE Dataset +^^^^^^^^^^^^^^^^ +:func:`mne.datasets.erp_core.data_path` + +The original `ERP CORE dataset`_ :footcite:`Kappenman2021` contains data from +40 participants who completed 6 EEG experiments, carefully crafted to evoke +7 well-known event-related potential (ERP) components. + +Currently, the MNE-Python ERP CORE dataset only provides data from one +participant (subject ``001``) of the Flankers paradigm, which elicits the +lateralized readiness potential (LRP) and error-related negativity (ERN). The +data provided is **not** the original data from the ERP CORE dataset, but +rather a slightly modified version, designed to demonstrate the Epochs metadata +functionality. For example, we already set the references and montage +correctly, and stored events as Annotations. Data is provided in ``FIFF`` +format. + +.. topic:: Examples + + * :ref:`tut-autogenerate-metadata`: Learn how to auto-generate + `~mne.Epochs` metadata, and visualize the error-related negativity (ERN) + ERP component. + +.. _ssvep-dataset: + +SSVEP +===== +:func:`mne.datasets.ssvep.data_path` + +This is a simple example dataset with frequency-tagged visual stimulation: +N=2 participants observed checkerboard patterns inverting with a constant +frequency of either 12.0 Hz or 15.0 Hz. 10 trials of 20.0 s length each. +32-channel wet EEG was recorded. + +Data format: BrainVision .eeg/.vhdr/.vmrk files organized according to the BIDS +standard. + +.. topic:: Examples + + * :ref:`tut-ssvep` + References ========== @@ -407,16 +464,4 @@ References .. _resting state dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetResting .. _median nerve dataset tutorial: https://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf .. _SPM faces dataset: https://www.fil.ion.ucl.ac.uk/spm/data/mmfaces/ - -Reference channel noise MEG data set -==================================== -:func:`mne.datasets.refmeg_noise.data_path`. - -This dataset was obtained with a 4D Neuroimaging / BTi system at -the University Clinic - Erlangen, Germany. There are powerful bursts of -external magnetic noise throughout the recording, which make it a good -example for automatic noise removal techniques. - -.. topic:: Examples - - * :ref:`ex-megnoise_processing` +.. _ERP CORE dataset: https://erpinfo.org/erp-core diff --git a/doc/overview/design_philosophy.rst b/doc/overview/design_philosophy.rst index 67e9a183af4..035b18c4236 100644 --- a/doc/overview/design_philosophy.rst +++ b/doc/overview/design_philosophy.rst @@ -2,8 +2,8 @@ .. _design_philosophy: -MNE-Python design philosophy -============================ +Design philosophy +================= Interactive versus scripted analysis ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -22,7 +22,7 @@ later and/or share it with others (including your future self). 
Integration with the scientific python stack ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -MNE-Python also integrates well with other standard scientific python +MNE-Python also integrates well with other standard scientific Python libraries. For example, MNE-Python objects underlyingly store their data in NumPy arrays, making it easy to apply custom algorithms or pass your data into one of `scikit-learn's `_ machine learning pipelines. @@ -74,6 +74,8 @@ names and types, applied filters, projectors, etc. See :ref:`tut-info-class` for more info. +.. _sect-meth-chain: + In-place operation ^^^^^^^^^^^^^^^^^^ diff --git a/doc/overview/development.rst b/doc/overview/development.rst new file mode 100644 index 00000000000..0548b53863a --- /dev/null +++ b/doc/overview/development.rst @@ -0,0 +1,28 @@ + +MNE-Python Development +====================== + +.. NOTE: this first section (up until "overview of contribution process") is + basically a copy/paste of CONTRIBUTING.rst from the repository root, with + one sentence deleted to avoid self-referential linking. Changes made here + should be mirrored there, and vice-versa. + +MNE-Python is maintained by a community of scientists and research labs, and +accepts contributions in the form of bug reports, fixes, feature additions, and +documentation improvements (even just typo corrections). The best way to start +contributing is by `opening an issue`_ on our GitHub page to discuss your ideas +for changes or enhancements, or to tell us about behavior that you think might +be a bug in MNE-Python. *For general troubleshooting of scripts that use +MNE-Python*, you should instead post on the `MNE Forum`_. Users and +contributors to MNE-Python are expected to follow our `code of conduct`_. + +.. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose +.. _`MNE Forum`: https://mne.discourse.group +.. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md + +.. toctree:: + :hidden: + + ../install/contributing + ../whats_new + roadmap diff --git a/doc/overview/faq.rst b/doc/overview/faq.rst index 63f9cea9589..98ca5edc76a 100644 --- a/doc/overview/faq.rst +++ b/doc/overview/faq.rst @@ -6,9 +6,6 @@ Frequently Asked Questions (FAQ) ================================ -.. contents:: Page contents - :local: - .. highlight:: python General MNE-Python issues @@ -72,7 +69,7 @@ I'm not sure how to do *X* analysis step with my *Y* data... ------------------------------------------------------------ Knowing "the right thing" to do with EEG and MEG data is challenging. We use -the `MNE mailing list`_ to discuss analysis strategies for different kinds of +the `MNE Forum`_ to discuss analysis strategies for different kinds of data. It's worth searching the archives to see if there have been relevant discussions in the past, but don't hesitate to ask a new question if the answer isn't out there already. @@ -84,13 +81,12 @@ I think I found a bug, what do I do? When you encounter an error message or unexpected results, it can be hard to tell whether it happened because of a bug in MNE-Python, a mistake in user code, a corrupted data file, or irregularities in the data itself. Your first -step when asking for help should be the `MNE mailing list`_ or the -`MNE Gitter channel`_, not GitHub. This bears repeating: *the GitHub issue -tracker is not for usage help* — it is for software bugs, feature requests, and -improvements to documentation. 
If you open an issue that contains only a usage -question, we will close the issue and direct you to the mailing list or Gitter -channel. If you're pretty sure the problem you've encountered is a software bug -(not bad data or user error): +step when asking for help should be the `MNE Forum`_, not GitHub. This bears +repeating: *the GitHub issue tracker is not for usage help* — it is for +software bugs, feature requests, and improvements to documentation. If you +open an issue that contains only a usage question, we will close the issue and +direct you to the forum. If you're pretty sure the problem you've encountered +is a software bug (not bad data or user error): - Make sure you're using `the most current version`_. You can check it locally at a shell prompt with: @@ -103,7 +99,7 @@ channel. If you're pretty sure the problem you've encountered is a software bug dependencies. - If you're already on the most current version, if possible try using - :ref:`the latest development version `, as the bug may + :ref:`the latest development version `, as the bug may have been fixed already since the latest release. If you can't try the latest development version, search the GitHub issues page to see if the problem has already been reported and/or fixed. @@ -121,14 +117,6 @@ three backticks (\`\`\`) above and below the lines of code. This MNE-Python contributors should be able to copy and paste the provided snippet and replicate the bug on their own computers. -If you post to the `mailing list -`__ -instead, a `GitHub Public Gist `_ for the code sample -is recommended; if you use the -`Gitter channel `_ the three backticks -(\`\`\`) trick works there too. - - Why is it dangerous to "pickle" my MNE-Python objects and data for later use? ----------------------------------------------------------------------------- @@ -145,8 +133,8 @@ MNE-Python is designed to provide its own file saving formats (often based on the FIF standard) for its objects usually via a ``save`` method or ``write_*`` method, e.g. :func:`mne.io.Raw.save`, :func:`mne.Epochs.save`, :func:`mne.write_evokeds`, :func:`mne.SourceEstimate.save`. If you have some -data that you want to save but can't figure out how, shoot an email to the `MNE -mailing list`_ or post it to the `GitHub issues page`_. +data that you want to save but can't figure out how, post to the `MNE Forum`_ +or to the `GitHub issues page`_. If you want to write your own data to disk (e.g., subject behavioral scores), we strongly recommend using `h5io `_, which is @@ -317,7 +305,7 @@ does not contain a systematic signal (time-locked to the event of interest), the whitened baseline signal should be follow a multivariate Gaussian distribution, i.e., whitened baseline signals should be between -1.96 and 1.96 at a given time sample. Based on the same reasoning, the expected value for the -:term:`Global Field Power (GFP) ` is 1 (calculation of the :term:`GFP` +:term:`global field power` (GFP) is 1 (calculation of the :term:`GFP` should take into account the true degrees of freedom, e.g. ``ddof=3`` with 2 active SSP vectors):: @@ -349,7 +337,7 @@ compared:: >>> evoked.plot_white(covs) # doctest: +SKIP This will plot the whitened evoked for the optimal estimator and display the -:term:`GFPs ` for all estimators as separate lines in the related panel. +:term:`GFP` for all estimators as separate lines in the related panel. .. _faq_watershed_bem_meshes: @@ -385,8 +373,7 @@ order of difficulty): :ref:`mne watershed_bem`. 2. 
Changing the ``--atlas`` and ``--gcaatlas`` options of :ref:`mne watershed_bem`. -3. Manually editing the meshes (see :ref:`this tutorial - `). +3. Manually editing the meshes (see :ref:`this tutorial `). 4. Manually running mri_watershed_ with various FreeSurfer flags (e.g., ``-less`` to fix the output). 5. Going farther back in your Freesurfer pipeline to fix the problem. diff --git a/doc/overview/get_help.rst b/doc/overview/get_help.rst index 74df14f57cc..c97ed58e460 100644 --- a/doc/overview/get_help.rst +++ b/doc/overview/get_help.rst @@ -5,19 +5,24 @@ Getting help ^^^^^^^^^^^^ -There are three main channels for obtaining help with MNE software tools. +There are several places to obtain help with MNE software tools. -- There are some troubleshooting tips built into +- The `MNE Forum`_ is a good place to go for both troubleshooting and general + questions. +- The :ref:`faq` page has some troubleshooting tips, and is a good source of + general information. There are also some troubleshooting tips built into :ref:`the installation page ` (look for the - |hand-stop-o| symbols), and some tips related to 3D plotting problems on + |hand-paper| symbols), and some tips related to 3D plotting problems on :ref:`the advanced setup page `. -- The `MNE mailing list`_ and `MNE gitter channel`_ are good places to go for - both troubleshooting and general questions. - If you want to request new features or if you're confident that you have found a bug, please create a new issue on the `GitHub issues page`_. When reporting bugs, please try to replicate the bug with the MNE-Python :ref:`sample data `, and make every effort to simplify your example script to only the elements necessary to replicate the bug. -The :ref:`faq` page also has a few troubleshooting tips, and is a good source -of general information too. + +.. toctree:: + :hidden: + + learn_python + faq diff --git a/doc/overview/implementation.rst b/doc/overview/implementation.rst index 2aa295e1027..ebae0201f7a 100644 --- a/doc/overview/implementation.rst +++ b/doc/overview/implementation.rst @@ -5,11 +5,6 @@ Algorithms and other implementation details This page describes some of the technical details of MNE-Python implementation. -.. contents:: Page contents - :local: - :depth: 1 - - .. _units: Internal representation (units) diff --git a/doc/overview/index.rst b/doc/overview/index.rst index 5c12d2fe762..dc7a71e9578 100644 --- a/doc/overview/index.rst +++ b/doc/overview/index.rst @@ -1,5 +1,3 @@ -:orphan: - .. include:: ../links.inc .. _documentation_overview: @@ -10,8 +8,8 @@ Documentation overview .. note:: If you haven't already installed Python and MNE-Python, here are the - :doc:`installation instructions `, and some resources for - :doc:`learn_python`. + :ref:`installation instructions `, and some + resources for :doc:`learn_python`. The documentation for MNE-Python is divided into four main sections: emphasis is on thorough explanations that get new users up to speed quickly, at the expense of covering only a limited number of topics. -2. The :doc:`Examples Gallery <../auto_examples/index>` provides working code +2. The :doc:`How-to Examples <../auto_examples/index>` provides working code samples demonstrating various analysis and visualization techniques. 
These examples often lack the narrative explanations seen in the tutorials, but can be a useful way to discover new analysis or plotting ideas, or to see @@ -41,25 +39,25 @@ The documentation for MNE-Python is divided into four main sections: notebook. The rest of the MNE-Python documentation pages (parts outside of the four -categories above) are linked here: - -.. toctree:: - :maxdepth: 1 - - faq - design_philosophy - implementation - ../whats_new - roadmap - datasets_index - cookbook - ../generated/commands - migrating - cite - get_help - ../bibliography - +categories above) are shown in the navigation menu, including the +:ref:`list of example datasets`, +:ref:`implementation details`, and more. Documentation for the related C and MATLAB tools are available here: - :ref:`MNE-MATLAB ` (HTML) - `MNE-C `_ (PDF) + +.. toctree:: + :hidden: + + Tutorials<../auto_tutorials/index> + Examples<../auto_examples/index> + ../glossary + Implementation details + design_philosophy + Example datasets + Command-line tools<../generated/commands> + migrating + cookbook + cite + ../cited diff --git a/doc/overview/learn_python.rst b/doc/overview/learn_python.rst index cd3a5e8ed66..87328c82891 100644 --- a/doc/overview/learn_python.rst +++ b/doc/overview/learn_python.rst @@ -1,5 +1,3 @@ -:orphan: - .. include:: ../links.inc .. _learn-python: diff --git a/doc/overview/matlab.rst b/doc/overview/matlab.rst index 92e465fa28b..8617097708f 100644 --- a/doc/overview/matlab.rst +++ b/doc/overview/matlab.rst @@ -8,10 +8,6 @@ MNE-MATLAB documentation ======================== -.. contents:: Page contents - :local: - :depth: 2 - .. note:: The MNE MATLAB Toolbox is compatible with Matlab versions 7.0 or later. Overview diff --git a/doc/overview/migrating.rst b/doc/overview/migrating.rst index 719b391c6cf..4fe4ba3ce4f 100644 --- a/doc/overview/migrating.rst +++ b/doc/overview/migrating.rst @@ -14,7 +14,7 @@ reader :func:`mne.io.read_raw_edf` and a ``set`` file reader. To read in to read in ``set`` files containing ``epochs`` data, use :func:`mne.read_epochs_eeglab`. -This table summarizes equivalent EEGLAB and MNE-Python code for some of the +This table summarizes the equivalent EEGLAB and MNE-Python code for some of the most common analysis tasks. For the sake of clarity, the table below assumes the following variables exist: the file name ``fname``, time interval of the epochs ``tmin`` and ``tmax``, and the experimental conditions ``cond1`` and @@ -29,33 +29,47 @@ below which and above which to filter out data. 
+=====================+==========================================================+==================================================================================================+ | Get started | | ``addpath(...);`` | | :mod:`import mne ` | | | | ``eeglab;`` | | :mod:`from mne import io, ` :class:`~mne.Epochs` | -| | | | :mod:`from mne.preprocessing ` :class:`import ICA ` | +| | | | | :mod:`from mne.preprocessing ` :class:`import ICA ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Import data | ``EEG = pop_fileio(fname);`` | | :func:`raw = io.read_raw_fif(fname) ` | -| | | | :func:`raw = io.read_raw_edf(fname) ` | -| | | | :func:`raw = io.read_raw_eeglab(fname) ` | +| Import data | | ``EEG = pop_fileio(fname);`` | | :func:`raw = io.read_raw_fif(fname) ` | +| | | | | :func:`raw = io.read_raw_edf(fname) ` | +| | | | | :func:`raw = io.read_raw_eeglab(fname) ` ``(set file)`` | +| | | | | | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Filter data | ``EEG = pop_eegfiltnew(EEG, l_freq, h_freq);`` | :func:`raw.filter(l_freq, h_freq) ` | +| Filter data | | ``EEG = pop_eegfiltnew(EEG, l_freq, h_freq);`` | | :func:`raw.filter(l_freq, h_freq) ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Run ICA | ``EEG = pop_runica(EEG, 'pca', n);`` | | :class:`ica = ICA(max_pca_components=n) ` | -| | | | :func:`ica.fit(raw) ` | +| Common Average | | ``EEG= pop_averef;`` | | :func:`raw.set_eeg_reference("average") ` | +| referencing | | | | | ++---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ +| Remove channels | | ``pop_select.m`` | | :func:`raw.drop_channels() ` | +| | | | | | ++---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ +| Run ICA | | ``EEG = pop_runica(EEG, 'pca', n);`` | | :func:`ica.fit(raw) ` | +| | | | | | +| | | ``EEG = pop_binica(EEG, 'pca', n);`` | | :func:`mne.preprocessing.infomax` | ++---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ +| Plot ICA properties | | ``pop_compprop( EEG, comp_num, winhandle);`` | | :func:`ica.plot_properties(raw, picks) ` | ++---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ +| Plot ICA components | | ``compheads()`` | | :func:`ica.plot_components(raw, picks) ` | ++---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ +| Exclude components | | ``pop_selectcomps()`` | | ``ica.exclude = list_of_components_to_exclude`` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ | Epoch data | | 
``event_id = {'cond1', 'cond2'};`` | | :func:`events = mne.find_events(raw) ` | -| | | ``Epochs = pop_epochs(EEG, event_id, [tmin, tmax]);`` | | :py:class:`event_id = dict(cond1=32, cond2=64) ` | -| | | | :class:`epochs = Epochs(raw, events, event_id, tmin, tmax) ` | +| | | ``Epochs = pop_epochs(EEG, event_id, [tmin, tmax]);`` | | :class:`event_id = dict(cond1=32, cond2=64) ` | +| | | | | :class:`epochs = Epochs(raw, events, event_id, tmin, tmax) ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Selecting epochs | ``Epochs = pop_epochs(EEG_epochs, {cond2});`` | :class:`epochs[cond2] ` | +| Selecting epochs | | ``Epochs = pop_epochs(EEG_epochs, {cond2});`` | | :class:`epochs[cond2] ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| ERP butterfly plot | ``pop_timtopo(EEG_epochs, ...);`` | | :meth:`evoked = epochs[cond2].average() ` | -| | | | :func:`evoked.plot() ` | -| | | | :func:`evoked.plot_joint() ` | +| ERP butterfly plot | | ``pop_timtopo(EEG_epochs, ...);`` | | :meth:`evoked = epochs[cond2].average() ` | +| | | | | :func:`evoked.plot() ` | +| | | | | :func:`evoked.plot_joint() ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Contrast ERPs | ``pop_compareerps(EEG_epochs1, EEG_epochs2);`` | | :func:`mne.combine_evoked([evoked1, -evoked2], weights='equal').plot() ` | -| | | | :func:`mne.viz.plot_compare_evokeds([evoked1, evoked2]) ` | +| Contrast ERPs | | ``pop_compareerps(EEG_epochs1, EEG_epochs2);`` | | :func:`mne.combine_evoked([evoked1, -evoked2], weights='equal').plot() ` | +| | | | | :func:`mne.viz.plot_compare_evokeds([evoked1, evoked2]) ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ -| Save data | ``EEG = pop_saveset(EEG, fname);`` | | :func:`raw.save(fname) ` | -| | | | :func:`epochs.save(fname) ` | -| | | | :func:`evoked.save(fname) ` | +| Save data | | ``EEG = pop_saveset(EEG, fname);`` | | :func:`raw.save(fname) ` | +| | | | | :func:`epochs.save(fname) ` | +| | | | | :func:`evoked.save(fname) ` | +---------------------+----------------------------------------------------------+--------------------------------------------------------------------------------------------------+ Potential pitfalls diff --git a/doc/overview/roadmap.rst b/doc/overview/roadmap.rst index cba398af03e..4f54d0670b6 100644 --- a/doc/overview/roadmap.rst +++ b/doc/overview/roadmap.rst @@ -1,5 +1,3 @@ -.. _roadmap: - Roadmap ======= @@ -8,11 +6,6 @@ MNE-Python. These are goals that require substantial effort and/or API design considerations. Some of these may be suitable for Google Summer of Code projects, while others require more extensive work. -.. contents:: Page contents - :local: - :depth: 1 - - Clustering statistics API ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/preprocessing.rst b/doc/preprocessing.rst new file mode 100644 index 00000000000..ffb09b88904 --- /dev/null +++ b/doc/preprocessing.rst @@ -0,0 +1,183 @@ + +Preprocessing +============= + +Projections: + +.. currentmodule:: mne + +.. 
autosummary:: + :toctree: generated/ + + Projection + compute_proj_epochs + compute_proj_evoked + compute_proj_raw + read_proj + write_proj + +:py:mod:`mne.channels`: + +.. currentmodule:: mne.channels + +.. automodule:: mne.channels + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + Layout + DigMontage + compute_native_head_t + fix_mag_coil_types + read_polhemus_fastscan + get_builtin_montages + make_dig_montage + read_dig_polhemus_isotrak + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + make_standard_montage + read_custom_montage + compute_dev_head_t + read_layout + find_layout + make_eeg_layout + make_grid_layout + find_ch_adjacency + read_ch_adjacency + equalize_channels + rename_channels + generate_2d_layout + make_1020_channel_selections + combine_channels + +:py:mod:`mne.preprocessing`: + +.. currentmodule:: mne.preprocessing + +.. automodule:: mne.preprocessing + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + ICA + Xdawn + annotate_flat + annotate_movement + annotate_muscle_zscore + compute_average_dev_head_t + compute_current_source_density + compute_fine_calibration + compute_maxwell_basis + compute_proj_ecg + compute_proj_eog + create_ecg_epochs + create_eog_epochs + find_bad_channels_maxwell + find_ecg_events + find_eog_events + fix_stim_artifact + ica_find_ecg_events + ica_find_eog_events + infomax + equalize_bads + maxwell_filter + oversampled_temporal_projection + peak_finder + read_ica + realign_raw + regress_artifact + corrmap + read_ica_eeglab + read_fine_calibration + write_fine_calibration + +:py:mod:`mne.preprocessing.nirs`: + +.. currentmodule:: mne.preprocessing.nirs + +.. automodule:: mne.preprocessing.nirs + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + optical_density + beer_lambert_law + source_detector_distances + short_channels + scalp_coupling_index + temporal_derivative_distribution_repair + +EEG referencing: + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + add_reference_channels + set_bipolar_reference + set_eeg_reference + +:py:mod:`mne.filter`: + +.. currentmodule:: mne.filter + +.. automodule:: mne.filter + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + construct_iir_filter + create_filter + estimate_ringing_samples + filter_data + notch_filter + resample + +:py:mod:`mne.chpi` + +.. currentmodule:: mne.chpi + +.. automodule:: mne.chpi + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + compute_chpi_amplitudes + compute_chpi_locs + compute_head_pos + extract_chpi_locs_ctf + extract_chpi_locs_kit + filter_chpi + head_pos_to_trans_rot_t + read_head_pos + write_head_pos + +:py:mod:`mne.transforms` + +.. currentmodule:: mne.transforms + +.. automodule:: mne.transforms + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + Transform + quat_to_rot + rot_to_quat + read_ras_mni_t diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 45404145768..a5d8ccdb7fd 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -1,5 +1,3 @@ -:orphan: - .. _api_reference: ==================== @@ -14,1097 +12,36 @@ below a module heading are found in the ``mne`` namespace. MNE-Python also provides multiple command-line scripts that can be called directly from a terminal, see :ref:`python_commands`. -.. contents:: - :local: - :depth: 2 - - -:py:mod:`mne`: - -.. 
automodule:: mne - :no-members: - :no-inherited-members: - -Most-used classes -================= - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - io.Raw - Epochs - Evoked - Info - -Reading raw data -================ - -:py:mod:`mne.io`: - -.. currentmodule:: mne.io - -.. automodule:: mne.io - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - anonymize_info - read_raw - read_raw_artemis123 - read_raw_bti - read_raw_cnt - read_raw_ctf - read_raw_curry - read_raw_edf - read_raw_bdf - read_raw_gdf - read_raw_kit - read_raw_nicolet - read_raw_nirx - read_raw_snirf - read_raw_eeglab - read_raw_brainvision - read_raw_egi - read_raw_fif - read_raw_eximia - read_raw_fieldtrip - read_raw_boxy - read_raw_persyst - read_raw_nihon - -Base class: - -.. autosummary:: - :toctree: generated - - BaseRaw - -:py:mod:`mne.io.kit`: - -.. currentmodule:: mne.io.kit - -.. automodule:: mne.io.kit - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - read_mrk - -File I/O -======== - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated - - channel_type - channel_indices_by_type - get_head_surf - get_meg_helmet_surf - get_volume_labels_from_aseg - get_volume_labels_from_src - parse_config - read_labels_from_annot - read_bem_solution - read_bem_surfaces - read_cov - read_dipole - read_epochs - read_epochs_kit - read_epochs_eeglab - read_epochs_fieldtrip - read_events - read_evokeds - read_evoked_fieldtrip - read_evokeds_mff - read_freesurfer_lut - read_forward_solution - read_label - read_morph_map - read_proj - read_reject_parameters - read_selection - read_source_estimate - read_source_spaces - read_surface - read_trans - read_tri - write_labels_to_annot - write_bem_solution - write_bem_surfaces - write_cov - write_events - write_evokeds - write_forward_solution - write_label - write_proj - write_source_spaces - write_surface - write_trans - what - io.read_info - io.show_fiff - -Base class: - -.. autosummary:: - :toctree: generated - - BaseEpochs - -Creating data objects from arrays -================================= - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - EvokedArray - EpochsArray - io.RawArray - create_info - - -Datasets -======== - -.. currentmodule:: mne.datasets - -:py:mod:`mne.datasets`: - -.. automodule:: mne.datasets - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - brainstorm.bst_auditory.data_path - brainstorm.bst_resting.data_path - brainstorm.bst_raw.data_path - eegbci.load_data - eegbci.standardize - fetch_aparc_sub_parcellation - fetch_fsaverage - fetch_hcp_mmp_parcellation - fnirs_motor.data_path - hf_sef.data_path - kiloword.data_path - limo.load_data - misc.data_path - mtrf.data_path - multimodal.data_path - opm.data_path - sleep_physionet.age.fetch_data - sleep_physionet.temazepam.fetch_data - sample.data_path - somato.data_path - spm_face.data_path - visual_92_categories.data_path - phantom_4dbti.data_path - refmeg_noise.data_path - - -Visualization -============= - -.. currentmodule:: mne.viz - -:py:mod:`mne.viz`: - -.. automodule:: mne.viz - :no-members: - :no-inherited-members: - -.. 
autosummary:: - :toctree: generated/ - - Brain - ClickableImage - add_background_image - centers_to_edges - compare_fiff - circular_layout - iter_topography - mne_analyze_colormap - plot_bem - plot_brain_colorbar - plot_connectivity_circle - plot_cov - plot_csd - plot_dipole_amplitudes - plot_dipole_locations - plot_drop_log - plot_epochs - plot_epochs_psd_topomap - plot_events - plot_evoked - plot_evoked_image - plot_evoked_topo - plot_evoked_topomap - plot_evoked_joint - plot_evoked_field - plot_evoked_white - plot_filter - plot_head_positions - plot_ideal_filter - plot_compare_evokeds - plot_ica_sources - plot_ica_components - plot_ica_properties - plot_ica_scores - plot_ica_overlay - plot_epochs_image - plot_layout - plot_montage - plot_projs_topomap - plot_raw - plot_raw_psd - plot_sensors - plot_sensors_connectivity - plot_snr_estimate - plot_source_estimates - link_brains - plot_volume_source_estimates - plot_vector_source_estimates - plot_sparse_source_estimates - plot_tfr_topomap - plot_topo_image_epochs - plot_topomap - plot_alignment - snapshot_brain_montage - plot_arrowmap - set_3d_backend - get_3d_backend - use_3d_backend - set_3d_options - set_3d_view - set_3d_title - create_3d_figure - get_brain_class - - -Preprocessing -============= - -Projections: - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - Projection - compute_proj_epochs - compute_proj_evoked - compute_proj_raw - read_proj - write_proj - -:py:mod:`mne.channels`: - -.. currentmodule:: mne.channels - -.. automodule:: mne.channels - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - Layout - DigMontage - compute_native_head_t - fix_mag_coil_types - read_polhemus_fastscan - get_builtin_montages - make_dig_montage - read_dig_polhemus_isotrak - read_dig_captrak - read_dig_dat - read_dig_egi - read_dig_fif - read_dig_hpts - make_standard_montage - read_custom_montage - compute_dev_head_t - read_layout - find_layout - make_eeg_layout - make_grid_layout - find_ch_adjacency - read_ch_adjacency - equalize_channels - rename_channels - generate_2d_layout - make_1020_channel_selections - combine_channels - -:py:mod:`mne.preprocessing`: - -.. currentmodule:: mne.preprocessing - -.. automodule:: mne.preprocessing - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - ICA - Xdawn - annotate_flat - annotate_movement - annotate_muscle_zscore - annotate_nan - compute_average_dev_head_t - compute_current_source_density - compute_fine_calibration - compute_proj_ecg - compute_proj_eog - create_ecg_epochs - create_eog_epochs - find_bad_channels_maxwell - find_ecg_events - find_eog_events - fix_stim_artifact - ica_find_ecg_events - ica_find_eog_events - infomax - maxwell_filter - oversampled_temporal_projection - peak_finder - read_ica - realign_raw - regress_artifact - corrmap - read_ica_eeglab - read_fine_calibration - write_fine_calibration - -:py:mod:`mne.preprocessing.nirs`: - -.. currentmodule:: mne.preprocessing.nirs - -.. automodule:: mne.preprocessing.nirs - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - optical_density - beer_lambert_law - source_detector_distances - short_channels - scalp_coupling_index - temporal_derivative_distribution_repair - -EEG referencing: - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - add_reference_channels - set_bipolar_reference - set_eeg_reference - -:py:mod:`mne.filter`: - -.. currentmodule:: mne.filter - -.. 
automodule:: mne.filter - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - construct_iir_filter - create_filter - estimate_ringing_samples - filter_data - notch_filter - resample - -:py:mod:`mne.chpi` - -.. currentmodule:: mne.chpi - -.. automodule:: mne.chpi - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - compute_chpi_amplitudes - compute_chpi_locs - compute_head_pos - extract_chpi_locs_ctf - filter_chpi - head_pos_to_trans_rot_t - read_head_pos - write_head_pos - -:py:mod:`mne.transforms` - -.. currentmodule:: mne.transforms - -.. automodule:: mne.transforms - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - Transform - quat_to_rot - rot_to_quat - read_ras_mni_t - -Events -====== - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - Annotations - AcqParserFIF - concatenate_events - find_events - find_stim_steps - make_fixed_length_events - make_fixed_length_epochs - merge_events - parse_config - pick_events - read_annotations - read_events - write_events - concatenate_epochs - events_from_annotations - annotations_from_events - -:py:mod:`mne.event`: - -.. automodule:: mne.event - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.event - -.. autosummary:: - :toctree: generated/ - - define_target_events - shift_time_events - -:py:mod:`mne.epochs`: - -.. automodule:: mne.epochs - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.epochs - -.. autosummary:: - :toctree: generated/ - - add_channels_epochs - average_movements - combine_event_ids - equalize_epoch_counts - - -Sensor Space Data -================= - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - combine_evoked - concatenate_raws - equalize_channels - grand_average - pick_channels - pick_channels_cov - pick_channels_forward - pick_channels_regexp - pick_types - pick_types_forward - pick_info - read_epochs - read_reject_parameters - read_selection - rename_channels - -:py:mod:`mne.baseline`: - -.. automodule:: mne.baseline - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.baseline - -.. autosummary:: - :toctree: generated/ - - rescale - - -Covariance computation -====================== - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - Covariance - compute_covariance - compute_raw_covariance - cov.compute_whitener - cov.prepare_noise_cov - cov.regularize - compute_rank - make_ad_hoc_cov - read_cov - write_cov - - -MRI Processing -============== - -.. currentmodule:: mne - -Step by step instructions for using :func:`gui.coregistration`: - - - `Coregistration for subjects with structural MRI - `_ - - `Scaling a template MRI for subjects for which no MRI is available - `_ - -.. autosummary:: - :toctree: generated/ - - coreg.get_mni_fiducials - gui.coregistration - gui.fiducials - create_default_subject - scale_mri - scale_bem - scale_labels - scale_source_space - - -Forward Modeling -================ - -.. currentmodule:: mne - -.. 
autosummary:: - :toctree: generated/ - - Forward - SourceSpaces - add_source_space_distances - apply_forward - apply_forward_raw - average_forward_solutions - convert_forward_solution - decimate_surface - dig_mri_distances - forward.compute_depth_prior - forward.compute_orient_prior - forward.restrict_forward_to_label - forward.restrict_forward_to_stc - make_bem_model - make_bem_solution - make_forward_dipole - make_forward_solution - make_field_map - make_sphere_model - morph_source_spaces - read_bem_surfaces - read_forward_solution - read_trans - read_source_spaces - read_surface - sensitivity_map - setup_source_space - setup_volume_source_space - surface.complete_surface_info - surface.read_curvature - use_coil_def - write_bem_surfaces - write_trans - -:py:mod:`mne.bem`: - -.. automodule:: mne.bem - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.bem - -.. autosummary:: - :toctree: generated/ - - ConductorModel - fit_sphere_to_headshape - get_fitting_dig - make_watershed_bem - make_flash_bem - convert_flash_mris - - -Inverse Solutions -================= - -:py:mod:`mne.minimum_norm`: - -.. automodule:: mne.minimum_norm - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.minimum_norm - -.. autosummary:: - :toctree: generated/ - - InverseOperator - apply_inverse - apply_inverse_cov - apply_inverse_epochs - apply_inverse_raw - compute_source_psd - compute_source_psd_epochs - compute_rank_inverse - estimate_snr - make_inverse_operator - prepare_inverse_operator - read_inverse_operator - source_band_induced_power - source_induced_power - write_inverse_operator - make_inverse_resolution_matrix - resolution_metrics - get_cross_talk - get_point_spread - -:py:mod:`mne.inverse_sparse`: - -.. automodule:: mne.inverse_sparse - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.inverse_sparse - -.. autosummary:: - :toctree: generated/ - - mixed_norm - tf_mixed_norm - gamma_map - make_stc_from_dipoles - -:py:mod:`mne.beamformer`: - -.. automodule:: mne.beamformer - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.beamformer - -.. autosummary:: - :toctree: generated/ - - Beamformer - read_beamformer - make_lcmv - apply_lcmv - apply_lcmv_epochs - apply_lcmv_raw - apply_lcmv_cov - make_dics - apply_dics - apply_dics_csd - apply_dics_epochs - rap_music - tf_dics - make_lcmv_resolution_matrix - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - Dipole - DipoleFixed - fit_dipole - -:py:mod:`mne.dipole`: - -.. automodule:: mne.dipole - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.dipole - -.. autosummary:: - :toctree: generated/ - - get_phantom_dipoles - - -Source Space Data -================= - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - BiHemiLabel - Label - MixedSourceEstimate - MixedVectorSourceEstimate - SourceEstimate - VectorSourceEstimate - VolSourceEstimate - VolVectorSourceEstimate - SourceMorph - compute_source_morph - head_to_mni - head_to_mri - extract_label_time_course - grade_to_tris - grade_to_vertices - label.select_sources - grow_labels - label_sign_flip - labels_to_stc - morph_labels - random_parcellation - read_labels_from_annot - read_dipole - read_label - read_source_estimate - read_source_morph - read_talxfm - split_label - stc_to_label - stc_near_sensors - transform_surface_to - vertex_to_mni - write_labels_to_annot - write_label - source_space.compute_distance_to_sensors - - -Time-Frequency -============== - -:py:mod:`mne.time_frequency`: - -.. 
automodule:: mne.time_frequency - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.time_frequency - -.. autosummary:: - :toctree: generated/ - - AverageTFR - EpochsTFR - CrossSpectralDensity - -Functions that operate on mne-python objects: - -.. autosummary:: - :toctree: generated/ - - csd_fourier - csd_multitaper - csd_morlet - pick_channels_csd - read_csd - fit_iir_model_raw - psd_welch - psd_multitaper - tfr_morlet - tfr_multitaper - tfr_stockwell - read_tfrs - write_tfrs - -Functions that operate on ``np.ndarray`` objects: - -.. autosummary:: - :toctree: generated/ - - csd_array_fourier - csd_array_multitaper - csd_array_morlet - dpss_windows - morlet - stft - istft - stftfreq - psd_array_multitaper - psd_array_welch - tfr_array_morlet - tfr_array_multitaper - tfr_array_stockwell - - -:py:mod:`mne.time_frequency.tfr`: - -.. automodule:: mne.time_frequency.tfr - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.time_frequency.tfr - -.. autosummary:: - :toctree: generated/ - - cwt - morlet - - -Connectivity Estimation -======================= - -:py:mod:`mne.connectivity`: - -.. automodule:: mne.connectivity - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.connectivity - -.. autosummary:: - :toctree: generated/ - - degree - envelope_correlation - phase_slope_index - seed_target_indices - spectral_connectivity - - -.. _api_reference_statistics: - -Statistics -========== - -:py:mod:`mne.stats`: - -.. automodule:: mne.stats - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.stats - -Parametric statistics (see :mod:`scipy.stats` and :mod:`statsmodels` for more -options): - -.. autosummary:: - :toctree: generated/ - - ttest_1samp_no_p - ttest_ind_no_p - f_oneway - f_mway_rm - f_threshold_mway_rm - linear_regression - linear_regression_raw - -Mass-univariate multiple comparison correction: - -.. autosummary:: - :toctree: generated/ - - bonferroni_correction - fdr_correction - -Non-parametric (clustering) resampling methods: - -.. autosummary:: - :toctree: generated/ - - combine_adjacency - permutation_cluster_test - permutation_cluster_1samp_test - permutation_t_test - spatio_temporal_cluster_test - spatio_temporal_cluster_1samp_test - summarize_clusters_stc - bootstrap_confidence_interval - -Compute ``adjacency`` matrices for cluster-level statistics: - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - channels.find_ch_adjacency - channels.read_ch_adjacency - spatial_dist_adjacency - spatial_src_adjacency - spatial_tris_adjacency - spatial_inter_hemi_adjacency - spatio_temporal_src_adjacency - spatio_temporal_tris_adjacency - spatio_temporal_dist_adjacency - - -Simulation -========== - -:py:mod:`mne.simulation`: - -.. automodule:: mne.simulation - :no-members: - :no-inherited-members: - -.. currentmodule:: mne.simulation - -.. autosummary:: - :toctree: generated/ - - add_chpi - add_ecg - add_eog - add_noise - simulate_evoked - simulate_raw - simulate_stc - simulate_sparse_stc - select_source_in_label - SourceSimulator - -.. _api_decoding: - -Decoding -======== - -:py:mod:`mne.decoding`: - -.. automodule:: mne.decoding - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - CSP - EMS - FilterEstimator - LinearModel - PSDEstimator - Scaler - TemporalFilter - TimeFrequency - UnsupervisedSpatialFilter - Vectorizer - ReceptiveField - TimeDelayingRidge - SlidingEstimator - GeneralizingEstimator - SPoC - SSD - -Functions that assist with decoding and model fitting: - -.. 
autosummary:: - :toctree: generated/ - - compute_ems - cross_val_multiscore - get_coef - - -Realtime -======== - -Realtime functionality has moved to the standalone module :mod:`mne_realtime`. - -MNE-Report -========== - -:py:mod:`mne`: - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - Report - open_report - - -Logging and Configuration -========================= - -.. currentmodule:: mne - -.. autosummary:: - :toctree: generated/ - - get_config_path - get_config - open_docs - set_log_level - set_log_file - set_config - set_cache_dir - sys_info - verbose - -:py:mod:`mne.utils`: - -.. currentmodule:: mne.utils - -.. automodule:: mne.utils - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - deprecated - warn - -:py:mod:`mne.cuda`: - -.. currentmodule:: mne.cuda - -.. automodule:: mne.cuda - :no-members: - :no-inherited-members: - -.. autosummary:: - :toctree: generated/ - - get_cuda_memory - init_cuda - set_cuda_device +.. container:: d-none + + :py:mod:`mne`: + + .. automodule:: mne + :no-members: + :no-inherited-members: + +.. toctree:: + :maxdepth: 2 + + most_used_classes + reading_raw_data + file_io + creating_from_arrays + datasets + visualization + preprocessing + events + sensor_space + covariance + mri + forward + inverse + source_space + time_frequency + connectivity + statistics + simulation + decoding + realtime + report + logging diff --git a/doc/reading_raw_data.rst b/doc/reading_raw_data.rst new file mode 100644 index 00000000000..a5c22700948 --- /dev/null +++ b/doc/reading_raw_data.rst @@ -0,0 +1,58 @@ +Reading raw data +================ + +:py:mod:`mne.io`: + +.. currentmodule:: mne.io + +.. automodule:: mne.io + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + anonymize_info + read_raw + read_raw_artemis123 + read_raw_bti + read_raw_cnt + read_raw_ctf + read_raw_curry + read_raw_edf + read_raw_bdf + read_raw_gdf + read_raw_kit + read_raw_nedf + read_raw_nicolet + read_raw_nirx + read_raw_snirf + read_raw_eeglab + read_raw_brainvision + read_raw_egi + read_raw_fif + read_raw_eximia + read_raw_fieldtrip + read_raw_boxy + read_raw_persyst + read_raw_nihon + +Base class: + +.. autosummary:: + :toctree: generated + + BaseRaw + +:py:mod:`mne.io.kit`: + +.. currentmodule:: mne.io.kit + +.. automodule:: mne.io.kit + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + read_mrk diff --git a/doc/realtime.rst b/doc/realtime.rst new file mode 100644 index 00000000000..91c027a9e3f --- /dev/null +++ b/doc/realtime.rst @@ -0,0 +1,5 @@ + +Realtime +======== + +Realtime functionality has moved to the standalone module :mod:`mne_realtime`. diff --git a/doc/references.bib b/doc/references.bib index 12b60939a02..502c1704217 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -1,4 +1,30 @@ - +% Encoding: UTF-8 +% +% If available, include a DOI (preferred) *or* a URL for a given reference, but +# not both, as the DOI turns into a link which is redundant with the URL. + +% MNE-C reference +@article{GramfortEtAl2014, + title = {{{MNE}} Software for Processing {{MEG}} and {{EEG}} Data}, + author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and Engemann, Denis A. 
and Strohmeier, Daniel and Brodbeck, Christian and Parkkonen, Lauri and H{\"a}m{\"a}l{\"a}inen, Matti S.}, + year = {2014}, + volume = {86}, + pages = {446--460}, + doi = {10.1016/j.neuroimage.2013.10.027}, + journal = {NeuroImage}, +} +% MNE-Python reference +@article{GramfortEtAl2013a, + title = {{{MEG}} and {{EEG}} Data Analysis with {{MNE}}-{{Python}}}, + author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and Engemann, Denis A. and Strohmeier, Daniel and Brodbeck, Christian and Goj, Roman and Jas, Mainak and Brooks, Teon and Parkkonen, Lauri and H{\"a}m{\"a}l{\"a}inen, Matti S.}, + year = {2013}, + volume = {7}, + pages = {1--13}, + doi = {10.3389/fnins.2013.00267}, + journal = {Frontiers in Neuroscience}, + number = {267} +} +% everything else @article{AblinEtAl2018, author = {Ablin, Pierre and Cardoso, Jean-Francois and Gramfort, Alexandre}, doi = {10.1109/TSP.2018.2844203}, @@ -118,6 +144,16 @@ @article{BergScherg1994 year = {1994} } +@inproceedings{BigdelyShamloEtAl2013, + author = {Bigdely-Shamlo, Nima and Kreutz-Delgado, Kenneth and Robbins, Kay and Miyakoshi, Makoto and Westerfield, Marissa and Bel-Bahar, Tarik and Kothe, Christian and Hsi, Jessica and Makeig, Scott}, + doi = {10.1109/GlobalSIP.2013.6736796}, + booktitle = {2013 IEEE Global Conference on Signal and Information Processing}, + pages = {1--4}, + title = {Hierarchical event descriptor {(HED)} tags for analysis of event-related {EEG} studies}, + organization = {IEEE}, + year = {2013}, +} + @article{BlankertzEtAl2008, author = {Blankertz, Benjamin and Tomioka, Ryota and Lemm, Steven and Kawanabe, Motoaki and Müller, Klaus-Robert}, doi = {10.1109/MSP.2008.4408441}, @@ -152,6 +188,28 @@ @article{BrookesEtAl2008 year = {2008} } +@article{BrunaEtAl2018, + doi = {10.1088/1741-2552/aacfe4}, + year = {2018}, + publisher = {{IOP} Publishing}, + volume = {15}, + number = {5}, + pages = {056011}, + author = {Ricardo Bru{\~{n}}a, Fernando Maest{\'{u}}, Ernesto Pereda}, + title = {Phase locking value revisited: teaching new tricks to an old dog}, + journal = {Journal of Neural Engineering}, +} + +@techreport{BurdakovMerkulov2001, + title={On a new norm for data fitting and optimization problems}, + author={Burdakov, Oleg and Merkulov, Boris}, + number={LiTH-MAT-R-2001-29}, + address={Link{\"o}ping}, + institution={Link{\"o}ping University}, + type = {Technical {{Report}}}, + year={2001} +} + @article{CamposViolaEtAl2009, author = {Campos Viola, Filipa and Thorne, Jeremy and Edmonds, Barrie and Schneider, Till and Eichele, Tom and Debener, Stefan}, doi = {10.1016/j.clinph.2009.01.015}, @@ -383,7 +441,7 @@ @article{FischlEtAl2004 } @article{FishburnEtAl2019, - title={Temporal derivative distribution repair (TDDR): a motion correction method for fNIRS}, + title={Temporal derivative distribution repair (TDDR): a motion correction method for {fNIRS}}, doi = {10.1016/j.neuroimage.2018.09.025}, author={Fishburn, Frank A and Ludlum, Ruth S and Vaidya, Chandan J and Medvedev, Andrei V}, journal={NeuroImage}, @@ -458,6 +516,18 @@ @article{GraimannEtAl2002 year = {2002} } +@article{GramfortEtAl2010, + author = {Alexandre Gramfort and Renaud Keriven and Maureen Clerc}, + title = {Graph-Based Variability Estimation in Single-Trial Event-Related Neural Responses}, + journal = {{IEEE} Transactions on Biomedical Engineering}, + doi = {10.1109/tbme.2009.2037139}, + year = {2010}, + publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, + volume = {57}, + number = {5}, + pages = {1051--1061}, +} + 
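For orientation, entries in ``doc/references.bib`` are pulled into the built documentation through ``:footcite:`` roles paired with a ``.. footbibliography::`` directive, which is how the example scripts touched later in this patch cite them. A minimal sketch of that pattern, assuming a module docstring of an example script (the title and sentence are illustrative only; ``GramfortEtAl2013a`` is one of the keys defined above)::

    """
    Illustrative example
    ====================

    This analysis uses MNE-Python :footcite:`GramfortEtAl2013a`.

    References
    ----------
    .. footbibliography::
    """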
@incollection{GramfortEtAl2011, address = {{Berlin; Heidelberg}}, author = {Gramfort, Alexandre and Strohmeier, Daniel and Haueisen, Jens and Hämäläinen, Matti S. and Kowalski, Matthieu}, @@ -483,7 +553,7 @@ @article{GramfortEtAl2012 year = {2012} } -@article{GramfortEtAl2013, +@article{GramfortEtAl2013b, author = {Gramfort, Alexandre and Strohmeier, Daniel T. and Haueisen, Jens and Hämäläinen, Matti S. and Kowalski, Matthieu}, doi = {10.1016/j.neuroimage.2012.12.051}, journal = {NeuroImage}, @@ -617,13 +687,13 @@ @article{HaufeEtAl2014 } @article{HaufeEtAl2014b, - author = {Haufe, Stefan and D{\"a}hne, Sven and Nikulin, Vadim V}, - doi = {https://doi.org/10.1016/j.neuroimage.2014.06.073}, - journal = {NeuroImage}, - pages = {583-597}, - title = {Dimensionality reduction for the analysis of brain oscillations}, - volume = {101}, - year = {2014} + author = {Haufe, Stefan and D{\"a}hne, Sven and Nikulin, Vadim V}, + doi = {https://doi.org/10.1016/j.neuroimage.2014.06.073}, + journal = {NeuroImage}, + pages = {583-597}, + title = {Dimensionality reduction for the analysis of brain oscillations}, + volume = {101}, + year = {2014} } @article{HaukEtAl2006, @@ -638,12 +708,12 @@ @article{HaukEtAl2006 } @article {HaukEtAl2019, - author = {Hauk, Olaf and Stenroos, Matti and Treder, Matthias}, - title = {Towards an Objective Evaluation of EEG/MEG Source Estimation Methods: The Linear Tool Kit}, - year = {2019}, - doi = {10.1101/672956}, - publisher = {Cold Spring Harbor Laboratory}, - journal = {bioRxiv} + author = {Hauk, Olaf and Stenroos, Matti and Treder, Matthias}, + title = {Towards an Objective Evaluation of {EEG/MEG} Source Estimation Methods: The Linear Tool Kit}, + year = {2019}, + doi = {10.1101/672956}, + publisher = {Cold Spring Harbor Laboratory}, + journal = {bioRxiv} } @book{Heiman2002, @@ -960,6 +1030,19 @@ @article{LinEtAl2006 year = {2006} } +@article{LinEtAl2006a, + title = {Assessing and improving the spatial accuracy in {MEG} source localization by depth-weighted minimum-norm estimates}, + volume = {31}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2005.11.054}, + number = {1}, + urldate = {2021-01-28}, + journal = {NeuroImage}, + author = {Lin, Fa-Hsuan and Witzel, Thomas and Ahlfors, Seppo P. and Stufflebeam, Steven M. and Belliveau, John W. and Hämäläinen, Matti S.}, + year = {2006}, + pages = {160--171} +} + @article{LiuEtAl1998, author = {Liu, Arthur K. and Belliveau, John W. and Dale, Anders M.}, doi = {10.1073/pnas.95.15.8945}, @@ -1042,6 +1125,18 @@ @misc{Mills2016 year = {2016} } +@article{MolinsEtAl2008, + author = {Molins A, and Stufflebeam S. M., and Brown E. N., and Hämäläinen M. 
S.}, + doi = {10.1016/j.neuroimage.2008.05.064}, + journal = {Neuroimage}, + number = {3}, + pages = {1069-1077}, + title = {Quantification of the benefit from integrating {MEG} and {EEG} data in + minimum l2-norm estimation}, + volume = {42}, + year = {2008} +} + @incollection{Montoya-MartinezEtAl2017, address = {{Cham}}, author = {{Montoya-Martínez}, Jair and Cardoso, Jean-François and Gramfort, Alexandre}, @@ -1136,14 +1231,14 @@ @article{NicholsHolmes2002 } @article{NikulinEtAl2011, - author = {Nikulin, Vadim V and Nolte, Guido and Curio, Gabriel}, - doi = {10.1016/j.neuroimage.2011.01.057}, - journal={NeuroImage}, - title = {A novel method for reliable and fast extraction of neuronal {EEG/MEG} oscillations on the basis of spatio-spectral decomposition}, - pages={1528-1535}, - volume={55}, - number={4}, - year={2011} + author = {Nikulin, Vadim V and Nolte, Guido and Curio, Gabriel}, + doi = {10.1016/j.neuroimage.2011.01.057}, + journal={NeuroImage}, + title = {A novel method for reliable and fast extraction of neuronal {EEG/MEG} oscillations on the basis of spatio-spectral decomposition}, + pages={1528-1535}, + volume={55}, + number={4}, + year={2011} } @article{NolteEtAl2004, @@ -1213,12 +1308,10 @@ @article{Pascual-Marqui2002 @article{Pascual-Marqui2011, title = {Assessing interactions in the brain with exact low-resolution electromagnetic tomography}, volume = {369}, - url = {https://royalsocietypublishing.org/doi/full/10.1098/rsta.2011.0081}, doi = {10.1098/rsta.2011.0081}, number = {1952}, journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences}, author = {Pascual-Marqui, Roberto D. and Lehmann, Dietrich and Koukkou, Martha and Kochi, Kieko and Anderer, Peter and Saletu, Bernd and Tanaka, Hideaki and Hirata, Koichi and John, E. Roy and Prichep, Leslie and Biscay-Lirio, Rolando and Kinoshita, Toshihiko}, - month = oct, year = {2011}, pages = {3768--3784} } @@ -1243,7 +1336,6 @@ @article{PerrinEtAl1987 number = {4}, journal = {IEEE Transactions on Biomedical Engineering}, author = {Perrin, F. and Bertrand, O. and Pernier, J.}, - month = apr, year = {1987}, pages = {283--288} } @@ -1809,7 +1901,6 @@ @article{KayserTenke2015 number = {3}, journal = {International journal of psychophysiology : official journal of the International Organization of Psychophysiology}, author = {Kayser, Jürgen and Tenke, Craig E.}, - month = sep, year = {2015}, pmid = {26071227}, pmcid = {PMC4610715}, @@ -1817,67 +1908,58 @@ @article{KayserTenke2015 } @article{GrattonEtAl1983, - title = {A new method for off-line removal of ocular artifact}, - volume = {55}, - issn = {0013-4694}, - doi = {10.1016/0013-4694(83)90135-9}, - language = {en}, - number = {4}, - urldate = {2020-08-03}, - journal = {Electroencephalography and Clinical Neurophysiology}, - author = {Gratton, Gabriele and Coles, Michael G. H and Donchin, Emanuel}, - month = apr, - year = {1983}, - pages = {468--484} + title = {A new method for off-line removal of ocular artifact}, + volume = {55}, + issn = {0013-4694}, + doi = {10.1016/0013-4694(83)90135-9}, + number = {4}, + urldate = {2020-08-03}, + journal = {Electroencephalography and Clinical Neurophysiology}, + author = {Gratton, Gabriele and Coles, Michael G. 
H and Donchin, Emanuel}, + year = {1983}, + pages = {468--484} } @book{OppenheimEtAl1999, - address = {Upper Saddle River, NJ}, - edition = {2 edition}, - title = {Discrete-{Time} {Signal} {Processing}}, - isbn = {978-0-13-754920-7}, - language = {English}, - publisher = {Prentice Hall}, - author = {Oppenheim, Alan V. and Schafer, Ronald W. and Buck, John R.}, - month = jan, - year = {1999} + address = {Upper Saddle River, NJ}, + edition = {2 edition}, + title = {Discrete-{Time} {Signal} {Processing}}, + isbn = {978-0-13-754920-7}, + publisher = {Prentice Hall}, + author = {Oppenheim, Alan V. and Schafer, Ronald W. and Buck, John R.}, + year = {1999} } - @book{CrochiereRabiner1983, - address = {Englewood Cliffs, NJ}, - edition = {1 edition}, - title = {Multirate {Digital} {Signal} {Processing}}, - isbn = {978-0-13-605162-6}, - language = {English}, - publisher = {Pearson}, - author = {Crochiere, Ronald E. and Rabiner, Lawrence R.}, - month = dec, - year = {1983} + address = {Englewood Cliffs, NJ}, + edition = {1 edition}, + title = {Multirate {Digital} {Signal} {Processing}}, + isbn = {978-0-13-605162-6}, + publisher = {Pearson}, + author = {Crochiere, Ronald E. and Rabiner, Lawrence R.}, + year = {1983} } @article{Yao2001, - title = {A method to standardize a reference of scalp {EEG} recordings to a point at infinity}, - volume = {22}, - issn = {0967-3334}, - doi = {10.1088/0967-3334/22/4/305}, - number = {4}, - journal = {Physiological Measurement}, - author = {Yao, D.}, - month = nov, - year = {2001}, - pmid = {11761077}, - pages = {693--711} + title = {A method to standardize a reference of scalp {EEG} recordings to a point at infinity}, + volume = {22}, + issn = {0967-3334}, + doi = {10.1088/0967-3334/22/4/305}, + number = {4}, + journal = {Physiological Measurement}, + author = {Yao, D.}, + year = {2001}, + pmid = {11761077}, + pages = {693--711} } @inproceedings{StrohmeierEtAl2015, - title = {{MEG}/{EEG} {Source} {Imaging} with a {Non}-{Convex} {Penalty} in the {Time}-{Frequency} {Domain}}, - doi = {10.1109/PRNI.2015.14}, - booktitle = {2015 {International} {Workshop} on {Pattern} {Recognition} in {NeuroImaging}}, - author = {Strohmeier, Daniel and Gramfort, Alexandre and Haueisen, Jens}, - month = jun, - year = {2015}, - pages = {21--24} + title = {{MEG}/{EEG} {Source} {Imaging} with a {Non}-{Convex} {Penalty} in the {Time}-{Frequency} {Domain}}, + doi = {10.1109/PRNI.2015.14}, + booktitle = {2015 {International} {Workshop} on {Pattern} {Recognition} in {NeuroImaging}}, + author = {Strohmeier, Daniel and Gramfort, Alexandre and Haueisen, Jens}, + year = {2015}, + pages = {21--24} } @misc{WikipediaSI, @@ -1888,10 +1970,120 @@ @misc{WikipediaSI urldate = "12-October-2020" } - @misc{BIDSdocs, author = "{BIDS} contributors", title = {Brain Imaging Data Structure — Specification}, url = {https://bids-specification.readthedocs.io/en/stable/}, urldate = "12-October-2020" } + +@article{OReillyEtAl2021, + title = {Structural templates for imaging {EEG} cortical sources in infants}, + volume = {227}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2020.117682}, + urldate = {2021-01-12}, + journal = {NeuroImage}, + author = {O'Reilly, Christian and Larson, Eric and Richards, John E. 
and Elsabbagh, Mayada}, + year = {2021}, + pages = {117682} +} + +@article{RichardsEtAl2016, + series = {Sharing the wealth: {Brain} {Imaging} {Repositories} in 2015}, + title = {A database of age-appropriate average {MRI} templates}, + volume = {124}, + issn = {1053-8119}, + doi = {10.1016/j.neuroimage.2015.04.055}, + journal = {NeuroImage}, + author = {Richards, John E. and Sanchez, Carmen and Phillips-Meek, Michelle and Xie, Wanze}, + year = {2016}, + pages = {1254--1259} +} + +@Article{Lehmann1980, + author = {Dietrich Lehmann and Wolfgang Skrandies}, + journal = {Electroencephalography and Clinical Neurophysiology}, + title = {Reference-free identification of components of checkerboard-evoked multichannel potential fields}, + year = {1980}, + issn = {0013-4694}, + number = {6}, + pages = {609--621}, + volume = {48}, + doi = {10.1016/0013-4694(80)90419-8}, + publisher = {Elsevier {BV}}, +} + +@Article{Lehmann1984, + author = {Dietrich Lehmann and Wolfgang Skrandies}, + journal = {Progress in Neurobiology}, + title = {Spatial analysis of evoked potentials in man—a review}, + year = {1984}, + issn = {0301-0082}, + number = {3}, + pages = {227--250}, + volume = {23}, + doi = {10.1016/0301-0082(84)90003-0}, + publisher = {Elsevier {BV}}, +} + +@Article{Murray2008, + author = {Micah M. Murray and Denis Brunet and Christoph M. Michel}, + journal = {Brain Topography}, + title = {Topographic {ERP} Analyses: {A} Step-by-Step Tutorial Review}, + year = {2008}, + issn = {0896-0267}, + number = {4}, + pages = {249--264}, + volume = {20}, + doi = {10.1007/s10548-008-0054-5}, + publisher = {Springer Science and Business Media {LLC}}, +} + +@Article{Kappenman2021, + author = {Emily S. Kappenman and Jaclyn L. Farrens and Wendy Zhang and Andrew X. Stewart and Steven J. Luck}, + journal = {{NeuroImage}}, + title = {{ERP} {CORE}: An open resource for human event-related potential research}, + year = {2021}, + issn = {1053-8119}, + pages = {117465}, + volume = {225}, + doi = {10.1016/j.neuroimage.2020.117465}, + publisher = {Elsevier {BV}}, +} + +@article{GenoveseEtAl2002, + title = {Thresholding of Statistical Maps in Functional Neuroimaging Using the False Discovery Rate}, + journal = {NeuroImage}, + volume = {15}, + number = {4}, + pages = {870-878}, + year = {2002}, + issn = {1053-8119}, + doi = {https://doi.org/10.1006/nimg.2001.1037}, + author = {Christopher R. Genovese and Nicole A. Lazar and Thomas Nichols}, +} + +@article{YaoEtAl2019, + title={Which reference should we use for {EEG} and {ERP} practice?}, + author={Yao, Dezhong and Qin, Yun and Hu, Shiang and Dong, Li and Vega, Maria L Bringas and Sosa, Pedro A Vald{\'e}s}, + journal={Brain topography}, + volume={32}, + number={4}, + pages={530--549}, + year={2019}, + doi = {10.1007/s10548-019-00707-x}, + publisher={Springer} +} + +@article{PolonenkoMaddox2019, + title = {The {Parallel} {Auditory} {Brainstem} {Response}}, + volume = {23}, + issn = {2331-2165}, + doi = {10.1177/2331216519871395}, + language = {en}, + journal = {Trends in Hearing}, + author = {Polonenko, Melissa J. and Maddox, Ross K.}, + year = {2019}, + pages = {2331216519871395} +} diff --git a/doc/references.rst b/doc/references.rst deleted file mode 100644 index 32616f7fd40..00000000000 --- a/doc/references.rst +++ /dev/null @@ -1,70 +0,0 @@ -:orphan: - -.. _ch_reading: - -==================== -Related publications -==================== - -General MEG reviews -################### - -.. rst-class:: hidden - -:footcite:`HamalainenEtAl1993,BailletEtAl2001,HamalainenHari2002` - -.. 
footbibliography:: - - -Cortical surface reconstruction and morphing -############################################ - -.. rst-class:: hidden - -:footcite:`DaleEtAl1999,FischlEtAl1999,FischlEtAl1999a` - -.. footbibliography:: - - -.. _CEGEGDEI: - -Forward modeling -################ - -.. rst-class:: hidden - -:footcite:`HamalainenSarvas1989,FischlEtAl2004,SegonneEtAl2004,JovicichEtAl2006,MosherEtAl1999` - -.. footbibliography:: - - -.. _CEGIEEBB: - -Signal-space projections -######################## - -.. rst-class:: hidden - -:footcite:`TescheEtAl1995,UusitaloIlmoniemi1997` - -.. footbibliography:: - - -Minimum-norm estimates -###################### - -.. rst-class:: hidden - -:footcite:`HamalainenIlmoniemi1984,DaleSereno1993,HamalainenIlmoniemi1994,DaleEtAl2000,LiuEtAl2002,LinEtAl2006,OostendorpEtAl2000,GoncalvesEtAl2003,LewEtAl2009` - -.. footbibliography:: - - -fMRI-weighted estimates -####################### - -.. rst-class:: hidden - -:footcite:`DaleEtAl2000,LiuEtAl1998,LinEtAl2004` - -.. footbibliography:: diff --git a/doc/report.rst b/doc/report.rst new file mode 100644 index 00000000000..5104f68adc6 --- /dev/null +++ b/doc/report.rst @@ -0,0 +1,13 @@ + +MNE-Report +========== + +:py:mod:`mne`: + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + Report + open_report diff --git a/doc/sensor_space.rst b/doc/sensor_space.rst new file mode 100644 index 00000000000..a1c72b3aa59 --- /dev/null +++ b/doc/sensor_space.rst @@ -0,0 +1,37 @@ + +Sensor Space Data +================= + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + combine_evoked + concatenate_raws + equalize_channels + grand_average + pick_channels + pick_channels_cov + pick_channels_forward + pick_channels_regexp + pick_types + pick_types_forward + pick_info + read_epochs + read_reject_parameters + read_vectorview_selection + rename_channels + +:py:mod:`mne.baseline`: + +.. automodule:: mne.baseline + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.baseline + +.. autosummary:: + :toctree: generated/ + + rescale diff --git a/doc/simulation.rst b/doc/simulation.rst new file mode 100644 index 00000000000..935b2d6599a --- /dev/null +++ b/doc/simulation.rst @@ -0,0 +1,25 @@ + +Simulation +========== + +:py:mod:`mne.simulation`: + +.. automodule:: mne.simulation + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.simulation + +.. autosummary:: + :toctree: generated/ + + add_chpi + add_ecg + add_eog + add_noise + simulate_evoked + simulate_raw + simulate_stc + simulate_sparse_stc + select_source_in_label + SourceSimulator diff --git a/doc/source_space.rst b/doc/source_space.rst new file mode 100644 index 00000000000..ab928030dd8 --- /dev/null +++ b/doc/source_space.rst @@ -0,0 +1,44 @@ + +Source Space Data +================= + +.. currentmodule:: mne + +.. 
autosummary:: + :toctree: generated/ + + BiHemiLabel + Label + MixedSourceEstimate + MixedVectorSourceEstimate + SourceEstimate + VectorSourceEstimate + VolSourceEstimate + VolVectorSourceEstimate + SourceMorph + compute_source_morph + head_to_mni + head_to_mri + extract_label_time_course + grade_to_tris + grade_to_vertices + label.select_sources + grow_labels + label_sign_flip + labels_to_stc + morph_labels + random_parcellation + read_labels_from_annot + read_dipole + read_label + read_source_estimate + read_source_morph + read_talxfm + split_label + stc_to_label + stc_near_sensors + transform_surface_to + vertex_to_mni + write_labels_to_annot + write_label + source_space.compute_distance_to_sensors diff --git a/doc/sphinxext/gen_commands.py b/doc/sphinxext/gen_commands.py index eda53fed6e2..38c6b883d42 100644 --- a/doc/sphinxext/gen_commands.py +++ b/doc/sphinxext/gen_commands.py @@ -32,10 +32,6 @@ def setup_module(): Command line tools using Python =============================== -.. contents:: Page contents - :local: - :depth: 1 - """ command_rst = """ diff --git a/doc/sphinxext/gen_names.py b/doc/sphinxext/gen_names.py new file mode 100644 index 00000000000..01785598430 --- /dev/null +++ b/doc/sphinxext/gen_names.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import os +from os import path as op + + +def setup(app): + app.connect('builder-inited', generate_name_links_rst) + + +def setup_module(): + # HACK: Stop nosetests running setup() above + pass + + +def generate_name_links_rst(app=None): + if 'linkcheck' not in str(app.builder).lower(): + return + out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated')) + if not op.isdir(out_dir): + os.mkdir(out_dir) + out_fname = op.join(out_dir, '_names.rst') + names_path = op.abspath( + op.join(os.path.dirname(__file__), '..', 'changes', 'names.inc')) + with open(out_fname, 'w', encoding='utf8') as fout: + fout.write(':orphan:\n\n') + with open(names_path, 'r') as fin: + for line in fin: + if line.startswith('.. _'): + fout.write(f'- {line[4:]}') diff --git a/doc/sphinxext/sphinx_bootstrap_divs/__init__.py b/doc/sphinxext/sphinx_bootstrap_divs/__init__.py index f1f36f16990..459eac5cb20 100644 --- a/doc/sphinxext/sphinx_bootstrap_divs/__init__.py +++ b/doc/sphinxext/sphinx_bootstrap_divs/__init__.py @@ -74,16 +74,16 @@ class CollapseNode(DivNode): OPTION_KEYS = ('title', 'id_', 'extra', 'class') ELEMENT = 'div' - BASECLASS = 'panel' + BASECLASS = 'card' HEADER_PRETITLE = """.. raw:: html - -
-
""" + +
+
""" FOOTER = """.. raw:: html
""" @@ -112,7 +112,7 @@ def run(self): """Parse.""" self.assert_has_content() title_text = _(self.arguments[0]) - extra = _(' in' if 'open' in self.options else '') + extra = _(' show' if 'open' in self.options else '') class_ = {'class': self.options.get('class', 'default')} id_ = nodes.make_id(title_text) node = CollapseNode(title=title_text, id_=id_, extra=extra, **class_) diff --git a/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css b/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css index f12ab1d4d09..4996e7db627 100644 --- a/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css +++ b/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.css @@ -1,9 +1,9 @@ -.panel-title a { +.card-header a { display: block !important; text-decoration: none; } @media (max-width: 991px) { - .collapse.in{ + .collapse.show{ display:block !important; } } diff --git a/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js b/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js index 55d27b24ba1..a0cc3db7c58 100644 --- a/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js +++ b/doc/sphinxext/sphinx_bootstrap_divs/bootstrap_divs.js @@ -1,6 +1,6 @@ $(document).ready(function () { if(location.hash != null && location.hash != ""){ - $('.collapse').removeClass('in'); - $(location.hash + '.collapse').addClass('in'); + $('.collapse').removeClass('show'); + $(location.hash + '.collapse').addClass('show'); } }); diff --git a/doc/statistics.rst b/doc/statistics.rst new file mode 100644 index 00000000000..2b8313b1f11 --- /dev/null +++ b/doc/statistics.rst @@ -0,0 +1,66 @@ + +.. _api_reference_statistics: + +Statistics +========== + +:py:mod:`mne.stats`: + +.. automodule:: mne.stats + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.stats + +Parametric statistics (see :mod:`scipy.stats` and :mod:`statsmodels` for more +options): + +.. autosummary:: + :toctree: generated/ + + ttest_1samp_no_p + ttest_ind_no_p + f_oneway + f_mway_rm + f_threshold_mway_rm + linear_regression + linear_regression_raw + +Mass-univariate multiple comparison correction: + +.. autosummary:: + :toctree: generated/ + + bonferroni_correction + fdr_correction + +Non-parametric (clustering) resampling methods: + +.. autosummary:: + :toctree: generated/ + + combine_adjacency + permutation_cluster_test + permutation_cluster_1samp_test + permutation_t_test + spatio_temporal_cluster_test + spatio_temporal_cluster_1samp_test + summarize_clusters_stc + bootstrap_confidence_interval + +Compute ``adjacency`` matrices for cluster-level statistics: + +.. currentmodule:: mne + +.. autosummary:: + :toctree: generated/ + + channels.find_ch_adjacency + channels.read_ch_adjacency + spatial_dist_adjacency + spatial_src_adjacency + spatial_tris_adjacency + spatial_inter_hemi_adjacency + spatio_temporal_src_adjacency + spatio_temporal_tris_adjacency + spatio_temporal_dist_adjacency diff --git a/doc/time_frequency.rst b/doc/time_frequency.rst new file mode 100644 index 00000000000..f0e5ab90a7a --- /dev/null +++ b/doc/time_frequency.rst @@ -0,0 +1,71 @@ + +Time-Frequency +============== + +:py:mod:`mne.time_frequency`: + +.. automodule:: mne.time_frequency + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.time_frequency + +.. autosummary:: + :toctree: generated/ + + AverageTFR + EpochsTFR + CrossSpectralDensity + +Functions that operate on mne-python objects: + +.. 
autosummary:: + :toctree: generated/ + + csd_fourier + csd_multitaper + csd_morlet + pick_channels_csd + read_csd + fit_iir_model_raw + psd_welch + psd_multitaper + tfr_morlet + tfr_multitaper + tfr_stockwell + read_tfrs + write_tfrs + +Functions that operate on ``np.ndarray`` objects: + +.. autosummary:: + :toctree: generated/ + + csd_array_fourier + csd_array_multitaper + csd_array_morlet + dpss_windows + morlet + stft + istft + stftfreq + psd_array_multitaper + psd_array_welch + tfr_array_morlet + tfr_array_multitaper + tfr_array_stockwell + + +:py:mod:`mne.time_frequency.tfr`: + +.. automodule:: mne.time_frequency.tfr + :no-members: + :no-inherited-members: + +.. currentmodule:: mne.time_frequency.tfr + +.. autosummary:: + :toctree: generated/ + + cwt + morlet diff --git a/doc/upload_html.sh b/doc/upload_html.sh deleted file mode 100755 index bc85f85f0a2..00000000000 --- a/doc/upload_html.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -#scp -r build/html/* martinos-data:/web/html/mne/ -rsync -rltvz --delete --perms --chmod=g+w _build/html/ martinos-data:/web/html/ext/mne/stable -essh -ssh martinos-data "chgrp -R megweb /web/html/ext/mne/stable" diff --git a/doc/visualization.rst b/doc/visualization.rst new file mode 100644 index 00000000000..f10afd2d766 --- /dev/null +++ b/doc/visualization.rst @@ -0,0 +1,78 @@ + +Visualization +============= + +.. currentmodule:: mne.viz + +:py:mod:`mne.viz`: + +.. automodule:: mne.viz + :no-members: + :no-inherited-members: + +.. autosummary:: + :toctree: generated/ + + Brain + ClickableImage + add_background_image + centers_to_edges + compare_fiff + circular_layout + iter_topography + mne_analyze_colormap + plot_bem + plot_brain_colorbar + plot_connectivity_circle + plot_cov + plot_csd + plot_dipole_amplitudes + plot_dipole_locations + plot_drop_log + plot_epochs + plot_epochs_psd_topomap + plot_events + plot_evoked + plot_evoked_image + plot_evoked_topo + plot_evoked_topomap + plot_evoked_joint + plot_evoked_field + plot_evoked_white + plot_filter + plot_head_positions + plot_ideal_filter + plot_compare_evokeds + plot_ica_sources + plot_ica_components + plot_ica_properties + plot_ica_scores + plot_ica_overlay + plot_epochs_image + plot_layout + plot_montage + plot_projs_topomap + plot_raw + plot_raw_psd + plot_sensors + plot_sensors_connectivity + plot_snr_estimate + plot_source_estimates + link_brains + plot_volume_source_estimates + plot_vector_source_estimates + plot_sparse_source_estimates + plot_tfr_topomap + plot_topo_image_epochs + plot_topomap + plot_alignment + snapshot_brain_montage + plot_arrowmap + set_3d_backend + get_3d_backend + use_3d_backend + set_3d_options + set_3d_view + set_3d_title + create_3d_figure + get_brain_class diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 52d935b18ed..c351396572f 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -6,6 +6,7 @@ What's new .. currentmodule:: mne .. include:: changes/latest.inc +.. include:: changes/0.22.inc .. include:: changes/0.21.inc .. include:: changes/0.20.inc .. 
include:: changes/0.19.inc diff --git a/environment.yml b/environment.yml index a30433293b9..a5cbdea0ed2 100644 --- a/environment.yml +++ b/environment.yml @@ -18,12 +18,9 @@ dependencies: - joblib - psutil - numexpr -- traits -- pyface -- traitsui - imageio - tqdm -- spyder-kernels +- spyder-kernels>=1.10.0 - imageio-ffmpeg>=0.4.1 - vtk>=9.0.1 - pyvista>=0.24 @@ -34,6 +31,9 @@ dependencies: - nibabel - nilearn - python-picard -- pyqt +- pyqt!=5.15.3 - mne - mffpy>=0.5.7 +- ipywidgets +- pip: + - ipyvtk-simple diff --git a/examples/connectivity/plot_cwt_sensor_connectivity.py b/examples/connectivity/cwt_sensor_connectivity.py similarity index 100% rename from examples/connectivity/plot_cwt_sensor_connectivity.py rename to examples/connectivity/cwt_sensor_connectivity.py diff --git a/examples/connectivity/plot_mixed_source_space_connectivity.py b/examples/connectivity/mixed_source_space_connectivity.py similarity index 98% rename from examples/connectivity/plot_mixed_source_space_connectivity.py rename to examples/connectivity/mixed_source_space_connectivity.py index c3a724de57b..013d0e2f5ef 100644 --- a/examples/connectivity/plot_mixed_source_space_connectivity.py +++ b/examples/connectivity/mixed_source_space_connectivity.py @@ -106,8 +106,8 @@ subjects_dir=subjects_dir) # Average the source estimates within each label of the cortical parcellation -# and each sub structures contained in the src space -# If mode = 'mean_flip' this option is used only for the cortical label +# and each sub-structure contained in the source space. +# When mode = 'mean_flip', this option is used only for the cortical labels. src = inverse_operator['src'] label_ts = mne.extract_label_time_course( stcs, labels_parc, src, mode='mean_flip', allow_empty=True, diff --git a/examples/connectivity/plot_mne_inverse_coherence_epochs.py b/examples/connectivity/mne_inverse_coherence_epochs.py similarity index 99% rename from examples/connectivity/plot_mne_inverse_coherence_epochs.py rename to examples/connectivity/mne_inverse_coherence_epochs.py index e0bcc2150ed..c5303ba840d 100644 --- a/examples/connectivity/plot_mne_inverse_coherence_epochs.py +++ b/examples/connectivity/mne_inverse_coherence_epochs.py @@ -6,7 +6,6 @@ This example computes the coherence between a seed in the left auditory cortex and the rest of the brain based on single-trial MNE-dSPM inverse solutions. - """ # Author: Martin Luessi # diff --git a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py b/examples/connectivity/mne_inverse_connectivity_spectrum.py similarity index 99% rename from examples/connectivity/plot_mne_inverse_connectivity_spectrum.py rename to examples/connectivity/mne_inverse_connectivity_spectrum.py index 31e5d33d6fa..3cc10a8a9c2 100644 --- a/examples/connectivity/plot_mne_inverse_connectivity_spectrum.py +++ b/examples/connectivity/mne_inverse_connectivity_spectrum.py @@ -6,6 +6,7 @@ The connectivity is computed between 4 labels across the spectrum between 7.5 Hz and 40 Hz. 
""" + # Authors: Alexandre Gramfort # # License: BSD (3-clause) diff --git a/examples/connectivity/plot_mne_inverse_envelope_correlation.py b/examples/connectivity/mne_inverse_envelope_correlation.py similarity index 97% rename from examples/connectivity/plot_mne_inverse_envelope_correlation.py rename to examples/connectivity/mne_inverse_envelope_correlation.py index b73845ed06f..cb20da23a53 100644 --- a/examples/connectivity/plot_mne_inverse_envelope_correlation.py +++ b/examples/connectivity/mne_inverse_envelope_correlation.py @@ -6,9 +6,10 @@ ============================================= Compute envelope correlations of orthogonalized activity -:footcite:`HippEtAl2012,KhanEtAl2018` in source -space using resting state CTF data. +:footcite:`HippEtAl2012,KhanEtAl2018` in source space using resting state +CTF data. """ + # Authors: Eric Larson # Sheraz Khan # Denis Engemann diff --git a/examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py b/examples/connectivity/mne_inverse_envelope_correlation_volume.py similarity index 97% rename from examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py rename to examples/connectivity/mne_inverse_envelope_correlation_volume.py index 0a8f4fa9b27..803fe8890bb 100644 --- a/examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py +++ b/examples/connectivity/mne_inverse_envelope_correlation_volume.py @@ -4,8 +4,8 @@ ==================================================== Compute envelope correlations of orthogonalized activity -:footcite:`HippEtAl2012,KhanEtAl2018` in source -space using resting state CTF data in a volume source space. +:footcite:`HippEtAl2012,KhanEtAl2018` in source space using resting state +CTF data in a volume source space. """ # Authors: Eric Larson diff --git a/examples/connectivity/plot_mne_inverse_label_connectivity.py b/examples/connectivity/mne_inverse_label_connectivity.py similarity index 98% rename from examples/connectivity/plot_mne_inverse_label_connectivity.py rename to examples/connectivity/mne_inverse_label_connectivity.py index 7c18b46c20d..b61c63b2048 100644 --- a/examples/connectivity/plot_mne_inverse_label_connectivity.py +++ b/examples/connectivity/mne_inverse_label_connectivity.py @@ -94,8 +94,8 @@ # Average the source estimates within each label using sign-flips to reduce # signal cancellations, also here we return a generator src = inverse_operator['src'] -label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip', - return_generator=True) +label_ts = mne.extract_label_time_course( + stcs, labels, src, mode='mean_flip', return_generator=True) fmin = 8. fmax = 13. diff --git a/examples/connectivity/plot_mne_inverse_psi_visual.py b/examples/connectivity/mne_inverse_psi_visual.py similarity index 98% rename from examples/connectivity/plot_mne_inverse_psi_visual.py rename to examples/connectivity/mne_inverse_psi_visual.py index a2eb839eb13..df1a4dbe2aa 100644 --- a/examples/connectivity/plot_mne_inverse_psi_visual.py +++ b/examples/connectivity/mne_inverse_psi_visual.py @@ -19,7 +19,6 @@ # # License: BSD (3-clause) - import numpy as np import mne @@ -91,7 +90,7 @@ tmin_con = 0. 
sfreq = epochs.info['sfreq'] # the sampling frequency -psi, freqs, times, n_epochs, _ = phase_slope_index( +psi, _, _, _, _ = phase_slope_index( comb_ts, mode='multitaper', indices=indices, sfreq=sfreq, fmin=fmin, fmax=fmax, tmin=tmin_con) diff --git a/examples/connectivity/plot_sensor_connectivity.py b/examples/connectivity/sensor_connectivity.py similarity index 94% rename from examples/connectivity/plot_sensor_connectivity.py rename to examples/connectivity/sensor_connectivity.py index 2d9c7d263a2..68adb2d0561 100644 --- a/examples/connectivity/plot_sensor_connectivity.py +++ b/examples/connectivity/sensor_connectivity.py @@ -13,7 +13,6 @@ # License: BSD (3-clause) import mne -from mne import io from mne.connectivity import spectral_connectivity from mne.datasets import sample from mne.viz import plot_sensors_connectivity @@ -27,7 +26,7 @@ event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Setup for reading the raw data -raw = io.read_raw_fif(raw_fname) +raw = mne.io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) # Add a bad channel @@ -43,8 +42,8 @@ baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) # Compute connectivity for band containing the evoked response. -# We exclude the baseline period -fmin, fmax = 3., 9. +# We exclude the baseline period: +fmin, fmax = 4., 9. sfreq = raw.info['sfreq'] # the sampling frequency tmin = 0.0 # exclude the baseline period epochs.load_data().pick_types(meg='grad') # just keep MEG and no EOG now diff --git a/examples/datasets/plot_brainstorm_data.py b/examples/datasets/brainstorm_data.py similarity index 100% rename from examples/datasets/plot_brainstorm_data.py rename to examples/datasets/brainstorm_data.py diff --git a/examples/datasets/plot_hf_sef_data.py b/examples/datasets/hf_sef_data.py similarity index 100% rename from examples/datasets/plot_hf_sef_data.py rename to examples/datasets/hf_sef_data.py diff --git a/examples/datasets/plot_limo_data.py b/examples/datasets/limo_data.py similarity index 100% rename from examples/datasets/plot_limo_data.py rename to examples/datasets/limo_data.py diff --git a/examples/datasets/plot_opm_data.py b/examples/datasets/opm_data.py similarity index 100% rename from examples/datasets/plot_opm_data.py rename to examples/datasets/opm_data.py diff --git a/examples/datasets/spm_faces_dataset.py b/examples/datasets/spm_faces_dataset_sgskip.py similarity index 97% rename from examples/datasets/spm_faces_dataset.py rename to examples/datasets/spm_faces_dataset_sgskip.py index 7351d0db87d..e7d1f51a84a 100644 --- a/examples/datasets/spm_faces_dataset.py +++ b/examples/datasets/spm_faces_dataset_sgskip.py @@ -63,7 +63,8 @@ baseline=baseline, preload=True, reject=reject) # Fit ICA, find and remove major artifacts -ica = ICA(n_components=0.95, random_state=0).fit(raw, decim=1, reject=reject) +ica = ICA(n_components=0.95, max_iter='auto', random_state=0) +ica.fit(raw, decim=1, reject=reject) # compute correlation scores, get bad indices sorted by score eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject) diff --git a/examples/decoding/plot_decoding_csp_eeg.py b/examples/decoding/decoding_csp_eeg.py similarity index 100% rename from examples/decoding/plot_decoding_csp_eeg.py rename to examples/decoding/decoding_csp_eeg.py diff --git a/examples/decoding/plot_decoding_csp_timefreq.py b/examples/decoding/decoding_csp_timefreq.py similarity index 99% rename from examples/decoding/plot_decoding_csp_timefreq.py rename to 
examples/decoding/decoding_csp_timefreq.py index a2dff6e8678..437c284b716 100644 --- a/examples/decoding/plot_decoding_csp_timefreq.py +++ b/examples/decoding/decoding_csp_timefreq.py @@ -1,4 +1,6 @@ """ +.. _ex-decoding-csp-eeg-timefreq: + ==================================================================== Decoding in time-frequency space using Common Spatial Patterns (CSP) ==================================================================== diff --git a/examples/decoding/decoding_rsa.py b/examples/decoding/decoding_rsa_sgskip.py similarity index 100% rename from examples/decoding/decoding_rsa.py rename to examples/decoding/decoding_rsa_sgskip.py diff --git a/examples/decoding/plot_decoding_spatio_temporal_source.py b/examples/decoding/decoding_spatio_temporal_source.py similarity index 100% rename from examples/decoding/plot_decoding_spatio_temporal_source.py rename to examples/decoding/decoding_spatio_temporal_source.py diff --git a/examples/decoding/plot_decoding_spoc_CMC.py b/examples/decoding/decoding_spoc_CMC.py similarity index 99% rename from examples/decoding/plot_decoding_spoc_CMC.py rename to examples/decoding/decoding_spoc_CMC.py index 925fd81ca82..08e130e7822 100644 --- a/examples/decoding/plot_decoding_spoc_CMC.py +++ b/examples/decoding/decoding_spoc_CMC.py @@ -1,4 +1,6 @@ """ +.. _ex-spoc-cmc: + ==================================== Continuous Target Decoding with SPoC ==================================== diff --git a/examples/decoding/plot_decoding_time_generalization_conditions.py b/examples/decoding/decoding_time_generalization_conditions.py similarity index 100% rename from examples/decoding/plot_decoding_time_generalization_conditions.py rename to examples/decoding/decoding_time_generalization_conditions.py diff --git a/examples/decoding/plot_decoding_unsupervised_spatial_filter.py b/examples/decoding/decoding_unsupervised_spatial_filter.py similarity index 100% rename from examples/decoding/plot_decoding_unsupervised_spatial_filter.py rename to examples/decoding/decoding_unsupervised_spatial_filter.py diff --git a/examples/decoding/plot_decoding_xdawn_eeg.py b/examples/decoding/decoding_xdawn_eeg.py similarity index 99% rename from examples/decoding/plot_decoding_xdawn_eeg.py rename to examples/decoding/decoding_xdawn_eeg.py index a9ef1f06bad..6aeabd7337b 100644 --- a/examples/decoding/plot_decoding_xdawn_eeg.py +++ b/examples/decoding/decoding_xdawn_eeg.py @@ -1,4 +1,6 @@ """ +.. _ex-xdawn-decoding: + ============================ XDAWN Decoding From EEG data ============================ diff --git a/examples/decoding/plot_ems_filtering.py b/examples/decoding/ems_filtering.py similarity index 99% rename from examples/decoding/plot_ems_filtering.py rename to examples/decoding/ems_filtering.py index 24969e452d0..6ff4e4b3802 100644 --- a/examples/decoding/plot_ems_filtering.py +++ b/examples/decoding/ems_filtering.py @@ -1,4 +1,6 @@ """ +.. _ex-ems-filtering: + ============================================== Compute effect-matched-spatial filtering (EMS) ============================================== diff --git a/examples/decoding/plot_linear_model_patterns.py b/examples/decoding/linear_model_patterns.py similarity index 99% rename from examples/decoding/plot_linear_model_patterns.py rename to examples/decoding/linear_model_patterns.py index 3e7718de20c..88ae540b347 100644 --- a/examples/decoding/plot_linear_model_patterns.py +++ b/examples/decoding/linear_model_patterns.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- """ +.. 
_ex-linear-patterns: + =============================================================== Linear classifier on sensor data with plot patterns and filters =============================================================== diff --git a/examples/decoding/plot_receptive_field_mtrf.py b/examples/decoding/receptive_field_mtrf.py similarity index 100% rename from examples/decoding/plot_receptive_field_mtrf.py rename to examples/decoding/receptive_field_mtrf.py diff --git a/examples/decoding/plot_ssd_spatial_filters.py b/examples/decoding/ssd_spatial_filters.py similarity index 98% rename from examples/decoding/plot_ssd_spatial_filters.py rename to examples/decoding/ssd_spatial_filters.py index 927b41aad50..b2710266631 100644 --- a/examples/decoding/plot_ssd_spatial_filters.py +++ b/examples/decoding/ssd_spatial_filters.py @@ -1,6 +1,6 @@ """ =========================================================== -Compute Sepctro-Spatial Decomposition (SSD) spatial filters +Compute Spectro-Spatial Decomposition (SSD) spatial filters =========================================================== In this example, we will compute spatial filters for retaining diff --git a/examples/forward/plot_forward_sensitivity_maps.py b/examples/forward/forward_sensitivity_maps.py similarity index 100% rename from examples/forward/plot_forward_sensitivity_maps.py rename to examples/forward/forward_sensitivity_maps.py diff --git a/examples/forward/plot_left_cerebellum_volume_source.py b/examples/forward/left_cerebellum_volume_source.py similarity index 100% rename from examples/forward/plot_left_cerebellum_volume_source.py rename to examples/forward/left_cerebellum_volume_source.py diff --git a/examples/forward/plot_source_space_morphing.py b/examples/forward/source_space_morphing.py similarity index 100% rename from examples/forward/plot_source_space_morphing.py rename to examples/forward/source_space_morphing.py diff --git a/examples/inverse/plot_compute_mne_inverse_epochs_in_label.py b/examples/inverse/compute_mne_inverse_epochs_in_label.py similarity index 100% rename from examples/inverse/plot_compute_mne_inverse_epochs_in_label.py rename to examples/inverse/compute_mne_inverse_epochs_in_label.py diff --git a/examples/inverse/plot_compute_mne_inverse_raw_in_label.py b/examples/inverse/compute_mne_inverse_raw_in_label.py similarity index 100% rename from examples/inverse/plot_compute_mne_inverse_raw_in_label.py rename to examples/inverse/compute_mne_inverse_raw_in_label.py diff --git a/examples/inverse/plot_compute_mne_inverse_volume.py b/examples/inverse/compute_mne_inverse_volume.py similarity index 100% rename from examples/inverse/plot_compute_mne_inverse_volume.py rename to examples/inverse/compute_mne_inverse_volume.py diff --git a/examples/inverse/plot_covariance_whitening_dspm.py b/examples/inverse/covariance_whitening_dspm.py similarity index 100% rename from examples/inverse/plot_covariance_whitening_dspm.py rename to examples/inverse/covariance_whitening_dspm.py diff --git a/examples/inverse/plot_custom_inverse_solver.py b/examples/inverse/custom_inverse_solver.py similarity index 100% rename from examples/inverse/plot_custom_inverse_solver.py rename to examples/inverse/custom_inverse_solver.py diff --git a/examples/inverse/plot_dics_source_power.py b/examples/inverse/dics_source_power.py similarity index 100% rename from examples/inverse/plot_dics_source_power.py rename to examples/inverse/dics_source_power.py diff --git a/examples/inverse/plot_evoked_ers_source_power.py b/examples/inverse/evoked_ers_source_power.py 
similarity index 100% rename from examples/inverse/plot_evoked_ers_source_power.py rename to examples/inverse/evoked_ers_source_power.py diff --git a/examples/inverse/plot_gamma_map_inverse.py b/examples/inverse/gamma_map_inverse.py similarity index 100% rename from examples/inverse/plot_gamma_map_inverse.py rename to examples/inverse/gamma_map_inverse.py diff --git a/examples/inverse/plot_label_activation_from_stc.py b/examples/inverse/label_activation_from_stc.py similarity index 100% rename from examples/inverse/plot_label_activation_from_stc.py rename to examples/inverse/label_activation_from_stc.py diff --git a/examples/inverse/plot_label_from_stc.py b/examples/inverse/label_from_stc.py similarity index 100% rename from examples/inverse/plot_label_from_stc.py rename to examples/inverse/label_from_stc.py diff --git a/examples/inverse/plot_label_source_activations.py b/examples/inverse/label_source_activations.py similarity index 100% rename from examples/inverse/plot_label_source_activations.py rename to examples/inverse/label_source_activations.py diff --git a/examples/inverse/plot_mixed_norm_inverse.py b/examples/inverse/mixed_norm_inverse.py similarity index 100% rename from examples/inverse/plot_mixed_norm_inverse.py rename to examples/inverse/mixed_norm_inverse.py diff --git a/examples/inverse/plot_mixed_source_space_inverse.py b/examples/inverse/mixed_source_space_inverse.py similarity index 97% rename from examples/inverse/plot_mixed_source_space_inverse.py rename to examples/inverse/mixed_source_space_inverse.py index f9d7a6a56ce..3d491ab48c8 100644 --- a/examples/inverse/plot_mixed_source_space_inverse.py +++ b/examples/inverse/mixed_source_space_inverse.py @@ -85,7 +85,7 @@ # # >>> write_source_spaces(fname_mixed_src, src, overwrite=True) # -# We can also export source positions to nifti file and visualize it again: +# We can also export source positions to NIfTI file and visualize it again: nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject) src.export_volume(nii_fname, mri_resolution=True, overwrite=True) @@ -139,7 +139,8 @@ pick_ori='vector') brain = stc_vec.plot( hemi='both', src=inverse_operator['src'], views='coronal', - initial_time=initial_time, subjects_dir=subjects_dir) + initial_time=initial_time, subjects_dir=subjects_dir, + brain_kwargs=dict(silhouette=True)) ############################################################################### # Plot the surface @@ -148,7 +149,7 @@ subjects_dir=subjects_dir) ############################################################################### # Plot the volume -# ---------------- +# --------------- fig = stc.volume().plot(initial_time=initial_time, src=src, subjects_dir=subjects_dir) diff --git a/examples/inverse/plot_mne_cov_power.py b/examples/inverse/mne_cov_power.py similarity index 100% rename from examples/inverse/plot_mne_cov_power.py rename to examples/inverse/mne_cov_power.py diff --git a/examples/inverse/plot_morph_surface_stc.py b/examples/inverse/morph_surface_stc.py similarity index 100% rename from examples/inverse/plot_morph_surface_stc.py rename to examples/inverse/morph_surface_stc.py diff --git a/examples/inverse/plot_morph_volume_stc.py b/examples/inverse/morph_volume_stc.py similarity index 98% rename from examples/inverse/plot_morph_volume_stc.py rename to examples/inverse/morph_volume_stc.py index 6840022ef8f..72474bad3e2 100644 --- a/examples/inverse/plot_morph_volume_stc.py +++ b/examples/inverse/morph_volume_stc.py @@ -48,8 +48,7 @@ fname_src_fsaverage = subjects_dir + 
'/fsaverage/bem/fsaverage-vol-5-src.fif' ############################################################################### -# Compute example data. For reference see -# :ref:`sphx_glr_auto_examples_inverse_plot_compute_mne_inverse_volume.py` +# Compute example data. For reference see :ref:`ex-inverse-volume`. # # Load data: evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) diff --git a/examples/inverse/plot_multidict_reweighted_tfmxne.py b/examples/inverse/multidict_reweighted_tfmxne.py similarity index 63% rename from examples/inverse/plot_multidict_reweighted_tfmxne.py rename to examples/inverse/multidict_reweighted_tfmxne.py index a7571cac933..8b9b3064fd3 100644 --- a/examples/inverse/plot_multidict_reweighted_tfmxne.py +++ b/examples/inverse/multidict_reweighted_tfmxne.py @@ -5,15 +5,15 @@ The iterative reweighted TF-MxNE solver is a distributed inverse method based on the TF-MxNE solver, which promotes focal (sparse) sources -:footcite:`StrohmeierEtAl2015`. The benefit of this approach is that: - - - it is spatio-temporal without assuming stationarity (sources properties - can vary over time), - - activations are localized in space, time and frequency in one step, - - the solver uses non-convex penalties in the TF domain, which results in a - solution less biased towards zero than when simple TF-MxNE is used, - - using a multiscale dictionary allows to capture short transient - activations along with slower brain waves :footcite:`BekhtiEtAl2016`. +:footcite:`StrohmeierEtAl2015`. The benefits of this approach are that: + +- it is spatio-temporal without assuming stationarity (source properties + can vary over time), +- activations are localized in space, time, and frequency in one step, +- the solver uses non-convex penalties in the TF domain, which results in a + solution less biased towards zero than when simple TF-MxNE is used, +- using a multiscale dictionary allows to capture short transient + activations along with slower brain waves :footcite:`BekhtiEtAl2016`. """ # Author: Mathurin Massias # Yousra Bekhti @@ -43,23 +43,22 @@ fwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject), 'sub-{}_task-{}-fwd.fif'.format(subject, task)) -condition = 'Unknown' - # Read evoked raw = mne.io.read_raw_fif(raw_fname) +raw.pick_types(meg=True, eog=True, stim=True) events = mne.find_events(raw, stim_channel='STI 014') + reject = dict(grad=4000e-13, eog=350e-6) -picks = mne.pick_types(raw.info, meg=True, eog=True) +event_id, tmin, tmax = dict(unknown=1), -0.5, 0.5 +epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject, + baseline=(None, 0)) +evoked = epochs.average() -event_id, tmin, tmax = 1, -1., 3. -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, - reject=reject, preload=True) -evoked = epochs.filter(1, None).average() -evoked = evoked.pick_types(meg=True) -evoked.crop(tmin=0.008, tmax=0.2) +evoked.crop(tmin=0.0, tmax=0.2) # Compute noise covariance matrix cov = mne.compute_covariance(epochs, rank='info', tmax=0.) +del epochs, raw # Handling forward solution forward = mne.read_forward_solution(fwd_fname) @@ -68,7 +67,7 @@ # Run iterative reweighted multidict TF-MxNE solver alpha, l1_ratio = 20, 0.05 -loose, depth = 1, 0.95 +loose, depth = 0.9, 1. 
# Use a multiscale time-frequency dictionary wsize, tstep = [4, 16], [2, 4] @@ -82,33 +81,21 @@ wsize=wsize, tstep=tstep, return_as_dipoles=True, return_residual=True) -# Crop to remove edges -for dip in dipoles: - dip.crop(tmin=-0.05, tmax=0.3) -evoked.crop(tmin=-0.05, tmax=0.3) -residual.crop(tmin=-0.05, tmax=0.3) - - ############################################################################### # Generate stc from dipoles -stc = make_stc_from_dipoles(dipoles, forward['src']) - -plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1), - opacity=0.1, fig_name="irTF-MxNE (cond %s)" - % condition) +stc = make_stc_from_dipoles(dipoles, forward['src']) +plot_sparse_source_estimates( + forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1, + fig_name=f"irTF-MxNE (cond {evoked.comment})") ############################################################################### # Show the evoked response and the residual for gradiometers ylim = dict(grad=[-300, 300]) -evoked.pick_types(meg='grad') -evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim, - proj=True) - -residual.pick_types(meg='grad') -residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim, - proj=True) - +evoked.copy().pick_types(meg='grad').plot( + titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim) +residual.copy().pick_types(meg='grad').plot( + titles=dict(grad='Residuals: Gradiometers'), ylim=ylim) ############################################################################### # References diff --git a/examples/inverse/plot_tf_dics.py b/examples/inverse/plot_tf_dics.py deleted file mode 100644 index c9d84624cf6..00000000000 --- a/examples/inverse/plot_tf_dics.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -===================================== -Time-frequency beamforming using DICS -===================================== - -Compute DICS source power :footcite:`DalalEtAl2008` in a grid of time-frequency -windows. -""" -# Author: Roman Goj -# -# License: BSD (3-clause) - -import mne -from mne.event import make_fixed_length_events -from mne.datasets import sample -from mne.time_frequency import csd_fourier -from mne.beamformer import tf_dics -from mne.viz import plot_source_spectrogram - -print(__doc__) - -data_path = sample.data_path() -raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' -noise_fname = data_path + '/MEG/sample/ernoise_raw.fif' -event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif' -fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' -subjects_dir = data_path + '/subjects' -label_name = 'Aud-lh' -fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name - -############################################################################### -# Read raw data -raw = mne.io.read_raw_fif(raw_fname, preload=True) -raw.info['bads'] = ['MEG 2443'] # 1 bad MEG channel - -# Pick a selection of magnetometer channels. A subset of all channels was used -# to speed up the example. For a solution based on all MEG channels use -# meg=True, selection=None and add mag=4e-12 to the reject dictionary. -left_temporal_channels = mne.read_selection('Left-temporal') -picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False, - stim=False, exclude='bads', - selection=left_temporal_channels) -raw.pick_channels([raw.ch_names[pick] for pick in picks]) -reject = dict(mag=4e-12) -# Re-normalize our empty-room projectors, which should be fine after -# subselection -raw.info.normalize_proj() - -# Setting time windows. 
Note that tmin and tmax are set so that time-frequency -# beamforming will be performed for a wider range of time points than will -# later be displayed on the final spectrogram. This ensures that all time bins -# displayed represent an average of an equal number of time windows. -tmin, tmax, tstep = -0.5, 0.75, 0.05 # s -tmin_plot, tmax_plot = -0.3, 0.5 # s - -# Read epochs -event_id = 1 -events = mne.read_events(event_fname) -epochs = mne.Epochs(raw, events, event_id, tmin, tmax, - baseline=None, preload=True, proj=True, reject=reject) - -# Read empty room noise raw data -raw_noise = mne.io.read_raw_fif(noise_fname, preload=True) -raw_noise.info['bads'] = ['MEG 2443'] # 1 bad MEG channel -raw_noise.pick_channels([raw_noise.ch_names[pick] for pick in picks]) -raw_noise.info.normalize_proj() - -# Create noise epochs and make sure the number of noise epochs corresponds to -# the number of data epochs -events_noise = make_fixed_length_events(raw_noise, event_id) -epochs_noise = mne.Epochs(raw_noise, events_noise, event_id, tmin_plot, - tmax_plot, baseline=None, preload=True, proj=True, - reject=reject) -epochs_noise.info.normalize_proj() -epochs_noise.apply_proj() -# then make sure the number of epochs is the same -epochs_noise = epochs_noise[:len(epochs.events)] - -# Read forward operator -forward = mne.read_forward_solution(fname_fwd) - -# Read label -label = mne.read_label(fname_label) - -############################################################################### -# Time-frequency beamforming based on DICS - -# Setting frequency bins as in Dalal et al. 2008 -freq_bins = [(4, 12), (12, 30), (30, 55), (65, 300)] # Hz -win_lengths = [0.3, 0.2, 0.15, 0.1] # s -# Then set FFTs length for each frequency range. -# Should be a power of 2 to be faster. -n_ffts = [256, 128, 128, 128] - -# Subtract evoked response prior to computation? -subtract_evoked = False - -# Calculating noise cross-spectral density from empty room noise for each -# frequency bin and the corresponding time window length. To calculate noise -# from the baseline period in the data, change epochs_noise to epochs -noise_csds = [] -for freq_bin, win_length, n_fft in zip(freq_bins, win_lengths, n_ffts): - noise_csd = csd_fourier(epochs_noise, fmin=freq_bin[0], fmax=freq_bin[1], - tmin=-win_length, tmax=0, n_fft=n_fft) - noise_csds.append(noise_csd.sum()) - -# Computing DICS solutions for time-frequency windows in a label in source -# space for faster computation, use label=None for full solution -stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths, - freq_bins=freq_bins, subtract_evoked=subtract_evoked, - n_ffts=n_ffts, reg=0.05, label=label, inversion='matrix') - -# Plotting source spectrogram for source with maximum activity -# Note that tmin and tmax are set to display a time range that is smaller than -# the one for which beamforming estimates were calculated. This ensures that -# all time bins shown are a result of smoothing across an identical number of -# time windows. -plot_source_spectrogram(stcs, freq_bins, tmin=tmin_plot, tmax=tmax_plot, - source_index=None, colorbar=True) - -############################################################################### -# References -# ---------- -# .. 
footbibliography:: diff --git a/examples/inverse/plot_psf_ctf_label_leakage.py b/examples/inverse/psf_ctf_label_leakage.py similarity index 100% rename from examples/inverse/plot_psf_ctf_label_leakage.py rename to examples/inverse/psf_ctf_label_leakage.py diff --git a/examples/inverse/plot_psf_ctf_vertices.py b/examples/inverse/psf_ctf_vertices.py similarity index 100% rename from examples/inverse/plot_psf_ctf_vertices.py rename to examples/inverse/psf_ctf_vertices.py diff --git a/examples/inverse/plot_psf_ctf_vertices_lcmv.py b/examples/inverse/psf_ctf_vertices_lcmv.py similarity index 100% rename from examples/inverse/plot_psf_ctf_vertices_lcmv.py rename to examples/inverse/psf_ctf_vertices_lcmv.py diff --git a/examples/inverse/plot_rap_music.py b/examples/inverse/rap_music.py similarity index 100% rename from examples/inverse/plot_rap_music.py rename to examples/inverse/rap_music.py diff --git a/examples/inverse/plot_read_inverse.py b/examples/inverse/read_inverse.py similarity index 100% rename from examples/inverse/plot_read_inverse.py rename to examples/inverse/read_inverse.py diff --git a/examples/inverse/plot_read_stc.py b/examples/inverse/read_stc.py similarity index 100% rename from examples/inverse/plot_read_stc.py rename to examples/inverse/read_stc.py diff --git a/examples/inverse/plot_resolution_metrics.py b/examples/inverse/resolution_metrics.py similarity index 100% rename from examples/inverse/plot_resolution_metrics.py rename to examples/inverse/resolution_metrics.py diff --git a/examples/inverse/plot_resolution_metrics_eegmeg.py b/examples/inverse/resolution_metrics_eegmeg.py similarity index 100% rename from examples/inverse/plot_resolution_metrics_eegmeg.py rename to examples/inverse/resolution_metrics_eegmeg.py diff --git a/examples/inverse/plot_snr_estimate.py b/examples/inverse/snr_estimate.py similarity index 100% rename from examples/inverse/plot_snr_estimate.py rename to examples/inverse/snr_estimate.py diff --git a/examples/inverse/plot_source_space_snr.py b/examples/inverse/source_space_snr.py similarity index 98% rename from examples/inverse/plot_source_space_snr.py rename to examples/inverse/source_space_snr.py index e7904f12838..ef22092a8be 100644 --- a/examples/inverse/plot_source_space_snr.py +++ b/examples/inverse/source_space_snr.py @@ -53,7 +53,7 @@ ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG') fig.tight_layout() -# Find time point of maximum SNR: +# Find time point of maximum SNR maxidx = np.argmax(ave) # Plot SNR on source space at the time point of maximum SNR: diff --git a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py b/examples/inverse/time_frequency_mixed_norm_inverse.py similarity index 98% rename from examples/inverse/plot_time_frequency_mixed_norm_inverse.py rename to examples/inverse/time_frequency_mixed_norm_inverse.py index 5f553d0a21b..400d19bb084 100644 --- a/examples/inverse/plot_time_frequency_mixed_norm_inverse.py +++ b/examples/inverse/time_frequency_mixed_norm_inverse.py @@ -5,7 +5,7 @@ The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA) that promotes focal (sparse) sources (such as dipole fitting techniques) -:footcite:`GramfortEtAl2013` :footcite:`GramfortEtAl2011`. +:footcite:`GramfortEtAl2013b,GramfortEtAl2011`. 
The benefit of this approach is that: - it is spatio-temporal without assuming stationarity (sources properties diff --git a/examples/inverse/plot_vector_mne_solution.py b/examples/inverse/vector_mne_solution.py similarity index 98% rename from examples/inverse/plot_vector_mne_solution.py rename to examples/inverse/vector_mne_solution.py index 0a8983480fa..1ac9f2873b5 100644 --- a/examples/inverse/plot_vector_mne_solution.py +++ b/examples/inverse/vector_mne_solution.py @@ -67,7 +67,7 @@ stc_max, directions = stc.project('pca', src=inv['src']) # These directions must by design be close to the normals because this -# inverse was computed with loose=0.2: +# inverse was computed with loose=0.2 print('Absolute cosine similarity between source normals and directions: ' f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}') brain_max = stc_max.plot( diff --git a/examples/io/plot_elekta_epochs.py b/examples/io/elekta_epochs.py similarity index 100% rename from examples/io/plot_elekta_epochs.py rename to examples/io/elekta_epochs.py diff --git a/examples/io/plot_read_neo_format.py b/examples/io/read_neo_format.py similarity index 100% rename from examples/io/plot_read_neo_format.py rename to examples/io/read_neo_format.py diff --git a/examples/io/plot_read_noise_covariance_matrix.py b/examples/io/read_noise_covariance_matrix.py similarity index 78% rename from examples/io/plot_read_noise_covariance_matrix.py rename to examples/io/read_noise_covariance_matrix.py index 0359cf28080..4efc25a5da4 100644 --- a/examples/io/plot_read_noise_covariance_matrix.py +++ b/examples/io/read_noise_covariance_matrix.py @@ -3,7 +3,7 @@ Reading/Writing a noise covariance matrix ========================================= -Plot a noise covariance matrix. +How to plot a noise covariance matrix. """ # Author: Alexandre Gramfort # @@ -13,17 +13,15 @@ import mne from mne.datasets import sample -print(__doc__) - data_path = sample.data_path() fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif') fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') cov = mne.read_cov(fname_cov) print(cov) -evoked = mne.read_evokeds(fname_evo)[0] +ev_info = mne.io.read_info(fname_evo) ############################################################################### -# Show covariance +# Plot covariance -cov.plot(evoked.info, exclude='bads', show_svd=False) +cov.plot(ev_info, exclude='bads', show_svd=False) diff --git a/examples/io/read_xdf.py b/examples/io/read_xdf.py new file mode 100644 index 00000000000..f846503d7e9 --- /dev/null +++ b/examples/io/read_xdf.py @@ -0,0 +1,42 @@ +""" +.. _ex-read-xdf: + +==================== +Reading XDF EEG data +==================== + +Here we read some sample XDF data. Although we do not analyze it here, this +recording is of a short parallel auditory response (pABR) experiment +:footcite:`PolonenkoMaddox2019` and was provided by the `Maddox Lab +`__. 
+""" +# Authors: Clemens Brunner +# Eric Larson +# +# License: BSD (3-clause) + +import os.path as op + +import pyxdf + +import mne +from mne.datasets import misc + +fname = op.join( + misc.data_path(), 'xdf', + 'sub-P001_ses-S004_task-Default_run-001_eeg_a2.xdf') +streams, header = pyxdf.load_xdf(fname) +data = streams[0]["time_series"].T +assert data.shape[0] == 5 # four raw EEG plus one stim channel +data[:4:2] -= data[1:4:2] # subtract (rereference) to get two bipolar EEG +data = data[::2] # subselect +data[:2] *= (1e-6 / 50 / 2) # uV -> V and preamp gain +sfreq = float(streams[0]["info"]["nominal_srate"][0]) +info = mne.create_info(3, sfreq, ["eeg", "eeg", "stim"]) +raw = mne.io.RawArray(data, info) +raw.plot(scalings=dict(eeg=100e-6), duration=1, start=14) + +############################################################################### +# References +# ---------- +# .. footbibliography:: diff --git a/examples/preprocessing/plot_define_target_events.py b/examples/preprocessing/define_target_events.py similarity index 100% rename from examples/preprocessing/plot_define_target_events.py rename to examples/preprocessing/define_target_events.py diff --git a/examples/preprocessing/plot_eeg_csd.py b/examples/preprocessing/eeg_csd.py similarity index 97% rename from examples/preprocessing/plot_eeg_csd.py rename to examples/preprocessing/eeg_csd.py index 3935d4e58c8..d0fb8dfa33a 100644 --- a/examples/preprocessing/plot_eeg_csd.py +++ b/examples/preprocessing/eeg_csd.py @@ -4,7 +4,7 @@ ===================================================== This script shows an example of how to use CSD -:footcite`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`. +:footcite:`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`. CSD takes the spatial Laplacian of the sensor signal (derivative in both x and y). It does what a planar gradiometer does in MEG. Computing these spatial derivatives reduces point spread. CSD transformed data have a sharper diff --git a/examples/preprocessing/plot_eog_artifact_histogram.py b/examples/preprocessing/eog_artifact_histogram.py similarity index 100% rename from examples/preprocessing/plot_eog_artifact_histogram.py rename to examples/preprocessing/eog_artifact_histogram.py diff --git a/examples/preprocessing/plot_find_ref_artifacts.py b/examples/preprocessing/find_ref_artifacts.py similarity index 96% rename from examples/preprocessing/plot_find_ref_artifacts.py rename to examples/preprocessing/find_ref_artifacts.py index d72de7204e1..71e347ded21 100644 --- a/examples/preprocessing/plot_find_ref_artifacts.py +++ b/examples/preprocessing/find_ref_artifacts.py @@ -70,7 +70,8 @@ fit_params=dict(tol=1e-4), # use a high tol here for speed ) all_picks = mne.pick_types(raw_tog.info, meg=True, ref_meg=True) -ica_tog = ICA(n_components=60, allow_ref_meg=True, **ica_kwargs) +ica_tog = ICA(n_components=60, max_iter='auto', allow_ref_meg=True, + **ica_kwargs) ica_tog.fit(raw_tog, picks=all_picks) # low threshold (2.0) here because of cropped data, entire recording can use # a higher threshold (2.5) @@ -97,7 +98,8 @@ # Do ICA only on the reference channels. ref_picks = mne.pick_types(raw_sep.info, meg=False, ref_meg=True) -ica_ref = ICA(n_components=2, allow_ref_meg=True, **ica_kwargs) +ica_ref = ICA(n_components=2, max_iter='auto', allow_ref_meg=True, + **ica_kwargs) ica_ref.fit(raw_sep, picks=ref_picks) # Do ICA on both reference and standard channels. 
Here, we can just reuse diff --git a/examples/preprocessing/plot_fnirs_artifact_removal.py b/examples/preprocessing/fnirs_artifact_removal.py similarity index 100% rename from examples/preprocessing/plot_fnirs_artifact_removal.py rename to examples/preprocessing/fnirs_artifact_removal.py diff --git a/examples/preprocessing/plot_ica_comparison.py b/examples/preprocessing/ica_comparison.py similarity index 97% rename from examples/preprocessing/plot_ica_comparison.py rename to examples/preprocessing/ica_comparison.py index db56cca599d..537e6d4c4db 100644 --- a/examples/preprocessing/plot_ica_comparison.py +++ b/examples/preprocessing/ica_comparison.py @@ -42,7 +42,7 @@ def run_ica(method, fit_params=None): ica = ICA(n_components=20, method=method, fit_params=fit_params, - random_state=0) + max_iter='auto', random_state=0) t0 = time() ica.fit(raw, picks=picks, reject=reject) fit_time = time() - t0 diff --git a/examples/preprocessing/plot_interpolate_bad_channels.py b/examples/preprocessing/interpolate_bad_channels.py similarity index 97% rename from examples/preprocessing/plot_interpolate_bad_channels.py rename to examples/preprocessing/interpolate_bad_channels.py index a1af79d23dd..b384f5fdfa6 100644 --- a/examples/preprocessing/plot_interpolate_bad_channels.py +++ b/examples/preprocessing/interpolate_bad_channels.py @@ -1,4 +1,6 @@ """ +.. _ex-interpolate-bad-channels: + ============================================= Interpolate bad channels for MEG/EEG channels ============================================= diff --git a/examples/preprocessing/plot_movement_compensation.py b/examples/preprocessing/movement_compensation.py similarity index 100% rename from examples/preprocessing/plot_movement_compensation.py rename to examples/preprocessing/movement_compensation.py diff --git a/examples/preprocessing/plot_movement_detection.py b/examples/preprocessing/movement_detection.py similarity index 100% rename from examples/preprocessing/plot_movement_detection.py rename to examples/preprocessing/movement_detection.py diff --git a/examples/preprocessing/plot_muscle_detection.py b/examples/preprocessing/muscle_detection.py similarity index 100% rename from examples/preprocessing/plot_muscle_detection.py rename to examples/preprocessing/muscle_detection.py diff --git a/examples/preprocessing/plot_otp.py b/examples/preprocessing/otp.py similarity index 90% rename from examples/preprocessing/plot_otp.py rename to examples/preprocessing/otp.py index f48e0c97677..f4eb15542d0 100644 --- a/examples/preprocessing/plot_otp.py +++ b/examples/preprocessing/otp.py @@ -3,8 +3,8 @@ Plot sensor denoising using oversampled temporal projection =========================================================== -This demonstrates denoising using the OTP algorithm [1]_ on data with -with sensor artifacts (flux jumps) and random noise. +This demonstrates denoising using the OTP algorithm :footcite:`LarsonTaulu2018` +on data with sensor artifacts (flux jumps) and random noise. """ # Author: Eric Larson # # License: BSD (3-clause) @@ -79,6 +79,4 @@ def compute_bias(raw): ############################################################################### # References # ---------- -# .. [1] Larson E, Taulu S (2017). Reducing Sensor Noise in MEG and EEG -# Recordings Using Oversampled Temporal Projection. -# IEEE Transactions on Biomedical Engineering. +# ..
footbibliography:: diff --git a/examples/preprocessing/plot_run_ica.py b/examples/preprocessing/plot_run_ica.py deleted file mode 100644 index fe5efa07e03..00000000000 --- a/examples/preprocessing/plot_run_ica.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -================================ -Compute ICA components on epochs -================================ - -ICA is fit to MEG raw data. -We assume that the non-stationary EOG artifacts have already been removed. -The sources matching the ECG are automatically found and displayed. - -.. note:: This example does quite a bit of processing, so even on a - fast machine it can take about a minute to complete. -""" - -# Authors: Denis Engemann -# -# License: BSD (3-clause) - -import mne -from mne.preprocessing import ICA, create_ecg_epochs -from mne.datasets import sample - -print(__doc__) - -############################################################################### -# Read and preprocess the data. Preprocessing consists of: -# -# - MEG channel selection -# - 1-30 Hz band-pass filter -# - epoching -0.2 to 0.5 seconds with respect to events -# - rejection based on peak-to-peak amplitude - -data_path = sample.data_path() -raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' - -raw = mne.io.read_raw_fif(raw_fname) -raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data() -raw.filter(1, 30, fir_design='firwin') - -# peak-to-peak amplitude rejection parameters -reject = dict(grad=4000e-13, mag=4e-12) -# longer + more epochs for more artifact exposure -events = mne.find_events(raw, stim_channel='STI 014') -epochs = mne.Epochs(raw, events, event_id=None, tmin=-0.2, tmax=0.5, - reject=reject) - -############################################################################### -# Fit ICA model using the FastICA algorithm, detect and plot components -# explaining ECG artifacts. - -ica = ICA(n_components=0.95, method='fastica').fit(epochs) - -ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5) -ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto') - -ica.plot_components(ecg_inds) - -############################################################################### -# Plot properties of ECG components: -ica.plot_properties(epochs, picks=ecg_inds) - -############################################################################### -# Plot the estimated source of detected ECG related components -ica.plot_sources(raw, picks=ecg_inds) diff --git a/examples/preprocessing/plot_shift_evoked.py b/examples/preprocessing/shift_evoked.py similarity index 100% rename from examples/preprocessing/plot_shift_evoked.py rename to examples/preprocessing/shift_evoked.py diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/virtual_evoked.py similarity index 62% rename from examples/preprocessing/plot_virtual_evoked.py rename to examples/preprocessing/virtual_evoked.py index a8510c87b4d..dc658b53638 100644 --- a/examples/preprocessing/plot_virtual_evoked.py +++ b/examples/preprocessing/virtual_evoked.py @@ -26,14 +26,30 @@ fname = data_path + '/MEG/sample/sample_audvis-ave.fif' evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0)) -# go from grad + mag to mag +############################################################################### +# First, let's remap gradiometers to magnetometers, and plot +# the original and remapped topomaps of the magnetometers.
+ +# go from grad + mag to mag and plot original mag virt_evoked = evoked.as_type('mag') evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s') + +############################################################################### + +# plot interpolated grad + mag virt_evoked.plot_topomap(ch_type='mag', time_unit='s', title='mag (interpolated from mag + grad)') -# go from grad + mag to grad +############################################################################### +# Now, we remap magnetometers to gradiometers, and plot +# the original and remapped topomaps of the gradiometers. + +# go from grad + mag to grad and plot original grad virt_evoked = evoked.as_type('grad') evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s') + +############################################################################### + +# plot interpolated grad + mag virt_evoked.plot_topomap(ch_type='grad', time_unit='s', title='grad (interpolated from mag + grad)') diff --git a/examples/preprocessing/plot_xdawn_denoising.py b/examples/preprocessing/xdawn_denoising.py similarity index 83% rename from examples/preprocessing/plot_xdawn_denoising.py rename to examples/preprocessing/xdawn_denoising.py index 42837794497..cf206d9247b 100644 --- a/examples/preprocessing/plot_xdawn_denoising.py +++ b/examples/preprocessing/xdawn_denoising.py @@ -1,4 +1,6 @@ """ +.. _ex-xdawn-denoising: + =============== XDAWN Denoising =============== @@ -7,24 +9,13 @@ space and then projected back in the sensor space using only the first two XDAWN components. The process is similar to an ICA, but is supervised in order to maximize the signal to signal + noise ratio of the -evoked response. +evoked response :footcite:`RivetEtAl2009, RivetEtAl2011`. .. warning:: As this denoising method exploits the known events to maximize SNR of the contrast between conditions it can lead to overfitting. To avoid a statistical analysis problem you should split epochs used in fit with the ones used in apply method. - -References ---------- -[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN -algorithm to enhance evoked potentials: application to brain-computer -interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043. - -[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011, -August). Theoretical analysis of xDAWN algorithm: application to an -efficient sensor selection in a P300 BCI. In Signal Processing Conference, -2011 19th European (pp. 1382-1386). IEEE. """ # Authors: Alexandre Barachant @@ -85,3 +76,8 @@ # Plot image epoch after Xdawn plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500) + +############################################################################### +# References +# ---------- +# ..
footbibliography:: diff --git a/examples/simulation/plot_simulate_evoked_data.py b/examples/simulation/simulate_evoked_data.py similarity index 98% rename from examples/simulation/plot_simulate_evoked_data.py rename to examples/simulation/simulate_evoked_data.py index a205c360968..80e25b53aaf 100644 --- a/examples/simulation/plot_simulate_evoked_data.py +++ b/examples/simulation/simulate_evoked_data.py @@ -22,7 +22,7 @@ print(__doc__) ############################################################################### -# Load real data as templates +# Load real data as templates: data_path = sample.data_path() raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif') diff --git a/examples/simulation/plot_simulate_raw_data.py b/examples/simulation/simulate_raw_data.py similarity index 100% rename from examples/simulation/plot_simulate_raw_data.py rename to examples/simulation/simulate_raw_data.py diff --git a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py b/examples/simulation/simulated_raw_data_using_subject_anatomy.py similarity index 97% rename from examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py rename to examples/simulation/simulated_raw_data_using_subject_anatomy.py index dbc8e5954e3..c462e4e81c3 100644 --- a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py +++ b/examples/simulation/simulated_raw_data_using_subject_anatomy.py @@ -70,7 +70,8 @@ # In order to simulate source time courses, labels of desired active regions # need to be specified for each of the 4 simulation conditions. # Make a dictionary that maps conditions to activation strengths within -# aparc.a2009s [1]_ labels. In the aparc.a2009s parcellation: +# aparc.a2009s :footcite:`DestrieuxEtAl2010` labels. +# In the aparc.a2009s parcellation: # # - 'G_temp_sup-G_T_transv' is the label for primary auditory area # - 'S_calcarine' is the label for primary visual area @@ -234,6 +235,4 @@ def data_fun(times, latency, duration): ############################################################################### # References # ---------- -# .. [1] Destrieux C, Fischl B, Dale A, Halgren E (2010). Automatic -# parcellation of human cortical gyri and sulci using standard -# anatomical nomenclature, vol. 53(1), 1-15, NeuroImage. +# .. footbibliography:: diff --git a/examples/simulation/plot_source_simulator.py b/examples/simulation/source_simulator.py similarity index 98% rename from examples/simulation/plot_source_simulator.py rename to examples/simulation/source_simulator.py index aaa3c060aa7..27a13af287b 100644 --- a/examples/simulation/plot_source_simulator.py +++ b/examples/simulation/source_simulator.py @@ -60,7 +60,7 @@ class to generate source estimates and raw data. It is meant to be a brief # of the event, the second is not used, and the third is the event id. Here the # events occur every 200 samples. n_events = 50 -events = np.zeros((n_events, 3)) +events = np.zeros((n_events, 3), int) events[:, 0] = 100 + 200 * np.arange(n_events) # Events sample. events[:, 2] = 1 # All events have the sample id. 
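Editor's note: the ``source_simulator.py`` hunk above switches the simulated events array to an integer dtype, which is what MNE's event machinery expects (each row is ``[sample, previous trigger value, event id]``; the middle column is unused in that example). A minimal sketch of the pattern, restating the example's illustrative numbers (50 events, one every 200 samples, all sharing event id 1):

import numpy as np

n_events = 50
events = np.zeros((n_events, 3), int)            # integer dtype: samples and ids, not floats
events[:, 0] = 100 + 200 * np.arange(n_events)   # first column: sample index of each event
events[:, 2] = 1                                 # third column: event id, the same for all events

A float-valued array would carry the same numbers, but downstream consumers such as epoching and the source simulator expect integer sample indices and ids, hence the explicit ``int`` in the fix.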
diff --git a/examples/stats/plot_cluster_stats_evoked.py b/examples/stats/cluster_stats_evoked.py similarity index 100% rename from examples/stats/plot_cluster_stats_evoked.py rename to examples/stats/cluster_stats_evoked.py diff --git a/examples/stats/plot_fdr_stats_evoked.py b/examples/stats/fdr_stats_evoked.py similarity index 100% rename from examples/stats/plot_fdr_stats_evoked.py rename to examples/stats/fdr_stats_evoked.py diff --git a/examples/stats/plot_linear_regression_raw.py b/examples/stats/linear_regression_raw.py similarity index 100% rename from examples/stats/plot_linear_regression_raw.py rename to examples/stats/linear_regression_raw.py diff --git a/examples/stats/plot_sensor_permutation_test.py b/examples/stats/sensor_permutation_test.py similarity index 100% rename from examples/stats/plot_sensor_permutation_test.py rename to examples/stats/sensor_permutation_test.py diff --git a/examples/stats/plot_sensor_regression.py b/examples/stats/sensor_regression.py similarity index 84% rename from examples/stats/plot_sensor_regression.py rename to examples/stats/sensor_regression.py index 21b6317fe6f..e97a3a740cb 100644 --- a/examples/stats/plot_sensor_regression.py +++ b/examples/stats/sensor_regression.py @@ -10,20 +10,16 @@ timepoint. This example shows the regression coefficient; the t and p values are also calculated automatically. -Here, we repeat a few of the analyses from [1]_. This can be easily performed -by accessing the metadata object, which contains word-level information about -various psycholinguistically relevant features of the words for which we have -EEG activity. +Here, we repeat a few of the analyses from :footcite:`DufauEtAl2015`. This +can be easily performed by accessing the metadata object, which contains +word-level information about various psycholinguistically relevant features +of the words for which we have EEG activity. -For the general methodology, see e.g. [2]_. +For the general methodology, see e.g. :footcite:`HaukEtAl2006`. References ---------- -.. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand - words are worth a picture: Snapshots of printed-word processing in an - event-related potential megastudy. Psychological Science, 2015 -.. [2] Hauk et al. The time course of visual word recognition as revealed by - linear regression analysis of ERP data. Neuroimage, 2006 +.. footbibliography:: """ # Authors: Tal Linzen # Denis A. 
Engemann diff --git a/examples/time_frequency/plot_compute_csd.py b/examples/time_frequency/compute_csd.py similarity index 100% rename from examples/time_frequency/plot_compute_csd.py rename to examples/time_frequency/compute_csd.py diff --git a/examples/time_frequency/plot_compute_source_psd_epochs.py b/examples/time_frequency/compute_source_psd_epochs.py similarity index 100% rename from examples/time_frequency/plot_compute_source_psd_epochs.py rename to examples/time_frequency/compute_source_psd_epochs.py diff --git a/examples/time_frequency/plot_source_label_time_frequency.py b/examples/time_frequency/source_label_time_frequency.py similarity index 100% rename from examples/time_frequency/plot_source_label_time_frequency.py rename to examples/time_frequency/source_label_time_frequency.py diff --git a/examples/time_frequency/plot_source_power_spectrum.py b/examples/time_frequency/source_power_spectrum.py similarity index 100% rename from examples/time_frequency/plot_source_power_spectrum.py rename to examples/time_frequency/source_power_spectrum.py diff --git a/examples/time_frequency/plot_source_power_spectrum_opm.py b/examples/time_frequency/source_power_spectrum_opm.py similarity index 96% rename from examples/time_frequency/plot_source_power_spectrum_opm.py rename to examples/time_frequency/source_power_spectrum_opm.py index 1ace1f65d4a..9a3c388eca8 100644 --- a/examples/time_frequency/plot_source_power_spectrum_opm.py +++ b/examples/time_frequency/source_power_spectrum_opm.py @@ -7,7 +7,7 @@ Here we compute the resting state from raw for data recorded using a Neuromag VectorView system and a custom OPM system. -The pipeline is meant to mostly follow the Brainstorm [1]_ +The pipeline is meant to mostly follow the Brainstorm :footcite:`TadelEtAl2011` `OMEGA resting tutorial pipeline `_. The steps we use are: @@ -17,10 +17,6 @@ 4. Frequency: power spectral density (Welch), 4 sec window, 50% overlap. 5. Standardize: normalize by relative power for each source. -.. contents:: - :local: - :depth: 1 - .. _bst_omega: https://neuroimage.usc.edu/brainstorm/Tutorials/RestingOmega Preprocessing @@ -232,7 +228,4 @@ def plot_band(kind, band): ############################################################################### # References # ---------- -# .. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. -# Brainstorm: A User-Friendly Application for MEG/EEG Analysis. -# Computational Intelligence and Neuroscience, vol. 2011, Article ID -# 879716, 13 pages, 2011. doi:10.1155/2011/879716 +# .. 
footbibliography:: diff --git a/examples/time_frequency/plot_source_space_time_frequency.py b/examples/time_frequency/source_space_time_frequency.py similarity index 100% rename from examples/time_frequency/plot_source_space_time_frequency.py rename to examples/time_frequency/source_space_time_frequency.py diff --git a/examples/time_frequency/plot_temporal_whitening.py b/examples/time_frequency/temporal_whitening.py similarity index 100% rename from examples/time_frequency/plot_temporal_whitening.py rename to examples/time_frequency/temporal_whitening.py diff --git a/examples/time_frequency/plot_time_frequency_erds.py b/examples/time_frequency/time_frequency_erds.py similarity index 85% rename from examples/time_frequency/plot_time_frequency_erds.py rename to examples/time_frequency/time_frequency_erds.py index 5d18f309ae5..c88ea29d18d 100644 --- a/examples/time_frequency/plot_time_frequency_erds.py +++ b/examples/time_frequency/time_frequency_erds.py @@ -5,12 +5,13 @@ This example calculates and displays ERDS maps of event-related EEG data. ERDS (sometimes also written as ERD/ERS) is short for event-related -desynchronization (ERD) and event-related synchronization (ERS) [1]_. +desynchronization (ERD) and event-related synchronization (ERS) +:footcite:`PfurtschellerLopesdaSilva1999`. Conceptually, ERD corresponds to a decrease in power in a specific frequency band relative to a baseline. Similarly, ERS corresponds to an increase in power. An ERDS map is a time/frequency representation of ERD/ERS over a range -of frequencies [2]_. ERDS maps are also known as ERSP (event-related spectral -perturbation) [3]_. +of frequencies :footcite:`GraimannEtAl2002`. ERDS maps are also known as ERSP +(event-related spectral perturbation) :footcite:`Makeig1993`. We use a public EEG BCI data set containing two different motor imagery tasks available at PhysioNet. The two tasks are imagined hand and feet movement. Our @@ -26,15 +27,7 @@ References ---------- -.. [1] G. Pfurtscheller, F. H. Lopes da Silva. Event-related EEG/MEG - synchronization and desynchronization: basic principles. Clinical - Neurophysiology 110(11), 1842-1857, 1999. -.. [2] B. Graimann, J. E. Huggins, S. P. Levine, G. Pfurtscheller. - Visualization of significant ERD/ERS patterns in multichannel EEG and - ECoG data. Clinical Neurophysiology 113(1), 43-47, 2002. -.. [3] S. Makeig. Auditory event-related dynamics of the EEG spectrum and - effects of exposure to tones. Electroencephalography and Clinical - Neurophysiology 86(4), 283-293, 1993. +.. footbibliography:: """ # Authors: Clemens Brunner # diff --git a/examples/time_frequency/plot_time_frequency_global_field_power.py b/examples/time_frequency/time_frequency_global_field_power.py similarity index 82% rename from examples/time_frequency/plot_time_frequency_global_field_power.py rename to examples/time_frequency/time_frequency_global_field_power.py index 022a92a665a..f3da2d105c8 100644 --- a/examples/time_frequency/plot_time_frequency_global_field_power.py +++ b/examples/time_frequency/time_frequency_global_field_power.py @@ -6,21 +6,24 @@ =========================================================== The objective is to show you how to explore spectrally localized -effects. For this purpose we adapt the method described in [1]_ and use it on -the somato dataset. The idea is to track the band-limited temporal evolution -of spatial patterns by using the :term:`Global Field Power(GFP) `. +effects. 
For this purpose we adapt the method described in +:footcite:`HariSalmelin1997` and use it on the somato dataset. +The idea is to track the band-limited temporal evolution +of spatial patterns by using the :term:`global field power` (GFP). We first bandpass filter the signals and then apply a Hilbert transform. To reveal oscillatory activity the evoked response is then subtracted from every single trial. Finally, we rectify the signals prior to averaging across trials by taking the magniude of the Hilbert. -Then the :term:`GFP` is computed as described in [2]_, using the sum of the +Then the :term:`GFP` is computed as described in +:footcite:`EngemannGramfort2015`, using the sum of the squares but without normalization by the rank. -Baselining is subsequently applied to make the :term:`GFPs ` comparable +Baselining is subsequently applied to make the :term:`GFP` comparable between frequencies. The procedure is then repeated for each frequency band of interest and -all :term:`GFPs ` are visualized. To estimate uncertainty, non-parametric -confidence intervals are computed as described in [3]_ across channels. +all :term:`GFPs` are visualized. To estimate uncertainty, non-parametric +confidence intervals are computed as described in :footcite:`EfronHastie2016` +across channels. The advantage of this method over summarizing the Space x Time x Frequency output of a Morlet Wavelet in frequency bands is relative speed and, more @@ -31,15 +34,8 @@ References ---------- +.. footbibliography:: -.. [1] Hari R. and Salmelin R. Human cortical oscillations: a neuromagnetic - view through the skull (1997). Trends in Neuroscience 20 (1), - pp. 44-49. -.. [2] Engemann D. and Gramfort A. (2015) Automated model selection in - covariance estimation and spatial whitening of MEG and EEG signals, - vol. 108, 328-342, NeuroImage. -.. [3] Efron B. and Hastie T. Computer Age Statistical Inference (2016). - Cambrdige University Press, Chapter 11.2. """ # noqa: E501 # Authors: Denis A. Engemann # Stefan Appelhoff diff --git a/examples/time_frequency/plot_time_frequency_simulated.py b/examples/time_frequency/time_frequency_simulated.py similarity index 100% rename from examples/time_frequency/plot_time_frequency_simulated.py rename to examples/time_frequency/time_frequency_simulated.py diff --git a/examples/visualization/plot_3d_to_2d.py b/examples/visualization/3d_to_2d.py similarity index 100% rename from examples/visualization/plot_3d_to_2d.py rename to examples/visualization/3d_to_2d.py diff --git a/examples/visualization/plot_channel_epochs_image.py b/examples/visualization/channel_epochs_image.py similarity index 89% rename from examples/visualization/plot_channel_epochs_image.py rename to examples/visualization/channel_epochs_image.py index f5ae5556959..20b90344b11 100644 --- a/examples/visualization/plot_channel_epochs_image.py +++ b/examples/visualization/channel_epochs_image.py @@ -10,7 +10,7 @@ that does not show any evoked field. It is also demonstrated how to reorder the epochs using a 1D spectral -embedding as described in [1]_. +embedding as described in :footcite:`GramfortEtAl2010`. """ # Authors: Alexandre Gramfort # @@ -75,7 +75,4 @@ def order_func(times, data): ############################################################################### # References # ---------- -# .. [1] Graph-based variability estimation in single-trial event-related -# neural responses. A. Gramfort, R. Keriven, M. Clerc, 2010, -# Biomedical Engineering, IEEE Trans. on, vol. 
57 (5), 1051-1061 -# https://ieeexplore.ieee.org/document/5406156 +# .. footbibliography:: diff --git a/examples/visualization/plot_eeg_on_scalp.py b/examples/visualization/eeg_on_scalp.py similarity index 97% rename from examples/visualization/plot_eeg_on_scalp.py rename to examples/visualization/eeg_on_scalp.py index 79d72dc5839..454021f7953 100644 --- a/examples/visualization/plot_eeg_on_scalp.py +++ b/examples/visualization/eeg_on_scalp.py @@ -1,4 +1,6 @@ """ +.. _ex-eeg-on-scalp: + ================================= Plotting EEG sensors on the scalp ================================= diff --git a/examples/visualization/plot_eeglab_head_sphere.py b/examples/visualization/eeglab_head_sphere.py similarity index 94% rename from examples/visualization/plot_eeglab_head_sphere.py rename to examples/visualization/eeglab_head_sphere.py index 488c14edd57..59e369aee92 100644 --- a/examples/visualization/plot_eeglab_head_sphere.py +++ b/examples/visualization/eeglab_head_sphere.py @@ -48,7 +48,7 @@ # measured # in the 10-20 system (a line going through Fpz, T8/T4, Oz and T7/T3 channels). # MNE-Python places the head outline lower on the z dimension, at the level of -# the anatomical landmarks :term:`LPA, RPA, and NAS `. +# the anatomical landmarks :term:`LPA, RPA, and NAS `. # Therefore to use the EEGLAB layout we # have to move the origin of the reference sphere (a sphere that is used as a # reference when projecting channel locations to a 2d plane) a few centimeters @@ -59,9 +59,8 @@ # the position of Fpz, T8, Oz and T7 channels available in our montage. # first we obtain the 3d positions of selected channels -check_ch = ['Oz', 'Fpz', 'T7', 'T8'] -ch_idx = [fake_evoked.ch_names.index(ch) for ch in check_ch] -pos = np.stack([fake_evoked.info['chs'][idx]['loc'][:3] for idx in ch_idx]) +chs = ['Oz', 'Fpz', 'T7', 'T8'] +pos = np.stack([biosemi_montage.get_positions()['ch_pos'][ch] for ch in chs]) # now we calculate the radius from T7 and T8 x position # (we could use Oz and Fpz y positions as well) diff --git a/examples/visualization/plot_evoked_arrowmap.py b/examples/visualization/evoked_arrowmap.py similarity index 100% rename from examples/visualization/plot_evoked_arrowmap.py rename to examples/visualization/evoked_arrowmap.py diff --git a/examples/visualization/plot_evoked_topomap.py b/examples/visualization/evoked_topomap.py similarity index 98% rename from examples/visualization/plot_evoked_topomap.py rename to examples/visualization/evoked_topomap.py index 59185f92a9f..465e4ed282e 100644 --- a/examples/visualization/plot_evoked_topomap.py +++ b/examples/visualization/evoked_topomap.py @@ -122,8 +122,8 @@ # Animating the topomap # --------------------- # -# Instead of using a still image we can plot magnetometer data as an animation -# (animates only in matplotlib interactive mode) +# Instead of using a still image we can plot magnetometer data as an animation, +# which animates properly only in matplotlib interactive mode. # sphinx_gallery_thumbnail_number = 9 times = np.arange(0.05, 0.151, 0.01) diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/evoked_whitening.py similarity index 92% rename from examples/visualization/plot_evoked_whitening.py rename to examples/visualization/evoked_whitening.py index 902da51c2ba..154e39122b6 100644 --- a/examples/visualization/plot_evoked_whitening.py +++ b/examples/visualization/evoked_whitening.py @@ -1,4 +1,6 @@ """ +.. 
_ex-evoked-whitening: + ============================================= Whitening evoked data with a noise covariance ============================================= @@ -7,13 +9,12 @@ matrix. It's an excellent quality check to see if baseline signals match the assumption of Gaussian white noise during the baseline period. -Covariance estimation and diagnostic plots are based on [1]_. +Covariance estimation and diagnostic plots are based on +:footcite:`EngemannGramfort2015`. References ---------- -.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in - covariance estimation and spatial whitening of MEG and EEG signals, vol. - 108, 328-342, NeuroImage. +.. footbibliography:: """ # Authors: Alexandre Gramfort diff --git a/examples/visualization/plot_meg_sensors.py b/examples/visualization/meg_sensors.py similarity index 98% rename from examples/visualization/plot_meg_sensors.py rename to examples/visualization/meg_sensors.py index 01414f8cf97..7bb080506dd 100644 --- a/examples/visualization/plot_meg_sensors.py +++ b/examples/visualization/meg_sensors.py @@ -6,10 +6,6 @@ ====================================== Show sensor layouts of different MEG systems. - -.. contents:: - :local: - :depth: 1 """ # Author: Eric Larson # diff --git a/examples/visualization/plot_mne_helmet.py b/examples/visualization/mne_helmet.py similarity index 97% rename from examples/visualization/plot_mne_helmet.py rename to examples/visualization/mne_helmet.py index 1267f46ff67..335d47f5251 100644 --- a/examples/visualization/plot_mne_helmet.py +++ b/examples/visualization/mne_helmet.py @@ -28,5 +28,5 @@ coord_frame='mri') evoked.plot_field(maps, time=time, fig=fig, time_label=None, vmax=5e-13) mne.viz.set_3d_view( - fig, azimuth=40, elevation=87, focalpoint=(0., -0.01, 0.04), roll=-100, - distance=0.48) + fig, azimuth=40, elevation=87, focalpoint=(0., -0.01, 0.04), roll=-25, + distance=0.55) diff --git a/examples/visualization/montage.py b/examples/visualization/montage_sgskip.py similarity index 100% rename from examples/visualization/montage.py rename to examples/visualization/montage_sgskip.py diff --git a/examples/visualization/plot_parcellation.py b/examples/visualization/parcellation.py similarity index 80% rename from examples/visualization/plot_parcellation.py rename to examples/visualization/parcellation.py index 9fcf3963c03..81fcce1f7cc 100644 --- a/examples/visualization/plot_parcellation.py +++ b/examples/visualization/parcellation.py @@ -4,9 +4,10 @@ Plot a cortical parcellation ============================ -In this example, we download the HCP-MMP1.0 parcellation [1]_ and show it -on ``fsaverage``. -We will also download the customized 448-label aparc parcellation from [2]_ +In this example, we download the HCP-MMP1.0 parcellation +:footcite:`GlasserEtAl2016` and show it on ``fsaverage``. +We will also download the customized 448-label aparc +parcellation from :footcite:`KhanEtAl2018`. .. note:: The HCP-MMP dataset has license terms restricting its use. Of particular relevance: @@ -15,14 +16,6 @@ derived from WU-Minn HCP data when publicly presenting any results or algorithms that benefitted from their use." -References ----------- -.. [1] Glasser MF et al. (2016) A multi-modal parcellation of human - cerebral cortex. Nature 536:171-178. -.. [2] Khan S et al. (2018) Maturation trajectories of cortical - resting-state networks depend on the mediating frequency band. - Neuroimage 174 57-68. 
- """ # Author: Eric Larson # Denis Engemann @@ -61,3 +54,8 @@ brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir, cortex='low_contrast', background='white', size=(800, 600)) brain.add_annotation('aparc_sub') + +############################################################################### +# References +# ---------- +# .. footbibliography:: diff --git a/examples/visualization/plot_make_report.py b/examples/visualization/plot_make_report.py deleted file mode 100644 index 256c279f1b0..00000000000 --- a/examples/visualization/plot_make_report.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -.. _ex-report: - -================================ -Make an MNE-Report with a Slider -================================ - -In this example, MEG evoked data are plotted in an HTML slider. -""" - -# Authors: Teon Brooks -# Eric Larson -# -# License: BSD (3-clause) - -from mne.report import Report -from mne.datasets import sample -from mne import read_evokeds -from matplotlib import pyplot as plt - - -data_path = sample.data_path() -meg_path = data_path + '/MEG/sample' -subjects_dir = data_path + '/subjects' -evoked_fname = meg_path + '/sample_audvis-ave.fif' - -############################################################################### -# Do standard folder parsing (this can take a couple of minutes): - -report = Report(image_format='png', subjects_dir=subjects_dir, - info_fname=evoked_fname, subject='sample', - raw_psd=False) # use False for speed here -report.parse_folder(meg_path, on_error='ignore', mri_decim=10) - -############################################################################### -# Add a custom section with an evoked slider: - -# Load the evoked data -evoked = read_evokeds(evoked_fname, condition='Left Auditory', - baseline=(None, 0), verbose=False) -evoked.crop(0, .2) -times = evoked.times[::4] -# Create a list of figs for the slider -figs = list() -for t in times: - figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100, - show=False)) - plt.close(figs[-1]) -report.add_slider_to_section(figs, times, 'Evoked Response', - image_format='png') # can also use 'svg' - -# Save the report -report.save('my_report.html', overwrite=True) diff --git a/examples/visualization/plot_publication_figure.py b/examples/visualization/publication_figure.py similarity index 99% rename from examples/visualization/plot_publication_figure.py rename to examples/visualization/publication_figure.py index 05715a59eb5..4d57a99705a 100644 --- a/examples/visualization/plot_publication_figure.py +++ b/examples/visualization/publication_figure.py @@ -16,10 +16,6 @@ # License: BSD (3-clause) ############################################################################### -# .. 
contents:: Contents -# :local: -# :depth: 1 -# # Imports # ------- # We are importing everything we need for this example: diff --git a/examples/visualization/plot_roi_erpimage_by_rt.py b/examples/visualization/roi_erpimage_by_rt.py similarity index 98% rename from examples/visualization/plot_roi_erpimage_by_rt.py rename to examples/visualization/roi_erpimage_by_rt.py index c88e2fd6b05..0b07de87557 100644 --- a/examples/visualization/plot_roi_erpimage_by_rt.py +++ b/examples/visualization/roi_erpimage_by_rt.py @@ -64,7 +64,7 @@ event_id={"square": 2}) ############################################################################### -# Plot using :term:`Global Field Power ` +# Plot using :term:`global field power` # Parameters for plotting order = rts.argsort() # sorting from fast to slow trials diff --git a/examples/visualization/plot_sensor_noise_level.py b/examples/visualization/sensor_noise_level.py similarity index 74% rename from examples/visualization/plot_sensor_noise_level.py rename to examples/visualization/sensor_noise_level.py index c9b76755787..35f202c0f6b 100644 --- a/examples/visualization/plot_sensor_noise_level.py +++ b/examples/visualization/sensor_noise_level.py @@ -5,13 +5,7 @@ ====================================== This shows how to use :meth:`mne.io.Raw.plot_psd` to examine noise levels -of systems. See [1]_ for an example. - -References ----------- -.. [1] Khan S, Cohen D (2013). Note: Magnetic noise from the inner wall of - a magnetically shielded room. Review of Scientific Instruments 84:56101. - https://doi.org/10.1063/1.4802845 +of systems. See :footcite:`KhanCohen2013` for an example. """ # Author: Eric Larson # @@ -29,3 +23,8 @@ # We can plot the absolute noise levels: raw_erm.plot_psd(tmax=10., average=True, spatial_colors=False, dB=False, xscale='log') +############################################################################### +# References +# ---------- +# +# .. 
footbibliography:: diff --git a/examples/visualization/plot_ssp_projs_sensitivity_map.py b/examples/visualization/ssp_projs_sensitivity_map.py similarity index 100% rename from examples/visualization/plot_ssp_projs_sensitivity_map.py rename to examples/visualization/ssp_projs_sensitivity_map.py diff --git a/examples/visualization/plot_topo_compare_conditions.py b/examples/visualization/topo_compare_conditions.py similarity index 100% rename from examples/visualization/plot_topo_compare_conditions.py rename to examples/visualization/topo_compare_conditions.py diff --git a/examples/visualization/plot_topo_customized.py b/examples/visualization/topo_customized.py similarity index 100% rename from examples/visualization/plot_topo_customized.py rename to examples/visualization/topo_customized.py diff --git a/examples/visualization/plot_xhemi.py b/examples/visualization/xhemi.py similarity index 100% rename from examples/visualization/plot_xhemi.py rename to examples/visualization/xhemi.py diff --git a/mne/__init__.py b/mne/__init__.py index b30bc50caca..85b49a5f7dd 100644 --- a/mne/__init__.py +++ b/mne/__init__.py @@ -36,7 +36,7 @@ add_reference_channels) from .io.what import what from .bem import (make_sphere_model, make_bem_model, make_bem_solution, - read_bem_surfaces, write_bem_surfaces, + read_bem_surfaces, write_bem_surfaces, write_head_bem, read_bem_solution, write_bem_solution) from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance, compute_covariance, whiten_evoked, make_ad_hoc_cov) @@ -89,9 +89,9 @@ transform_surface_to, Transform) from .proj import (read_proj, write_proj, compute_proj_epochs, compute_proj_evoked, compute_proj_raw, sensitivity_map) -from .selection import read_selection from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole -from .channels import equalize_channels, rename_channels, find_layout +from .channels import (equalize_channels, rename_channels, find_layout, + read_vectorview_selection) from .report import Report, open_report from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff @@ -122,6 +122,10 @@ from . import viz from . 
import decoding +# deprecations +from .utils import deprecated_alias +deprecated_alias('read_selection', read_vectorview_selection) + # initialize logging set_log_level(None, False) set_log_file() diff --git a/mne/_version.py b/mne/_version.py index be2e25cdc9b..8ac0d198af4 100644 --- a/mne/_version.py +++ b/mne/_version.py @@ -3,4 +3,4 @@ # # License: BSD (3-clause) -__version__ = '0.22.dev0' +__version__ = '0.23.dev0' diff --git a/mne/annotations.py b/mne/annotations.py index 58db835583d..dfb7025f320 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -1,4 +1,5 @@ # Authors: Jaakko Leppakangas +# Robert Luke # # License: BSD (3-clause) @@ -8,6 +9,7 @@ import re from copy import deepcopy from itertools import takewhile +import json from collections import Counter from collections.abc import Iterable import warnings @@ -17,10 +19,11 @@ from .utils import (_pl, check_fname, _validate_type, verbose, warn, logger, _check_pandas_installed, _mask_to_onsets_offsets, _DefaultEventParser, _check_dt, _stamp_to_dt, _dt_to_stamp, - _check_fname) + _check_fname, int_like, _check_option, fill_doc, + _on_missing) from .io.write import (start_block, end_block, write_float, write_name_list, - write_double, start_file) + write_double, start_file, write_string) from .io.constants import FIFF from .io.open import fiff_open from .io.tree import dir_tree_find @@ -31,7 +34,7 @@ _datetime = datetime -def _check_o_d_s(onset, duration, description): +def _check_o_d_s_c(onset, duration, description, ch_names): onset = np.atleast_1d(np.array(onset, dtype=float)) if onset.ndim != 1: raise ValueError('Onset must be a one dimensional array, got %s ' @@ -50,17 +53,39 @@ def _check_o_d_s(onset, duration, description): if description.ndim != 1: raise ValueError('Description must be a one dimensional array, ' 'got %d.' % (description.ndim,)) - if any(['{COLON}' in desc for desc in description]): - raise ValueError('The substring "{COLON}" ' - 'in descriptions not supported.') - - if not (len(onset) == len(duration) == len(description)): - raise ValueError('Onset, duration and description must be ' - 'equal in sizes, got %s, %s, and %s.' - % (len(onset), len(duration), len(description))) - return onset, duration, description - - + _prep_name_list(description, 'check', 'description') + + # ch_names: convert to ndarray of tuples + _validate_type(ch_names, (None, tuple, list, np.ndarray), 'ch_names') + if ch_names is None: + ch_names = [()] * len(onset) + ch_names = list(ch_names) + for ai, ch in enumerate(ch_names): + _validate_type(ch, (list, tuple, np.ndarray), f'ch_names[{ai}]') + ch_names[ai] = tuple(ch) + for ci, name in enumerate(ch_names[ai]): + _validate_type(name, str, f'ch_names[{ai}][{ci}]') + ch_names = _ndarray_ch_names(ch_names) + + if not (len(onset) == len(duration) == len(description) == len(ch_names)): + raise ValueError( + 'Onset, duration, description, and ch_names must be ' + f'equal in sizes, got {len(onset)}, {len(duration)}, ' + f'{len(description)}, and {len(ch_names)}.') + return onset, duration, description, ch_names + + +def _ndarray_ch_names(ch_names): + # np.array(..., dtype=object) if all entries are empty will give + # an empty array of shape (n_entries, 0) which is not helpful. So let's + # force it to give us an array of shape (n_entries,) full of empty + # tuples + out = np.empty(len(ch_names), dtype=object) + out[:] = ch_names + return out + + +@fill_doc class Annotations(object): """Annotation object for annotating segments of raw data. 
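# A minimal sketch of what a module-level deprecation alias such as the
# deprecated_alias('read_selection', read_vectorview_selection) call above has
# to provide -- an illustration only, not MNE's actual implementation: the old
# name stays importable, emits a DeprecationWarning, and forwards the call.
import functools
import warnings


def _make_deprecated_alias(old_name, func, removed_in='a future release'):
    @functools.wraps(func)
    def alias(*args, **kwargs):
        warnings.warn(f'{old_name} is deprecated and will be removed in '
                      f'{removed_in}; use {func.__name__} instead.',
                      DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    alias.__name__ = old_name
    return alias

# e.g. read_selection = _make_deprecated_alias('read_selection',
#                                              read_vectorview_selection)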
@@ -88,8 +113,11 @@ class Annotations(object): In general, ``raw.info['meas_date']`` (or None) can be used for syncing the annotations with raw data if their acquisiton is started at the same time. If it is a string, it should conform to the ISO8601 format. - More precisely to this '%Y-%m-%d %H:%M:%S.%f' particular case of the - ISO8601 format where the delimiter between date and time is ' '. + More precisely to this '%%Y-%%m-%%d %%H:%%M:%%S.%%f' particular case of + the ISO8601 format where the delimiter between date and time is ' '. + %(annot_ch_names)s + + .. versionadded:: 0.23 See Also -------- @@ -116,6 +144,15 @@ class Annotations(object): >>> raw.set_annotations(annotations) # doctest: +SKIP >>> epochs = mne.Epochs(raw, events, event_id, tmin, tmax) # doctest: +SKIP + **ch_names** + + Specifying channel names allows the creation of channel-specific + annotations. Once the annotations are assigned to a raw instance with + :meth:`mne.io.Raw.set_annotations`, if channels are renamed by the raw + instance, the annotation channels also get renamed. If channels are dropped + from the raw instance, any channel-specific annotation that has no channels + left in the raw instance will also be removed. + **orig_time** If ``orig_time`` is None, the annotations are synced to the start of the @@ -196,10 +233,10 @@ class Annotations(object): """ # noqa: E501 def __init__(self, onset, duration, description, - orig_time=None): # noqa: D102 + orig_time=None, ch_names=None): # noqa: D102 self._orig_time = _handle_meas_date(orig_time) - self.onset, self.duration, self.description = _check_o_d_s( - onset, duration, description) + self.onset, self.duration, self.description, self.ch_names = \ + _check_o_d_s_c(onset, duration, description, ch_names) self._sort() # ensure we're sorted @property @@ -214,6 +251,7 @@ def __eq__(self, other): return (np.array_equal(self.onset, other.onset) and np.array_equal(self.duration, other.duration) and np.array_equal(self.description, other.description) and + np.array_equal(self.ch_names, other.ch_names) and self.orig_time == other.orig_time) def __repr__(self): @@ -221,12 +259,19 @@ def __repr__(self): counter = Counter(self.description) kinds = ', '.join(['%s (%s)' % k for k in sorted(counter.items())]) kinds = (': ' if len(kinds) > 0 else '') + kinds - s = ('Annotations | %s segment%s%s' % - (len(self.onset), _pl(len(self.onset)), kinds)) + ch_specific = ', channel-specific' if self._any_ch_names() else '' + s = ('Annotations | %s segment%s%s%s' % + (len(self.onset), _pl(len(self.onset)), ch_specific, kinds)) return '<' + shorten(s, width=77, placeholder=' ...') + '>' def __len__(self): - """Return the number of annotations.""" + """Return the number of annotations. + + Returns + ------- + n_annot : int + The number of annotations. 
+ """ return len(self.duration) def __add__(self, other): @@ -256,19 +301,24 @@ def __iter__(self): def __getitem__(self, key): """Propagate indexing and slicing to the underlying numpy structure.""" - if isinstance(key, int): + if isinstance(key, int_like): out_keys = ('onset', 'duration', 'description', 'orig_time') out_vals = (self.onset[key], self.duration[key], self.description[key], self.orig_time) + if self._any_ch_names(): + out_keys += ('ch_names',) + out_vals += (self.ch_names[key],) return OrderedDict(zip(out_keys, out_vals)) else: key = list(key) if isinstance(key, tuple) else key return Annotations(onset=self.onset[key], duration=self.duration[key], description=self.description[key], - orig_time=self.orig_time) + orig_time=self.orig_time, + ch_names=self.ch_names[key]) - def append(self, onset, duration, description): + @fill_doc + def append(self, onset, duration, description, ch_names=None): """Add an annotated segment. Operates inplace. Parameters @@ -281,6 +331,9 @@ def append(self, onset, duration, description): description : str | array-like Description for the annotation. To reject epochs, use description starting with keyword 'bad'. + %(annot_ch_names)s + + .. versionadded:: 0.23 Returns ------- @@ -293,11 +346,12 @@ def append(self, onset, duration, description): to not only ``list.append``, but also `list.extend `__. """ # noqa: E501 - onset, duration, description = _check_o_d_s( - onset, duration, description) + onset, duration, description, ch_names = _check_o_d_s_c( + onset, duration, description, ch_names) self.onset = np.append(self.onset, onset) self.duration = np.append(self.duration, duration) self.description = np.append(self.description, description) + self.ch_names = np.append(self.ch_names, ch_names) self._sort() return self @@ -323,8 +377,62 @@ def delete(self, idx): self.onset = np.delete(self.onset, idx) self.duration = np.delete(self.duration, idx) self.description = np.delete(self.description, idx) + self.ch_names = np.delete(self.ch_names, idx) - def save(self, fname): + def to_data_frame(self): + """Export annotations in tabular structure as a pandas DataFrame. + + Returns + ------- + result : pandas.DataFrame + Returns a pandas DataFrame with onset, duration, and + description columns. A column named ch_names is added if any + annotations are channel-specific. 
+ """ + pd = _check_pandas_installed(strict=True) + dt = _handle_meas_date(self.orig_time) + if dt is None: + dt = _handle_meas_date(0) + dt = dt.replace(tzinfo=None) + onsets_dt = [dt + timedelta(seconds=o) for o in self.onset] + df = dict(onset=onsets_dt, duration=self.duration, + description=self.description) + if self._any_ch_names(): + df.update(ch_names=self.ch_names) + df = pd.DataFrame(df) + return df + + def _any_ch_names(self): + return any(len(ch) for ch in self.ch_names) + + def _prune_ch_names(self, info, on_missing): + # this prunes channel names and if a given channel-specific annotation + # no longer has any channels left, it gets dropped + keep = set(info['ch_names']) + ch_names = self.ch_names + warned = False + drop_idx = list() + for ci, ch in enumerate(ch_names): + if len(ch): + names = list() + for name in ch: + if name not in keep: + if not warned: + _on_missing( + on_missing, 'At least one channel name in ' + f'annotations missing from info: {name}') + warned = True + else: + names.append(name) + ch_names[ci] = tuple(names) + if not len(ch_names[ci]): + drop_idx.append(ci) + if len(drop_idx): + self.delete(drop_idx) + return self + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): """Save annotations to FIF, CSV or TXT. Typically annotations get saved in the FIF file for raw data @@ -336,10 +444,15 @@ def save(self, fname): ---------- fname : str The filename to use. + %(overwrite)s + + .. versionadded:: 0.23 + %(verbose)s """ check_fname(fname, 'annotations', ('-annot.fif', '-annot.fif.gz', '_annot.fif', '_annot.fif.gz', '.txt', '.csv')) + fname = _check_fname(fname, overwrite=overwrite) if fname.endswith(".txt"): _write_annotations_txt(fname, self) elif fname.endswith(".csv"): @@ -357,6 +470,7 @@ def _sort(self): self.onset = self.onset[order] self.duration = self.duration[order] self.description = self.description[order] + self.ch_names = self.ch_names[order] @verbose def crop(self, tmin=None, tmax=None, emit_warning=False, verbose=None): @@ -401,10 +515,10 @@ def crop(self, tmin=None, tmax=None, emit_warning=False, verbose=None): absolute_tmax = _handle_meas_date(tmax) del tmin, tmax - onsets, durations, descriptions = [], [], [] + onsets, durations, descriptions, ch_names = [], [], [], [] out_of_bounds, clip_left_elem, clip_right_elem = [], [], [] - for onset, duration, description in zip( - self.onset, self.duration, self.description): + for onset, duration, description, ch in zip( + self.onset, self.duration, self.description, self.ch_names): # if duration is NaN behave like a zero if np.isnan(duration): duration = 0. 
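# A short usage sketch of the channel-specific annotations documented above
# (``ch_names`` is new in 0.23). The file and channel names below are made up
# for illustration, and ``to_data_frame`` requires pandas to be installed.
import mne

annot = mne.Annotations(onset=[1.0, 3.0], duration=[0.5, 0.25],
                        description=['bad_blink', 'movement'],
                        ch_names=[['EEG 001', 'EEG 002'], []])
raw = mne.io.read_raw_fif('my_raw.fif')  # hypothetical file
raw.set_annotations(annot)               # channel names must exist in raw.info
# Channel-specific entries follow channel renames, and are dropped once all of
# their channels have been removed from the Raw instance.
df = raw.annotations.to_data_frame()     # gains a ch_names column
annot.save('my-annot.csv', overwrite=True)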
@@ -433,10 +547,12 @@ def crop(self, tmin=None, tmax=None, emit_warning=False, verbose=None): onsets.append( (absolute_onset - offset).total_seconds()) descriptions.append(description) + ch_names.append(ch) self.onset = np.array(onsets, float) self.duration = np.array(durations, float) assert (self.duration >= 0).all() self.description = np.array(descriptions, dtype=str) + self.ch_names = _ndarray_ch_names(ch_names) if emit_warning: omitted = np.array(out_of_bounds).sum() @@ -463,7 +579,8 @@ def _combine_annotations(one, two, one_n_samples, one_first_samp, onset = np.concatenate([one.onset, two.onset + shift]) duration = np.concatenate([one.duration, two.duration]) description = np.concatenate([one.description, two.description]) - return Annotations(onset, duration, description, one.orig_time) + ch_names = np.concatenate([one.ch_names, two.ch_names]) + return Annotations(onset, duration, description, one.orig_time, ch_names) def _handle_meas_date(meas_date): @@ -555,32 +672,46 @@ def _annotations_starts_stops(raw, kinds, name='skip_by_annotation', return onsets, ends +def _prep_name_list(lst, operation, name='description'): + if operation == 'check': + if any(['{COLON}' in val for val in lst]): + raise ValueError( + f'The substring "{{COLON}}" in {name} not supported.') + elif operation == 'write': + # take a list of strings and return a sanitized string + return ':'.join(val.replace(':', '{COLON}') for val in lst) + else: + # take a sanitized string and return a list of strings + assert operation == 'read' + assert isinstance(lst, str) + if not len(lst): + return [] + return [val.replace('{COLON}', ':') for val in lst.split(':')] + + def _write_annotations(fid, annotations): """Write annotations.""" start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset) write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, annotations.duration + annotations.onset) - # To allow : in description, they need to be replaced for serialization - # -> replace with "{COLON}". 
When read back in, replace it back with ":" - write_name_list(fid, FIFF.FIFF_COMMENT, [d.replace(':', '{COLON}') for d in - annotations.description]) + write_name_list(fid, FIFF.FIFF_COMMENT, _prep_name_list( + annotations.description, 'write').split(':')) if annotations.orig_time is not None: write_double(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(annotations.orig_time)) + if annotations._any_ch_names(): + write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, + json.dumps(tuple(annotations.ch_names))) end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) def _write_annotations_csv(fname, annot): - pd = _check_pandas_installed(strict=True) - dt = _handle_meas_date(annot.orig_time) - if dt is None: - dt = _handle_meas_date(0) - dt = dt.replace(tzinfo=None) - onsets_dt = [dt + timedelta(seconds=o) for o in annot.onset] - df = pd.DataFrame(dict(onset=onsets_dt, duration=annot.duration, - description=annot.description)) - df.to_csv(fname, index=False) + annot = annot.to_data_frame() + if 'ch_names' in annot: + annot['ch_names'] = [ + _prep_name_list(ch, 'write') for ch in annot['ch_names']] + annot.to_csv(fname) def _write_annotations_txt(fname, annot): @@ -589,10 +720,16 @@ def _write_annotations_txt(fname, annot): # for backward compat, we do not write tzinfo (assumed UTC) content += ("# orig_time : %s \n" % annot.orig_time.replace(tzinfo=None)) - content += "# onset, duration, description\n" - - data = np.array([annot.onset, annot.duration, annot.description], - dtype=str).T + content += "# onset, duration, description" + data = [annot.onset, annot.duration, annot.description] + if annot._any_ch_names(): + content += ', ch_names' + data.append([_prep_name_list(ch, 'write') for ch in annot.ch_names]) + content += '\n' + data = np.array(data, dtype=str).T + assert data.ndim == 2 + assert data.shape[0] == len(annot.onset) + assert data.shape[1] in (3, 4) with open(fname, 'wb') as fid: fid.write(content.encode()) np.savetxt(fid, data, delimiter=',', fmt="%s") @@ -645,7 +782,7 @@ def read_annotations(fname, sfreq='auto', uint16_codec=None): _validate_type(fname, 'path-like', 'fname') fname = _check_fname( fname, overwrite='read', must_exist=True, - allow_dir=str(fname).endswith('.ds'), # allow_dir for CTF + need_dir=str(fname).endswith('.ds'), # for CTF name='fname') name = op.basename(fname) if name.endswith(('fif', 'fif.gz')): @@ -655,10 +792,10 @@ def read_annotations(fname, sfreq='auto', uint16_codec=None): annotations = _read_annotations_fif(fid, tree) elif name.endswith('txt'): orig_time = _read_annotations_txt_parse_header(fname) - onset, duration, description = _read_annotations_txt(fname) + onset, duration, description, ch_names = _read_annotations_txt(fname) annotations = Annotations(onset=onset, duration=duration, - description=description, - orig_time=orig_time) + description=description, orig_time=orig_time, + ch_names=ch_names) elif name.endswith('vmrk'): annotations = _read_annotations_brainvision(fname, sfreq=sfreq) @@ -711,7 +848,7 @@ def _read_annotations_csv(fname): The annotations. 
""" pd = _check_pandas_installed(strict=True) - df = pd.read_csv(fname) + df = pd.read_csv(fname, keep_default_na=False) orig_time = df['onset'].values[0] try: float(orig_time) @@ -725,7 +862,11 @@ def _read_annotations_csv(fname): onset = (onset_dt - onset_dt[0]).dt.total_seconds() duration = df['duration'].values.astype(float) description = df['description'].values - return Annotations(onset, duration, description, orig_time) + ch_names = None + if 'ch_names' in df.columns: + ch_names = [_prep_name_list(val, 'read') + for val in df['ch_names'].values] + return Annotations(onset, duration, description, orig_time, ch_names) def _read_brainstorm_annotations(fname, orig_time=None): @@ -792,15 +933,23 @@ def _read_annotations_txt(fname): warnings.simplefilter("ignore") out = np.loadtxt(fname, delimiter=',', dtype=np.bytes_, unpack=True) + ch_names = None if len(out) == 0: onset, duration, desc = [], [], [] else: - onset, duration, desc = out + _check_option('text header', len(out), (3, 4)) + if len(out) == 3: + onset, duration, desc = out + else: + onset, duration, desc, ch_names = out onset = [float(o.decode()) for o in np.atleast_1d(onset)] duration = [float(d.decode()) for d in np.atleast_1d(duration)] desc = [str(d.decode()).strip() for d in np.atleast_1d(desc)] - return onset, duration, desc + if ch_names is not None: + ch_names = [_prep_name_list(ch.decode().strip(), 'read') + for ch in ch_names] + return onset, duration, desc, ch_names def _read_annotations_fif(fid, tree): @@ -810,7 +959,7 @@ def _read_annotations_fif(fid, tree): annotations = None else: annot_data = annot_data[0] - orig_time = None + orig_time = ch_names = None onset, duration, description = list(), list(), list() for ent in annot_data['directory']: kind = ent.kind @@ -823,21 +972,18 @@ def _read_annotations_fif(fid, tree): duration = tag.data duration = list() if duration is None else duration - onset elif kind == FIFF.FIFF_COMMENT: - description = tag.data.split(':') - - # replace all "{COLON}" in FIF files with necessary - # : character - description = [d.replace('{COLON}', ':') for d in - description] + description = _prep_name_list(tag.data, 'read') elif kind == FIFF.FIFF_MEAS_DATE: orig_time = tag.data try: orig_time = float(orig_time) # old way except TypeError: orig_time = tuple(orig_time) # new way + elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: + ch_names = tuple(tuple(x) for x in json.loads(tag.data)) assert len(onset) == len(duration) == len(description) annotations = Annotations(onset, duration, description, - orig_time) + orig_time, ch_names) return annotations diff --git a/mne/baseline.py b/mne/baseline.py index 5b25aad5951..a204044f16c 100644 --- a/mne/baseline.py +++ b/mne/baseline.py @@ -59,9 +59,11 @@ def rescale(data, times, baseline, mode='mean', copy=True, picks=None, data_scaled: array Array of same shape as data after rescaling. """ - data = data.copy() if copy else data - msg = _log_rescale(baseline, mode) - logger.info(msg) + if copy: + data = data.copy() + if verbose is not False: + msg = _log_rescale(baseline, mode) + logger.info(msg) if baseline is None or data.shape[-1] == 0: return data @@ -120,3 +122,80 @@ def fun(d, m): for pi in picks: fun(data[..., pi, :], mean[..., pi, :]) return data + + +def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): + """Check if the baseline is valid, and adjust it if requested. + + ``None`` values inside the baseline parameter will be replaced with + ``times[0]`` and ``times[-1]``. 
+ + Parameters + ---------- + baseline : tuple | None + Beginning and end of the baseline period, in seconds. If ``None``, + assume no baseline and return immediately. + times : array + The time points. + sfreq : float + The sampling rate. + on_baseline_outside_data : 'raise' | 'info' | 'adjust' + What do do if the baseline period exceeds the data. + If ``'raise'``, raise an exception (default). + If ``'info'``, log an info message. + If ``'adjust'``, adjust the baseline such that it's within the data + range again. + + Returns + ------- + (baseline_tmin, baseline_tmax) | None + The baseline with ``None`` values replaced with times, and with + adjusted times if ``on_baseline_outside_data='adjust'``; or ``None`` + if the ``baseline`` parameter is ``None``. + + """ + if baseline is None: + return None + + if not isinstance(baseline, tuple) or len(baseline) != 2: + raise ValueError(f'`baseline={baseline}` is an invalid argument, must ' + f'be a tuple of length 2 or None') + + tmin, tmax = times[0], times[-1] + tstep = 1. / float(sfreq) + + # check default value of baseline and `tmin=0` + if baseline == (None, 0) and tmin == 0: + raise ValueError('Baseline interval is only one sample. Use ' + '`baseline=(0, 0)` if this is desired.') + + baseline_tmin, baseline_tmax = baseline + + if baseline_tmin is None: + baseline_tmin = tmin + baseline_tmin = float(baseline_tmin) + + if baseline_tmax is None: + baseline_tmax = tmax + baseline_tmax = float(baseline_tmax) + + if baseline_tmin > baseline_tmax: + raise ValueError( + "Baseline min (%s) must be less than baseline max (%s)" + % (baseline_tmin, baseline_tmax)) + + if (baseline_tmin < tmin - tstep) or (baseline_tmax > tmax + tstep): + msg = (f"Baseline interval [{baseline_tmin}, {baseline_tmax}] sec " + f"is outside of epochs data [{tmin}, {tmax}] sec. Epochs were " + f"probably cropped.") + if on_baseline_outside_data == 'raise': + raise ValueError(msg) + elif on_baseline_outside_data == 'info': + logger.info(msg) + elif on_baseline_outside_data == 'adjust': + if baseline_tmin < tmin - tstep: + baseline_tmin = tmin + if baseline_tmax > tmax + tstep: + baseline_tmax = tmax + + return baseline_tmin, baseline_tmax diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py index c0ccc435ba0..17a0680c8a3 100644 --- a/mne/beamformer/_compute_beamformer.py +++ b/mne/beamformer/_compute_beamformer.py @@ -9,7 +9,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from ..cov import Covariance, make_ad_hoc_cov from ..forward.forward import is_fixed_orient, _restrict_forward_to_src_sel @@ -94,7 +93,7 @@ def _prepare_beamformer_input(info, forward, label=None, pick_ori=None, orient_std = np.ones(gain.shape[1]) # Get the projector - proj, ncomp, _ = make_projector( + proj, _, _ = make_projector( info_picked['projs'], info_picked['ch_names']) return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, orient_std) @@ -142,7 +141,8 @@ def _sym_inv_sm(x, reduce_rank, inversion, sk): def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, - reduce_rank, rank, inversion, nn, orient_std): + reduce_rank, rank, inversion, nn, orient_std, + whitener): """Compute a spatial beamformer filter (LCMV or DICS). For more detailed information on the parameters, see the docstrings of @@ -172,6 +172,8 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, The source normals. orient_std : ndarray, shape (n_dipoles,) The std of the orientation prior used in weighting the lead fields. 
+ whitener : ndarray, shape (n_channels, n_channels) + The whitener. Returns ------- @@ -181,6 +183,13 @@ def _compute_beamformer(G, Cm, reg, n_orient, weight_norm, pick_ori, _check_option('weight_norm', weight_norm, ['unit-noise-gain-invariant', 'unit-noise-gain', 'nai', None]) + + # Whiten the data covariance + Cm = whitener @ Cm @ whitener.T.conj() + # Restore to properly Hermitian as large whitening coefs can have bad + # rounding error + Cm[:] = (Cm + Cm.T.conj()) / 2. + assert Cm.shape == (G.shape[0],) * 2 s, _ = np.linalg.eigh(Cm) if not (s >= -s.max() * 1e-7).all(): @@ -367,7 +376,7 @@ def _compute_bf_terms(Gk, Cm_inv): 'matrix or using regularization.') noise = loading_factor else: - noise, _ = linalg.eigh(Cm) + noise, _ = np.linalg.eigh(Cm) noise = noise[-rank] noise = max(noise, loading_factor) W /= np.sqrt(noise) @@ -450,8 +459,7 @@ def save(self, fname, overwrite=False, verbose=None): fname : str The filename to use to write the HDF5 data. Should end in ``'-lcmv.h5'`` or ``'-dics.h5'``. - overwrite : bool - If True, overwrite the file (if it exists). + %(overwrite)s %(verbose)s """ ending = '-%s.h5' % (self['kind'].lower(),) @@ -499,3 +507,15 @@ def read_beamformer(fname): for arg in ('data', 'names', 'bads', 'projs', 'nfree', 'eig', 'eigvec', 'method', 'loglik')]) return Beamformer(beamformer) + + +def _proj_whiten_data(M, proj, filters): + if filters.get('is_ssp', True): + # check whether data and filter projs match + _check_proj_match(proj, filters) + if filters['whitener'] is None: + M = np.dot(filters['proj'], M) + + if filters['whitener'] is not None: + M = np.dot(filters['whitener'], M) + return M diff --git a/mne/beamformer/_dics.py b/mne/beamformer/_dics.py index 6745dc1ee9f..deecb54616a 100644 --- a/mne/beamformer/_dics.py +++ b/mne/beamformer/_dics.py @@ -9,16 +9,19 @@ import numpy as np from ..channels import equalize_channels +from ..io.pick import pick_info, pick_channels from ..utils import (logger, verbose, warn, _check_one_ch_type, _check_channels_spatial_filter, _check_rank, - _check_option, _validate_type) + _check_option, _validate_type, deprecated) from ..forward import _subject_from_forward from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth +from ..rank import compute_rank from ..source_estimate import _make_stc, _get_src_type from ..time_frequency import csd_fourier, csd_multitaper, csd_morlet -from ._compute_beamformer import (_check_proj_match, _prepare_beamformer_input, +from ._compute_beamformer import (_prepare_beamformer_input, _compute_beamformer, _check_src_type, - Beamformer, _compute_power) + Beamformer, _compute_power, + _proj_whiten_data) @verbose @@ -164,8 +167,11 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies] n_freqs = len(frequencies) - _check_one_ch_type('dics', info, forward, csd, noise_csd) - info, fwd, csd = equalize_channels([info, forward, csd]) + _, _, allow_mismatch = _check_one_ch_type('dics', info, forward, csd, + noise_csd) + # remove bads so that equalize_channels only keeps all good + info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) + info, forward, csd = equalize_channels([info, forward, csd]) csd, noise_csd = _prepare_noise_csd(csd, noise_csd, real_filter) @@ -177,6 +183,23 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, _prepare_beamformer_input( info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank, pca=False, **depth) + + # Compute ranks + 
csd_int_rank = [] + if not allow_mismatch: + noise_rank = compute_rank(noise_csd, info=info, rank=rank) + for i in range(len(frequencies)): + csd_rank = compute_rank(csd.get_data(index=i, as_cov=True), + info=info, rank=rank) + if not allow_mismatch: + for key in csd_rank: + if key not in noise_rank or csd_rank[key] != noise_rank[key]: + raise ValueError('%s data rank (%s) did not match the ' + 'noise rank (%s)' + % (key, csd_rank[key], + noise_rank.get(key, None))) + csd_int_rank.append(sum(csd_rank.values())) + del noise_csd ch_names = list(info['ch_names']) @@ -189,17 +212,18 @@ def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, (freq, i + 1, n_freqs)) Cm = csd.get_data(index=i) + + # XXX: Weird that real_filter happens *before* whitening, which could + # make things complex again...? if real_filter: Cm = Cm.real - # Whiten the CSD - Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) - # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, - rank=rank, inversion=inversion, nn=nn, orient_std=orient_std) + rank=csd_int_rank[i], inversion=inversion, nn=nn, + orient_std=orient_std, whitener=whitener) Ws.append(W) max_oris.append(max_power_ori) @@ -256,9 +280,7 @@ def _apply_dics(data, filters, info, tmin): logger.info("Processing epoch : %d" % (i + 1)) # Apply SSPs - if info['projs']: - _check_proj_match(info['projs'], filters) - M = np.dot(filters['proj'], M) + M = _proj_whiten_data(M, info['projs'], filters) stcs = [] for W in Ws: @@ -471,6 +493,9 @@ def apply_dics_csd(csd, filters, verbose=None): frequencies) +@deprecated( + 'tf_dics is deprecated and will be removed in 0.24, use LCMV with ' + 'covariances matrices computed on band-passed data or DICS instead.') @verbose def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths, subtract_evoked=False, mode='fourier', freq_bins=None, @@ -654,6 +679,10 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths, raise ValueError('When using multitaper mode and specifying ' 'multitaper transform bandwidth, one value must be ' 'provided per frequency bin') + if isinstance(cwt_n_cycles, (int, float)): + # create a list out of single values to match n_freq_bins + n_cyc = cwt_n_cycles + cwt_n_cycles = [n_cyc] * n_freq_bins # Multiplying by 1e3 to avoid numerical issues, e.g. 
0.3 // 0.05 == 5 n_time_steps = int(((tmax - tmin) * 1e3) // (tstep * 1e3)) @@ -678,6 +707,7 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths, freq_bin = frequencies[i_freq] fmin = np.min(freq_bin) fmax = np.max(freq_bin) + n_cycles = cwt_n_cycles[i_freq] else: fmin, fmax = freq_bins[i_freq] if n_ffts is None: @@ -728,7 +758,7 @@ def tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths, elif mode == 'cwt_morlet': csd = csd_morlet( epochs, frequencies=freq_bin, tmin=win_tmin, - tmax=win_tmax, n_cycles=cwt_n_cycles, decim=decim, + tmax=win_tmax, n_cycles=n_cycles, decim=decim, verbose=False) else: raise ValueError('Invalid mode, choose either ' diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py index c58bace14e9..f58a6305dd7 100644 --- a/mne/beamformer/_lcmv.py +++ b/mne/beamformer/_lcmv.py @@ -16,8 +16,8 @@ from ..utils import logger, verbose, _check_channels_spatial_filter from ..utils import _check_one_ch_type, _check_info_inv from ._compute_beamformer import ( - _check_proj_match, _prepare_beamformer_input, _compute_power, - _compute_beamformer, _check_src_type, Beamformer) + _prepare_beamformer_input, _compute_power, + _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) @verbose @@ -168,13 +168,6 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, Cm = data_cov._get_square() if 'estimator' in data_cov: del data_cov['estimator'] - - # Whiten the data covariance - Cm = np.dot(whitener, np.dot(Cm, whitener.T)) - # Restore to positive semi-definite, as - # (negative eigenvalues are errant / due to massive scaling differences) - s, u = np.linalg.eigh(Cm) - Cm = np.dot(u * np.abs(s), u.T.conj()) rank_int = sum(rank.values()) del rank @@ -182,7 +175,8 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, - inversion=inversion, nn=nn, orient_std=orient_std) + inversion=inversion, nn=nn, orient_std=orient_std, + whitener=whitener) # get src type to store with filters for _make_stc src_type = _get_src_type(forward['src'], vertno) @@ -206,18 +200,6 @@ def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, return filters -def _proj_whiten_data(M, proj, filters): - if filters['is_ssp']: - # check whether data and filter projs match - _check_proj_match(proj, filters) - if filters['whitener'] is None: - M = np.dot(filters['proj'], M) - - if filters['whitener'] is not None: - M = np.dot(filters['whitener'], M) - return M - - def _apply_lcmv(data, filters, info, tmin, max_ori_out): """Apply LCMV spatial filter to data for source reconstruction.""" if max_ori_out != 'signed': diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 60ffe1e741a..ce87d983d8a 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -6,7 +6,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from ..forward import is_fixed_orient, convert_forward_solution from ..io.pick import pick_channels_evoked, pick_info, pick_channels_forward @@ -47,6 +46,7 @@ def _apply_rap_music(data, info, times, forward, noise_cov, n_dipoles=2, selected active dipoles and their estimated orientation. Computed only if return_explained_data is True. 
""" + from scipy import linalg info = pick_info(info, picks) del picks # things are much simpler if we avoid surface orientation @@ -184,6 +184,7 @@ def _make_dipoles(times, poss, oris, sol, gof): def _compute_subcorr(G, phi_sig): """Compute the subspace correlation.""" + from scipy import linalg Ug, Sg, Vg = linalg.svd(G, full_matrices=False) # Now we look at the actual rank of the forward fields # in G and handle the fact that it might be rank defficient @@ -197,11 +198,12 @@ def _compute_subcorr(G, phi_sig): tmp = np.dot(Ug.T.conjugate(), phi_sig) Uc, Sc, _ = linalg.svd(tmp, full_matrices=False) X = np.dot(Vg.T / Sg[None, :], Uc[:, 0]) # subcorr - return Sc[0], X / linalg.norm(X) + return Sc[0], X / np.linalg.norm(X) def _compute_proj(A): """Compute the orthogonal projection operation for a manifold vector A.""" + from scipy import linalg U, _, _ = linalg.svd(A, full_matrices=False) return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate()) diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 9c70742475f..4e93296f786 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -18,7 +18,7 @@ from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.beamformer._dics import _prepare_noise_csd from mne.time_frequency import csd_morlet -from mne.utils import run_tests_if_main, object_diff, requires_h5py +from mne.utils import object_diff, requires_h5py, catch_logging from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest from mne.beamformer.tests.test_lcmv import _assert_weight_norm @@ -93,7 +93,7 @@ def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default evoked = epochs.average() # Compute the cross-spectral density matrix - csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10) + csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5) labels = mne.read_labels_from_annot( 'sample', hemi='lh', subjects_dir=subjects_dir) @@ -107,14 +107,47 @@ def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default return epochs, evoked, csd, source_vertno, label, vertices, source_ind -idx_param = pytest.mark.parametrize('idx', [0, 100, 200, 233]) +idx_param = pytest.mark.parametrize('idx', [ + 0, + pytest.param(100, marks=pytest.mark.slowtest), + 200, + pytest.param(233, marks=pytest.mark.slowtest), +]) + + +def _rand_csd(rng, info): + scales = mne.make_ad_hoc_cov(info).data + n = scales.size + # Some random complex correlation structure (with channel scalings) + data = rng.randn(n, n) + 1j * rng.randn(n, n) + data = data @ data.conj().T + data *= scales + data *= scales[:, np.newaxis] + data.flat[::n + 1] = scales + return data + + +def _make_rand_csd(info, csd): + rng = np.random.RandomState(0) + data = _rand_csd(rng, info) + # now we need to have the same null space as the data csd + s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0])) + mask = np.abs(s) >= s[-1] * 1e-7 + rank = mask.sum() + assert rank == len(data) == len(info['ch_names']) + noise_csd = CrossSpectralDensity( + _sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft) + return noise_csd, rank @pytest.mark.slowtest @testing.requires_testing_data @requires_h5py @idx_param -@pytest.mark.parametrize('whiten', (False, True)) +@pytest.mark.parametrize('whiten', [ + pytest.param(False, marks=pytest.mark.slowtest), + True, +]) def test_make_dics(tmpdir, _load_forward, idx, whiten): """Test making DICS beamformer filters.""" # We only test 
proper handling of parameters here. Testing the results is @@ -126,17 +159,8 @@ def test_make_dics(tmpdir, _load_forward, idx, whiten): with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: - rng = np.random.RandomState(0) - scales = mne.make_ad_hoc_cov(epochs.info).data - n = scales.size - # Some random complex correlation structure (with channel scalings) - data = rng.randn(n, n) + 1j * rng.randn(n, n) - data = data @ data.conj().T - data *= scales - data *= scales[:, np.newaxis] - data.flat[::n + 1] = scales - noise_csd = CrossSpectralDensity( - _sym_mat_to_vector(data), epochs.ch_names, 0., csd.n_fft) + noise_csd, rank = _make_rand_csd(epochs.info, csd) + assert rank == len(epochs.info['ch_names']) == 62 else: noise_csd = None epochs.pick_types(meg='grad') @@ -550,12 +574,13 @@ def test_apply_dics_timeseries(_load_forward, idx): @pytest.mark.slowtest @testing.requires_testing_data -@idx_param -def test_tf_dics(_load_forward, idx): +@pytest.mark.filterwarnings('ignore:.*tf_dics is dep.*:DeprecationWarning') +def test_tf_dics(_load_forward): """Test 5D time-frequency beamforming based on DICS.""" fwd_free, fwd_surf, fwd_fixed, _ = _load_forward + # idx isn't really used so let's just simulate one epochs, _, _, source_vertno, label, vertices, source_ind = \ - _simulate_data(fwd_fixed, idx) + _simulate_data(fwd_fixed, idx=0) reg = 1 # Lots of regularization for our toy dataset tmin = 0 @@ -659,6 +684,15 @@ def test_tf_dics(_load_forward, idx): win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper', mt_bandwidths=[20]) + # Test if 'cwt_morlet' mode works with both fixed cycle numbers and lists + # of cycle numbers + tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, + win_lengths, frequencies=frequencies, mode='cwt_morlet', + cwt_n_cycles=7) + tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, + win_lengths, frequencies=frequencies, mode='cwt_morlet', + cwt_n_cycles=[5., 7.]) + # Test if subtracting evoked responses yields NaN's, since we only have one # epoch. Suppress division warnings. 
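# A small numeric sketch of the comment above: with a single epoch the evoked
# response equals that epoch, so subtracting it leaves an all-zero residual,
# and any later scaling of that residual runs into 0/0, i.e. NaN. (This only
# illustrates the arithmetic, not the exact tf_dics code path.)
import numpy as np

epoch = np.random.RandomState(0).randn(1, 4, 10)  # (n_epochs, n_ch, n_times)
evoked = epoch.mean(axis=0)                       # identical to the one epoch
residual = epoch - evoked                         # all zeros
with np.errstate(invalid='ignore'):
    normed = residual / np.abs(residual)          # 0 / 0 -> NaN
assert np.isnan(normed).all()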
assert len(epochs) == 1, len(epochs) @@ -669,4 +703,106 @@ def test_tf_dics(_load_forward, idx): assert np.all(np.isnan(stcs[0].data)) -run_tests_if_main() +def _cov_as_csd(cov, info): + rng = np.random.RandomState(0) + assert cov['data'].ndim == 2 + assert len(cov['data']) == len(cov['names']) + # we need to make this have at least some complex structure + data = cov['data'] + 1e-1 * _rand_csd(rng, info) + assert data.dtype == np.complex128 + return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16) + + +# Just test free ori here (assume fixed is same as LCMV if these are) +# Changes here should be synced with test_lcmv.py +@pytest.mark.slowtest +@pytest.mark.parametrize( + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [ + (0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False), + (0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True), + (0.05, None, 'unit-noise-gain', False, None, 13, 14, False), + (0.05, None, 'unit-noise-gain', True, None, 35, 37, False), + (0.05, None, 'nai', True, None, 35, 37, False), + (0.05, None, None, True, None, 12, 14, False), + (0.05, None, None, True, 0.8, 39, 43, False), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, + False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False), + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True), + (0.05, 'max-power', 'nai', True, None, 21, 24, False), + (0.05, 'max-power', None, True, None, 7, 10, False), + (0.05, 'max-power', None, True, 0.8, 15, 18, False), + # skip most no-reg tests, assume others are equal to LCMV if these are + (0.00, None, None, True, None, 21, 32, False), + (0.00, 'max-power', None, True, None, 13, 19, False), + ]) +def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, + use_cov, depth, lower, upper, real_filter): + """Test localization bias for free-orientation DICS.""" + evoked, fwd, noise_cov, data_cov, want = bias_params_free + noise_csd = _cov_as_csd(noise_cov, evoked.info) + data_csd = _cov_as_csd(data_cov, evoked.info) + del noise_cov, data_cov + if not use_cov: + evoked.pick_types(meg='grad') + noise_csd = None + loc = apply_dics(evoked, make_dics( + evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori, + weight_norm=weight_norm, depth=depth, real_filter=real_filter)).data + loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + perc = (want == np.argmax(loc, axis=0)).mean() * 100 + assert lower <= perc <= upper + + +@testing.requires_testing_data +@idx_param +@pytest.mark.parametrize('whiten', (False, True)) +def test_make_dics_rank(_load_forward, idx, whiten): + """Test making DICS beamformer filters with rank param.""" + _, fwd_surf, fwd_fixed, _ = _load_forward + epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx) + if whiten: + noise_csd, want_rank = _make_rand_csd(epochs.info, csd) + kind = 'mag + grad' + else: + noise_csd = None + epochs.pick_types(meg='grad') + want_rank = len(epochs.ch_names) + assert want_rank == 41 + kind = 'grad' + + with catch_logging() as log: + filters = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + verbose=True) + log = log.getvalue() + assert f'Estimated rank ({kind}): {want_rank}' in log, log + stc, _ = apply_dics_csd(csd, filters) + other_rank = want_rank - 1 # shouldn't make a huge 
difference + use_rank = dict(meg=other_rank) + if not whiten: + # XXX it's a bug that our rank functions don't treat "meg" + # properly here... + use_rank['grad'] = use_rank.pop('meg') + with catch_logging() as log: + filters_2 = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + rank=use_rank, verbose=True) + log = log.getvalue() + assert f'Computing rank from covariance with rank={use_rank}' in log, log + stc_2, _ = apply_dics_csd(csd, filters_2) + corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1] + assert 0.8 < corr < 0.99999 + + # degenerate conditions + if whiten: + # make rank deficient + data = noise_csd.get_data(0.) + data[0] = data[:0] = 0 + noise_csd._data[:, 0] = _sym_mat_to_vector(data) + with pytest.raises(ValueError, match='meg data rank.*the noise rank'): + filters = make_dics( + epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd, + verbose=True) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 2beb8ad15a6..11da392751c 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -6,13 +6,13 @@ from scipy import linalg from scipy.spatial.distance import cdist from numpy.testing import (assert_array_almost_equal, assert_array_equal, - assert_almost_equal, assert_allclose, - assert_array_less) + assert_allclose, assert_array_less) import mne +from mne.transforms import apply_trans, invert_transform from mne import (convert_forward_solution, read_forward_solution, compute_rank, VolVectorSourceEstimate, VolSourceEstimate, EvokedArray, - pick_channels_cov) + pick_channels_cov, read_vectorview_selection) from mne.beamformer import (make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw, Beamformer, read_beamformer, apply_lcmv_cov, make_dics) @@ -20,7 +20,9 @@ from mne.datasets import testing from mne.fixes import _get_args from mne.io.compensator import set_current_comp +from mne.io.constants import FIFF from mne.minimum_norm import make_inverse_operator, apply_inverse +from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match from mne.simulation import simulate_evoked from mne.utils import object_diff, requires_h5py, catch_logging @@ -68,7 +70,7 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, # Setup for reading the raw data raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels # Set up pick list: MEG - bad channels - left_temporal_channels = mne.read_selection('Left-temporal') + left_temporal_channels = read_vectorview_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, selection=left_temporal_channels) picks = picks[::2] # decimate for speed @@ -127,9 +129,9 @@ def test_lcmv_vector(): forward = mne.read_forward_solution(fname_fwd) forward = mne.pick_channels_forward(forward, info['ch_names']) - vertices = [s['vertno'][::100] for s in forward['src']] + vertices = [s['vertno'][::200] for s in forward['src']] n_vertices = sum(len(v) for v in vertices) - assert 5 < n_vertices < 20 + assert n_vertices == 4 amplitude = 100e-9 stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices, @@ -204,92 +206,89 @@ def test_lcmv_vector(): @pytest.mark.slowtest @requires_h5py @testing.requires_testing_data -@pytest.mark.parametrize('reg', (0.01, 0.)) -@pytest.mark.parametrize('proj', (True, False)) -def test_make_lcmv(tmpdir, reg, proj): +@pytest.mark.parametrize('reg, proj, kind', [ + (0.01, True, 'volume'), + (0., False, 'volume'), + (0.01, False, 'surface'), + (0., True, 'surface'), +]) +def 
test_make_lcmv_bem(tmpdir, reg, proj, kind): """Test LCMV with evoked data and single trials.""" raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj) - for fwd in [forward, forward_vol]: - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov) - stc = apply_lcmv(evoked, filters, max_ori_out='signed') - stc.crop(0.02, None) + if kind == 'surface': + fwd = forward + else: + fwd = forward_vol + assert kind == 'volume' - stc_pow = np.sum(np.abs(stc.data), axis=1) - idx = np.argmax(stc_pow) - max_stc = stc.data[idx] - tmax = stc.times[np.argmax(max_stc)] - - assert 0.08 < tmax < 0.15, tmax - assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) - - if fwd is forward: - # Test picking normal orientation (surface source space only). - filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, - reg=reg, noise_cov=noise_cov, - pick_ori='normal', weight_norm=None) - stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_normal.crop(0.02, None) - - stc_pow = np.sum(np.abs(stc_normal.data), axis=1) - idx = np.argmax(stc_pow) - max_stc = stc_normal.data[idx] - tmax = stc_normal.times[np.argmax(max_stc)] - - lower = 0.04 if proj else 0.025 - assert lower < tmax < 0.14, tmax - lower = 3e-7 if proj else 2e-7 - assert lower < np.max(max_stc) < 3e-6, np.max(max_stc) - - # No weight normalization was applied, so the amplitude of normal - # orientation results should always be smaller than free - # orientation results. - assert (np.abs(stc_normal.data) <= stc.data).all() - - # Test picking source orientation maximizing output source power - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov, pick_ori='max-power') - stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_max_power.crop(0.02, None) - stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov) + stc = apply_lcmv(evoked, filters, max_ori_out='signed') + stc.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = stc.data[idx] + tmax = stc.times[np.argmax(max_stc)] + + assert 0.08 < tmax < 0.15, tmax + assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc) + + if kind == 'surface': + # Test picking normal orientation (surface source space only). 
+ filters = make_lcmv(evoked.info, forward_surf_ori, data_cov, + reg=reg, noise_cov=noise_cov, + pick_ori='normal', weight_norm=None) + stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed') + stc_normal.crop(0.02, None) + + stc_pow = np.sum(np.abs(stc_normal.data), axis=1) idx = np.argmax(stc_pow) - max_stc = np.abs(stc_max_power.data[idx]) - tmax = stc.times[np.argmax(max_stc)] - - lower = 0.08 if proj else 0.04 - assert lower < tmax < 0.15, tmax - assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) - - stc_max_power.data[:, :] = np.abs(stc_max_power.data) - - if fwd is forward: - # Maximum output source power orientation results should be - # similar to free orientation results in areas with channel - # coverage - label = mne.read_label(fname_label) - mean_stc = stc.extract_label_time_course(label, fwd['src'], - mode='mean') - mean_stc_max_pow = \ - stc_max_power.extract_label_time_course(label, fwd['src'], - mode='mean') - assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) - - # Test NAI weight normalization: - filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, - noise_cov=noise_cov, pick_ori='max-power', - weight_norm='nai') - stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed') - stc_nai.crop(0.02, None) - - # Test whether unit-noise-gain solution is a scaled version of NAI - pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)), - np.concatenate(stc_max_power.data)) - assert_almost_equal(pearsoncorr[0, 1], 1.) + max_stc = stc_normal.data[idx] + tmax = stc_normal.times[np.argmax(max_stc)] + + lower = 0.04 if proj else 0.025 + assert lower < tmax < 0.14, tmax + lower = 3e-7 if proj else 2e-7 + assert lower < np.max(max_stc) < 3e-6, np.max(max_stc) + + # No weight normalization was applied, so the amplitude of normal + # orientation results should always be smaller than free + # orientation results. 
+ assert (np.abs(stc_normal.data) <= stc.data).all() + + # Test picking source orientation maximizing output source power + filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg, + noise_cov=noise_cov, pick_ori='max-power') + stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed') + stc_max_power.crop(0.02, None) + stc_pow = np.sum(np.abs(stc_max_power.data), axis=1) + idx = np.argmax(stc_pow) + max_stc = np.abs(stc_max_power.data[idx]) + tmax = stc.times[np.argmax(max_stc)] + + lower = 0.08 if proj else 0.04 + assert lower < tmax < 0.15, tmax + assert 0.8 < np.max(max_stc) < 3., np.max(max_stc) + + stc_max_power.data[:, :] = np.abs(stc_max_power.data) + + if kind == 'surface': + # Maximum output source power orientation results should be + # similar to free orientation results in areas with channel + # coverage + label = mne.read_label(fname_label) + mean_stc = stc.extract_label_time_course( + label, fwd['src'], mode='mean') + mean_stc_max_pow = \ + stc_max_power.extract_label_time_course( + label, fwd['src'], mode='mean') + assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0) # Test if spatial filter contains src_type - assert 'src_type' in filters + assert filters['src_type'] == kind # __repr__ assert len(evoked.ch_names) == 22 @@ -298,7 +297,7 @@ def test_make_lcmv(tmpdir, reg, proj): rank = 17 if proj else 20 assert 'LCMV' in repr(filters) assert 'unknown subject' not in repr(filters) - assert '4157 vert' in repr(filters) + assert f'{fwd["nsource"]} vert' in repr(filters) assert '20 ch' in repr(filters) assert 'rank %s' % rank in repr(filters) @@ -314,6 +313,9 @@ def test_make_lcmv(tmpdir, reg, proj): filters['rank'] = int(filters['rank']) assert object_diff(filters, filters_read) == '' + if kind != 'surface': + return + # Test if fixed forward operator is detected when picking normal or # max-power orientation pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov, @@ -354,9 +356,8 @@ def test_make_lcmv(tmpdir, reg, proj): # this channel from the data # also test here that no warnings are thrown - implemented to check whether # src should not be None warning occurs - with pytest.warns(None) as w: - stc = apply_lcmv(evoked, filters, max_ori_out='signed') - assert len(w) == 0 + stc = apply_lcmv(evoked, filters, max_ori_out='signed') + # the result should be equal to applying this filter to a dataset without # this channel: stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed') @@ -364,11 +365,16 @@ def test_make_lcmv(tmpdir, reg, proj): # Test if non-matching SSP projection is detected in application of filter if proj: - raw_proj = deepcopy(raw) - raw_proj.del_proj() + raw_proj = raw.copy().del_proj() with pytest.raises(ValueError, match='do not match the projections'): apply_lcmv_raw(raw_proj, filters, max_ori_out='signed') + # Test apply_lcmv_raw + use_raw = raw.copy().crop(0, 1) + stc = apply_lcmv_raw(use_raw, filters) + assert_allclose(stc.times, use_raw.times) + assert_array_equal(stc.vertices[0], forward_vol['src'][0]['vertno']) + # Test if spatial filter contains src_type assert 'src_type' in filters @@ -430,8 +436,13 @@ def test_make_lcmv(tmpdir, reg, proj): @testing.requires_testing_data @pytest.mark.slowtest -@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain', 'nai')) -@pytest.mark.parametrize('pick_ori', (None, 'max-power', 'vector')) +@pytest.mark.parametrize('weight_norm, pick_ori', [ + ('unit-noise-gain', 'max-power'), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', None), + ('nai', 'vector'), + (None, 
'max-power'), +]) def test_make_lcmv_sphere(pick_ori, weight_norm): """Test LCMV with sphere head model.""" # unit-noise gain beamformer and orientation @@ -446,10 +457,11 @@ def test_make_lcmv_sphere(pick_ori, weight_norm): # Test that we get an error if not reducing rank with pytest.raises(ValueError, match='Singular matrix detected'): - make_lcmv( - evoked.info, fwd_sphere, data_cov, reg=0.1, - noise_cov=noise_cov, weight_norm=weight_norm, - pick_ori=pick_ori, reduce_rank=False, rank='full') + with pytest.warns(RuntimeWarning, match='positive semidefinite'): + make_lcmv( + evoked.info, fwd_sphere, data_cov, reg=0.1, + noise_cov=noise_cov, weight_norm=weight_norm, + pick_ori=pick_ori, reduce_rank=False, rank='full') # Now let's reduce it filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1, @@ -475,33 +487,6 @@ def test_make_lcmv_sphere(pick_ori, weight_norm): assert min_ < np.max(max_stc) < max_, (min_, np.max(max_stc), max_) -@testing.requires_testing_data -def test_lcmv_raw(): - """Test LCMV with raw data.""" - raw, _, _, _, noise_cov, label, forward, _, _, _ =\ - _get_data(all_forward=False, epochs=False, data_cov=False) - - tmin, tmax = 0, 20 - start, stop = raw.time_as_index([tmin, tmax]) - - # use only the left-temporal MEG channels for LCMV - data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax) - filters = make_lcmv(raw.info, forward, data_cov, reg=0.01, - noise_cov=noise_cov, label=label) - stc = apply_lcmv_raw(raw, filters, start=start, stop=stop, - max_ori_out='signed') - - assert_array_almost_equal(np.array([tmin, tmax]), - np.array([stc.times[0], stc.times[-1]]), - decimal=2) - - # make sure we get an stc with vertices only in the lh - vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']] - assert len(stc.vertices[0]) == len(np.intersect1d(vertno[0], - label.vertices)) - assert len(stc.vertices[1]) == 0 - - @testing.requires_testing_data @pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain')) @pytest.mark.parametrize('pick_ori', ('max-power', 'normal')) @@ -538,16 +523,15 @@ def test_lcmv_ctf_comp(): ctf_dir = op.join(testing.data_path(download=False), 'CTF') raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds') raw = mne.io.read_raw_ctf(raw_fname, preload=True) + raw.pick(raw.ch_names[:70]) events = mne.make_fixed_length_events(raw, duration=0.2)[:2] epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.2) evoked = epochs.average() - with pytest.warns(RuntimeWarning, - match='Too few samples .* estimate may be unreliable'): - data_cov = mne.compute_covariance(epochs) + data_cov = mne.compute_covariance(epochs) fwd = mne.make_forward_solution(evoked.info, None, - mne.setup_volume_source_space(pos=15.0), + mne.setup_volume_source_space(pos=30.0), mne.make_sphere_model()) with pytest.raises(ValueError, match='reduce_rank'): make_lcmv(evoked.info, fwd, data_cov) @@ -562,8 +546,12 @@ def test_lcmv_ctf_comp(): @testing.requires_testing_data -@pytest.mark.parametrize('proj', [False, True]) -@pytest.mark.parametrize('weight_norm', (None, 'nai', 'unit-noise-gain')) +@pytest.mark.parametrize('proj, weight_norm', [ + (True, 'unit-noise-gain'), + (False, 'unit-noise-gain'), + pytest.param(True, None, marks=pytest.mark.slowtest), + pytest.param(True, 'nai', marks=pytest.mark.slowtest), +]) def test_lcmv_reg_proj(proj, weight_norm): """Test LCMV with and without proj.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) @@ -666,50 +654,119 @@ def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, assert lower <= perc <= upper 
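# The lower/upper bounds in the parametrizations above and below constrain a
# single percentage: how often the beamformer output peaks at the simulated
# source. A self-contained sketch of that metric on synthetic numbers:
import numpy as np

rng = np.random.RandomState(0)
n_src = 20
loc = rng.rand(n_src, n_src)                    # |amplitude|: sources x simulations
loc[np.arange(n_src), np.arange(n_src)] += 5.0  # make the true source the peak
want = np.arange(n_src)                         # true source index per simulation
perc = (want == np.argmax(loc, axis=0)).mean() * 100
assert perc == 100.0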
+# Changes here should be synced with test_dics.py @pytest.mark.parametrize( - 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper', [ - (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28), - (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42), - (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14), - (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37), - (0.05, 'vector', 'nai', True, None, 35, 37), - (0.05, 'vector', None, True, None, 12, 14), - (0.05, 'vector', None, True, 0.8, 39, 43), - (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20), - (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20), - (0.05, 'max-power', 'nai', True, None, 21, 24), - (0.05, 'max-power', None, True, None, 7, 10), - (0.05, 'max-power', None, True, 0.8, 15, 18), - (0.05, None, None, True, 0.8, 40, 42), + 'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, ' + 'lower_ori, upper_ori', [ + (0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28, 0.82, 0.84), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42, 0.96, 0.98), # noqa: E501 + (0.05, 'vector', 'unit-noise-gain', False, None, 13, 14, 0.79, 0.81), + (0.05, 'vector', 'unit-noise-gain', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', 'nai', True, None, 35, 37, 0.98, 0.99), + (0.05, 'vector', None, True, None, 12, 14, 0.97, 0.98), + (0.05, 'vector', None, True, 0.8, 39, 43, 0.97, 0.98), + (0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20, 0, 0), # noqa: E501 + (0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, 0, 0), + (0.05, 'max-power', 'nai', True, None, 21, 24, 0, 0), + (0.05, 'max-power', None, True, None, 7, 10, 0, 0), + (0.05, 'max-power', None, True, 0.8, 15, 18, 0, 0), + (0.05, None, None, True, 0.8, 40, 42, 0, 0), # no reg - (0.00, 'vector', None, True, None, 21, 32), - (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 50, 65), - (0.00, 'vector', 'unit-noise-gain', True, None, 42, 65), - (0.00, 'vector', 'nai', True, None, 42, 65), - (0.00, 'max-power', None, True, None, 13, 19), - (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 43, 50), - (0.00, 'max-power', 'unit-noise-gain', True, None, 43, 50), - (0.00, 'max-power', 'nai', True, None, 43, 50), + (0.00, 'vector', None, True, None, 23, 24, 0.96, 0.97), + (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 52, 54, 0.95, 0.96), # noqa: E501 + (0.00, 'vector', 'unit-noise-gain', True, None, 44, 46, 0.97, 0.98), + (0.00, 'vector', 'nai', True, None, 44, 46, 0.97, 0.98), + (0.00, 'max-power', None, True, None, 14, 15, 0, 0), + (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 35, 37, 0, 0), # noqa: E501 + (0.00, 'max-power', 'unit-noise-gain', True, None, 35, 37, 0, 0), + (0.00, 'max-power', 'nai', True, None, 35, 37, 0, 0), ]) def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm, - use_cov, depth, lower, upper): + use_cov, depth, lower, upper, + lower_ori, upper_ori): """Test localization bias for free-orientation LCMV.""" evoked, fwd, noise_cov, data_cov, want = bias_params_free if not use_cov: evoked.pick_types(meg='grad') noise_cov = None - loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg, - noise_cov, pick_ori=pick_ori, - weight_norm=weight_norm, - depth=depth)).data + with pytest.warns(None): # rank deficiency of data_cov + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori=pick_ori, + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, 
filters).data + if pick_ori == 'vector': + ori = loc.copy() / np.linalg.norm(loc, axis=1, keepdims=True) + else: + # doesn't make sense for pooled (None) or max-power (can't be all 3) + ori = None loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc) # Compute the percentage of sources for which there is no loc bias: - perc = (want == np.argmax(loc, axis=0)).mean() * 100 + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 assert lower <= perc <= upper + _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori) -@pytest.mark.parametrize('weight_norm', ('nai', 'unit-noise-gain')) -@pytest.mark.parametrize('pick_ori', ('vector', 'max-power', None)) +# Changes here should be synced with the ones above, but these have meaningful +# orientation values +@pytest.mark.parametrize( + 'reg, weight_norm, use_cov, depth, lower, upper, lower_ori, upper_ori', [ + (0.05, 'unit-noise-gain-invariant', False, None, 38, 40, 0.52, 0.54), + (0.05, 'unit-noise-gain', False, None, 38, 40, 0.52, 0.54), + (0.05, 'nai', True, None, 56, 57, 0.56, 0.58), + (0.05, None, True, None, 27, 28, 0.54, 0.56), + (0.05, None, True, 0.8, 42, 43, 0.54, 0.56), + # no reg + (0.00, None, True, None, 50, 51, 0.57, 0.58), + (0.00, 'unit-noise-gain-invariant', True, None, 73, 75, 0.57, 0.58), + (0.00, 'unit-noise-gain', True, None, 73, 75, 0.57, 0.58), + (0.00, 'nai', True, None, 73, 75, 0.57, 0.58), + ]) +def test_orientation_max_power(bias_params_fixed, bias_params_free, + reg, weight_norm, use_cov, depth, lower, upper, + lower_ori, upper_ori): + """Test orientation selection for bias for max-power LCMV.""" + # we simulate data for the fixed orientation forward and beamform using + # the free orientation forward, and check the orientation match at the end + evoked, _, noise_cov, data_cov, want = bias_params_fixed + fwd = bias_params_free[1] + if not use_cov: + evoked.pick_types(meg='grad') + noise_cov = None + with pytest.warns(None): # rank deficiency of data_cov + filters = make_lcmv(evoked.info, fwd, data_cov, reg, + noise_cov, pick_ori='max-power', + weight_norm=weight_norm, + depth=depth) + loc = apply_lcmv(evoked, filters).data + ori = filters['max_power_ori'] + loc = np.abs(loc) + # Compute the percentage of sources for which there is no loc bias: + max_idx = np.argmax(loc, axis=0) + perc = (want == max_idx).mean() * 100 + assert lower <= perc <= upper + # Compute the dot products of our forward normals and + assert fwd['coord_frame'] == FIFF.FIFFV_COORD_HEAD + nn = np.concatenate( + [s['nn'][v] for s, v in zip(fwd['src'], filters['vertices'])]) + nn = nn[want] + nn = apply_trans(invert_transform(fwd['mri_head_t']), nn, move=False) + assert_allclose(np.linalg.norm(nn, axis=1), 1, atol=1e-6) + assert_allclose(np.linalg.norm(ori, axis=1), 1, atol=1e-12) + dots = np.abs((nn * ori).sum(-1)) + assert_array_less(dots, 1) + assert_array_less(0, dots) + got = np.mean(dots) + assert lower_ori < got < upper_ori + + +@pytest.mark.parametrize('weight_norm, pick_ori', [ + pytest.param('nai', 'max-power', marks=pytest.mark.slowtest), + ('unit-noise-gain', 'vector'), + ('unit-noise-gain', 'max-power'), + pytest.param('unit-noise-gain', None, marks=pytest.mark.slowtest), +]) def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori): """Test that depth weighting does not matter for normalized filters.""" evoked, fwd, noise_cov, data_cov, _ = bias_params_free @@ -745,12 +802,24 @@ def test_lcmv_maxfiltered(): make_lcmv(epochs.info, fwd, data_cov, rank=use_rank) +# To reduce test time, 
only test combinations that should matter rather than +# all of them @testing.requires_testing_data -@pytest.mark.parametrize('pick_ori', ['vector', 'max-power', 'normal']) -@pytest.mark.parametrize( - 'weight_norm', ['unit-noise-gain', 'nai', 'unit-noise-gain-invariant']) -@pytest.mark.parametrize('reg', (0.05, 0.)) -@pytest.mark.parametrize('inversion', ['matrix', 'single']) +@pytest.mark.parametrize('pick_ori, weight_norm, reg, inversion', [ + ('vector', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('vector', 'unit-noise-gain-invariant', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.05, 'matrix'), + ('vector', 'unit-noise-gain', 0.05, 'single'), + ('vector', 'unit-noise-gain', 0.0, 'matrix'), + ('vector', 'unit-noise-gain', 0.0, 'single'), + ('vector', 'nai', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.05, 'matrix'), + ('max-power', 'unit-noise-gain', 0.0, 'single'), + ('max-power', 'unit-noise-gain', 0.05, 'single'), + ('max-power', 'unit-noise-gain-invariant', 0.05, 'matrix'), + ('normal', 'unit-noise-gain', 0.05, 'matrix'), + ('normal', 'nai', 0.0, 'matrix'), +]) def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): """Test unit-noise-gain filter against formula.""" raw = mne.io.read_raw_fif(fname_raw, preload=True) diff --git a/mne/bem.py b/mne/bem.py index 274232f9712..2b0645abdab 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -8,6 +8,7 @@ # The computations in this code were primarily derived from Matti Hämäläinen's # C code. +from collections import OrderedDict from functools import partial import glob import os @@ -16,7 +17,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from .io.constants import FIFF, FWD from .io._digitization import _dig_kind_dict, _dig_kind_rev, _dig_kind_ints @@ -28,11 +28,12 @@ from .io.open import fiff_open from .surface import (read_surface, write_surface, complete_surface_info, _compute_nearest, _get_ico_surface, read_tri, - _fast_cross_nd_sum, _get_solids) + _fast_cross_nd_sum, _get_solids, _complete_sphere_surf) from .transforms import _ensure_trans, apply_trans, Transform from .utils import (verbose, logger, run_subprocess, get_subjects_dir, warn, _pl, _validate_type, _TempDir, _check_freesurfer_home, - _check_fname, has_nibabel, _check_option) + _check_fname, has_nibabel, _check_option, path_like, + _on_missing) from .fixes import einsum from .externals.h5io import write_hdf5, read_hdf5 @@ -177,8 +178,8 @@ def _fwd_bem_lin_pot_coeff(surfs): rr_ord = np.arange(nps[si_1]) for si_2, surf2 in enumerate(surfs): logger.info(" %s (%d) -> %s (%d) ..." 
% - (_surf_name[surf1['id']], nps[si_1], - _surf_name[surf2['id']], nps[si_2])) + (_bem_surf_name[surf1['id']], nps[si_1], + _bem_surf_name[surf2['id']], nps[si_2])) tri_rr = surf2['rr'][surf2['tris']] tri_nn = surf2['tri_nn'] tri_area = surf2['tri_area'] @@ -228,7 +229,7 @@ def _fwd_bem_multi_solution(solids, gamma, nps): slice_k = slice(offsets[si_2], offsets[si_2 + 1]) solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult solids += np.eye(n_tot) - return linalg.inv(solids, overwrite_a=True) + return np.linalg.inv(solids) def _fwd_bem_homog_solution(solids, nps): @@ -264,7 +265,7 @@ def _check_complete_surface(surf, copy=False, incomplete='raise', extra=''): if len(fewer) > 0: msg = ('Surface {} has topological defects: {:.0f} / {:.0f} vertices ' 'have fewer than three neighboring triangles [{}]{}' - .format(_surf_name[surf['id']], len(fewer), surf['ntri'], + .format(_bem_surf_name[surf['id']], len(fewer), surf['ntri'], ', '.join(str(f) for f in fewer), extra)) if incomplete == 'raise': raise RuntimeError(msg) @@ -331,11 +332,7 @@ def make_bem_solution(surfs, verbose=None): .. versionadded:: 0.10.0 """ logger.info('Approximation method : Linear collocation\n') - if isinstance(surfs, str): - # Load the surfaces - logger.info('Loading surfaces...') - surfs = read_bem_surfaces(surfs) - bem = ConductorModel(is_sphere=False, surfs=surfs) + bem = _ensure_bem_surfaces(surfs) _add_gamma_multipliers(bem) if len(bem['surfs']) == 3: logger.info('Three-layer model surfaces loaded.') @@ -421,35 +418,28 @@ def _assert_complete_surface(surf, incomplete='raise'): # Center of mass.... cm = surf['rr'].mean(axis=0) logger.info('%s CM is %6.2f %6.2f %6.2f mm' % - (_surf_name[surf['id']], + (_bem_surf_name[surf['id']], 1000 * cm[0], 1000 * cm[1], 1000 * cm[2])) tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0] prop = tot_angle / (2 * np.pi) if np.abs(prop - 1.0) > 1e-5: - msg = ('Surface %s is not complete (sum of solid angles ' - 'yielded %g, should be 1.)' - % (_surf_name[surf['id']], prop)) - if incomplete == 'raise': - raise RuntimeError(msg) - else: - warn(msg) - - -_surf_name = { - FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', - FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', - FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull', - FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', -} + msg = (f'Surface {_bem_surf_name[surf["id"]]} is not complete (sum of ' + f'solid angles yielded {prop}, should be 1.)') + _on_missing( + incomplete, msg, name='incomplete', error_klass=RuntimeError) def _assert_inside(fro, to): """Check one set of points is inside a surface.""" # this is "is_inside" in surface_checks.c + fro_name = _bem_surf_name[fro["id"]] + to_name = _bem_surf_name[to["id"]] + logger.info( + f'Checking that surface {fro_name} is inside surface {to_name} ...') tot_angle = _get_solids(to['rr'][to['tris']], fro['rr']) if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any(): - raise RuntimeError('Surface %s is not completely inside surface %s' - % (_surf_name[fro['id']], _surf_name[to['id']])) + raise RuntimeError( + f'Surface {fro_name} is not completely inside surface {to_name}') def _check_surfaces(surfs, incomplete='raise'): @@ -458,8 +448,6 @@ def _check_surfaces(surfs, incomplete='raise'): _assert_complete_surface(surf, incomplete=incomplete) # Then check the topology for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): - logger.info('Checking that %s surface is inside %s surface...' 
% - (_surf_name[surf_2['id']], _surf_name[surf_1['id']])) _assert_inside(surf_2, surf_1) @@ -467,10 +455,10 @@ def _check_surface_size(surf): """Check that the coordinate limits are reasonable.""" sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0) if (sizes < 0.05).any(): - raise RuntimeError('Dimensions of the surface %s seem too small ' - '(%9.5f mm). Maybe the the unit of measure is ' - 'meters instead of mm' % - (_surf_name[surf['id']], 1000 * sizes.min())) + raise RuntimeError( + f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too ' + f'small ({1000 * sizes.min():9.5f}). Maybe the unit of measure' + ' is meters instead of mm') def _check_thicknesses(surfs): @@ -479,12 +467,11 @@ def _check_thicknesses(surfs): min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'], return_dists=True)[1] min_dist = min_dist.min() - logger.info('Checking distance between %s and %s surfaces...' % - (_surf_name[surf_1['id']], _surf_name[surf_2['id']])) - logger.info('Minimum distance between the %s and %s surfaces is ' - 'approximately %6.1f mm' % - (_surf_name[surf_1['id']], _surf_name[surf_2['id']], - 1000 * min_dist)) + fro = _bem_surf_name[surf_1['id']] + to = _bem_surf_name[surf_2['id']] + logger.info(f'Checking distance between {fro} and {to} surfaces...') + logger.info(f'Minimum distance between the {fro} and {to} surfaces is ' + f'approximately {1000 * min_dist:6.1f} mm') def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True, @@ -634,6 +621,7 @@ def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): def _compose_linear_fitting_data(mu, u): """Get the linear fitting data.""" + from scipy import linalg k1 = np.arange(1, u['nterms']) mu1ns = mu[0] ** k1 # data to be fitted @@ -846,11 +834,6 @@ def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None): Can be "m" (default) or "mm". .. versionadded:: 0.12 - move_origin : bool - If True, allow the origin to vary. Otherwise, fix it at (0, 0, 0). - - .. versionadded:: 0.20 - %(verbose)s Returns @@ -1052,9 +1035,8 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, ---------- subject : str Subject name. - $(subjects_dir)s - overwrite : bool - Write over existing files. + %(subjects_dir)s + %(overwrite)s volume : str Defaults to T1. atlas : bool @@ -1062,7 +1044,7 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False, gcaatlas : bool Specify the --brain_atlas option for mri_watershed. preflood : int - Change the preflood height + Change the preflood height. show : bool Show surfaces to visually inspect all three BEM surfaces (recommended). @@ -1231,7 +1213,8 @@ def _extract_volume_info(mgz): # Read @verbose -def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None): +def read_bem_surfaces(fname, patch_stats=False, s_id=None, on_defects='raise', + verbose=None): """Read the BEM surfaces from a FIF file. Parameters @@ -1244,6 +1227,9 @@ def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None): If int, only read and return the surface with the given s_id. An error will be raised if it doesn't exist. If None, all surfaces are read and returned. + %(on_defects)s + + .. 
versionadded:: 0.23 %(verbose)s Returns @@ -1267,7 +1253,7 @@ def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None): raise ValueError('surface with id %d not found' % s_id) for this in surf: if patch_stats or this['nn'] is None: - _check_complete_surface(this) + _check_complete_surface(this, incomplete=on_defects) return surf[0] if s_id is not None else surf @@ -1504,22 +1490,56 @@ def _add_gamma_multipliers(bem): (sigma[1:] + sigma[:-1])[:, np.newaxis]) -_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN, - 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL, - 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD} +# In our BEM code we do not model the CSF so we assign the innermost surface +# the id BRAIN. Our 4-layer sphere we model CSF (at least by default), so when +# searching for and referring to surfaces we need to keep track of this. +_sm_surf_dict = OrderedDict([ + ('brain', FIFF.FIFFV_BEM_SURF_ID_BRAIN), + ('inner_skull', FIFF.FIFFV_BEM_SURF_ID_CSF), + ('outer_skull', FIFF.FIFFV_BEM_SURF_ID_SKULL), + ('head', FIFF.FIFFV_BEM_SURF_ID_HEAD), +]) +_bem_surf_dict = { + 'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN, + 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL, + 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD, +} +_bem_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull', + FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', + FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', +} +_sm_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'brain', + FIFF.FIFFV_BEM_SURF_ID_CSF: 'csf', + FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull', + FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ', + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ', +} def _bem_find_surface(bem, id_): - """Find surface from already-loaded BEM.""" + """Find surface from already-loaded conductor model.""" + if bem['is_sphere']: + _surf_dict = _sm_surf_dict + _name_dict = _sm_surf_name + kind = 'Sphere model' + tri = 'boundary' + else: + _surf_dict = _bem_surf_dict + _name_dict = _bem_surf_name + kind = 'BEM' + tri = 'triangulation' if isinstance(id_, str): name = id_ id_ = _surf_dict[id_] else: - name = _surf_name[id_] + name = _name_dict[id_] + kind = 'Sphere model' if bem['is_sphere'] else 'BEM' idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0] if len(idx) != 1: - raise RuntimeError('BEM model does not have the %s triangulation' - % name.replace('_', ' ')) + raise RuntimeError(f'{kind} does not have the {name} {tri}') return bem['surfs'][idx[0]] @@ -1537,8 +1557,7 @@ def write_bem_surfaces(fname, surfs, overwrite=False, verbose=None): Filename to write. Can end with ``.h5`` to write using HDF5. surfs : dict | list of dict The surfaces, or a single surface. - overwrite : bool - If True (default False), overwrite the file. + %(overwrite)s %(verbose)s """ if isinstance(surfs, dict): @@ -1556,6 +1575,30 @@ def write_bem_surfaces(fname, surfs, overwrite=False, verbose=None): end_file(fid) +@verbose +def write_head_bem(fname, rr, tris, on_defects='raise', overwrite=False, + verbose=None): + """Write a head surface to a fiff file. + + Parameters + ---------- + fname : str + Filename to write. + rr : array, shape (n_vertices, 3) + Coordinate points in the MRI coordinate system. + tris : ndarray of int, shape (n_tris, 3) + Triangulation (each line contains indices for three points which + together form a face). 
+ %(on_defects)s + %(overwrite)s + %(verbose)s + """ + surf = _surfaces_to_bem([dict(rr=rr, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, + incomplete=on_defects) + write_bem_surfaces(fname, surf, overwrite=overwrite) + + def _write_bem_surfaces_block(fid, surfs): """Write bem surfaces to open file handle.""" for surf in surfs: @@ -1584,8 +1627,7 @@ def write_bem_solution(fname, bem, overwrite=False, verbose=None): The filename to use. Can end with ``.h5`` to write using HDF5. bem : instance of ConductorModel The BEM model with solution to save. - overwrite : bool - If True (default False), overwrite the file. + %(overwrite)s %(verbose)s See Also @@ -1632,6 +1674,8 @@ def _prepare_env(subject, subjects_dir): _validate_type(subject, "str") subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subjects_dir = op.abspath(subjects_dir) # force use of an absolute path + subjects_dir = op.expanduser(subjects_dir) if not op.isdir(subjects_dir): raise RuntimeError('Could not find the MRI data directory "%s"' % subjects_dir) @@ -1824,6 +1868,10 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, .. versionadded:: 0.18 %(verbose)s + See Also + -------- + convert_flash_mris + Notes ----- This program assumes that FreeSurfer is installed and sourced properly. @@ -1831,10 +1879,6 @@ def make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None, This function extracts the BEM surfaces (outer skull, inner skull, and outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30 degrees, in mgz format. - - See Also - -------- - convert_flash_mris """ from .viz.misc import plot_bem @@ -1988,3 +2032,33 @@ def _symlink(src, dest, copy=False): copy = True if copy: shutil.copy(src, dest) + + +def _ensure_bem_surfaces(bem, extra_allow=(), name='bem'): + # by default only allow path-like and list, but handle None and + # ConductorModel properly if need be. Always return a ConductorModel + # even though it's incomplete (and might have is_sphere=True). 
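# Editor's sketch (not part of the patch): with the new ``on_defects`` argument,
# topological defects found while reading a BEM surface or writing a head
# surface can be downgraded to warnings instead of errors. File names below are
# hypothetical.
import mne
from mne.bem import write_head_bem

surf = mne.read_bem_surfaces('sub-01-head.fif', patch_stats=True,
                             on_defects='warn')[0]
write_head_bem('sub-01-head-fixed.fif', surf['rr'], surf['tris'],
               on_defects='warn', overwrite=True)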
+ assert all(extra in (None, ConductorModel) for extra in extra_allow) + allowed = ('path-like', list) + extra_allow + _validate_type(bem, allowed, name) + if isinstance(bem, path_like): + # Load the surfaces + logger.info(f'Loading BEM surfaces from {str(bem)}...') + bem = read_bem_surfaces(bem) + bem = ConductorModel(is_sphere=False, surfs=bem) + elif isinstance(bem, list): + for ii, this_surf in enumerate(bem): + _validate_type(this_surf, dict, f'{name}[{ii}]') + if isinstance(bem, list): + bem = ConductorModel(is_sphere=False, surfs=bem) + # add surfaces in the spherical case + if isinstance(bem, ConductorModel) and bem['is_sphere']: + bem = bem.copy() + bem['surfs'] = [] + if len(bem['layers']) == 4: + for idx, id_ in enumerate(_sm_surf_dict.values()): + bem['surfs'].append(_complete_sphere_surf( + bem, idx, 4, complete=False)) + bem['surfs'][-1]['id'] = id_ + + return bem diff --git a/mne/channels/__init__.py b/mne/channels/__init__.py index 6455e11801c..3593c52ed90 100644 --- a/mne/channels/__init__.py +++ b/mne/channels/__init__.py @@ -15,7 +15,9 @@ compute_native_head_t) from .channels import (equalize_channels, rename_channels, fix_mag_coil_types, read_ch_adjacency, _get_ch_type, find_ch_adjacency, - make_1020_channel_selections, combine_channels) + make_1020_channel_selections, combine_channels, + read_vectorview_selection, _SELECTIONS, _EEG_SELECTIONS, + _divide_to_regions) __all__ = [ # Data Structures @@ -35,6 +37,7 @@ 'rename_channels', 'make_1020_channel_selections', '_get_ch_type', 'equalize_channels', 'find_ch_adjacency', 'find_layout', 'fix_mag_coil_types', 'generate_2d_layout', 'get_builtin_montages', + 'combine_channels', 'read_vectorview_selection', # Other 'compute_dev_head_t', 'compute_native_head_t', diff --git a/mne/channels/_standard_montage_utils.py b/mne/channels/_standard_montage_utils.py index bc4730d261d..2caccbde879 100644 --- a/mne/channels/_standard_montage_utils.py +++ b/mne/channels/_standard_montage_utils.py @@ -3,6 +3,8 @@ # # License: BSD (3-clause) from collections import OrderedDict +import csv + import os.path as op import numpy as np @@ -71,7 +73,7 @@ def _biosemi(basename, head_size): return _read_theta_phi_in_degrees(fname, head_size, fid_names) -def _mgh_or_standard(basename, head_size): +def _mgh_or_standard(basename, head_size, coord_frame='unknown'): fid_names = ('Nz', 'LPA', 'RPA') fname = op.join(MONTAGE_PATH, basename) @@ -101,7 +103,7 @@ def _mgh_or_standard(basename, head_size): lpa *= scale rpa *= scale - return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown', + return make_dig_montage(ch_pos=ch_pos, coord_frame=coord_frame, nasion=nasion, lpa=lpa, rpa=rpa) @@ -142,6 +144,10 @@ def _mgh_or_standard(basename, head_size): basename='standard_prefixed.elc'), 'standard_primed': partial(_mgh_or_standard, basename='standard_primed.elc'), + 'artinis-octamon': partial(_mgh_or_standard, coord_frame='mri', + basename='artinis-octamon.elc'), + 'artinis-brite23': partial(_mgh_or_standard, coord_frame='mri', + basename='artinis-brite23.elc'), } @@ -334,3 +340,42 @@ def _read_brainvision(fname, head_size): pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos)) + + +def _read_xyz(fname): + """Import EEG channel locations from CSV, TSV, or XYZ files. + + CSV and TSV files should have 4 columns containing + ch_name, x, y, and z. Each row represents one channel. + XYZ files should have 5 columns containing + count, x, y, z, and ch_name.
Each row represents one channel + CSV files should be separated by commas, TSV and XYZ files should be + separated by tabs. + + Parameters + ---------- + fname : str + Name of the file to read channel locations from. + + Returns + ------- + montage : instance of DigMontage + The montage. + """ + ch_names = [] + pos = [] + file_format = op.splitext(fname)[1].lower() + with open(fname, "r") as f: + if file_format != ".xyz": + f.readline() # skip header + delimiter = "," if file_format == ".csv" else "\t" + for row in csv.reader(f, delimiter=delimiter): + if file_format == ".xyz": + _, x, y, z, ch_name, *_ = row + ch_name = ch_name.strip() # deals with variable tab size + else: + ch_name, x, y, z, *_ = row + ch_names.append(ch_name) + pos.append((x, y, z)) + d = _check_dupes_odict(ch_names, np.array(pos, dtype=float)) + return make_dig_montage(ch_pos=d) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index cb187842e3d..05ffab69262 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -1,5 +1,6 @@ # Authors: Alexandre Gramfort # Matti Hämäläinen +# Martin Luessi # Denis Engemann # Andrew Dykstra # Teon Brooks @@ -7,6 +8,7 @@ # # License: BSD (3-clause) + import os import os.path as op import sys @@ -15,20 +17,24 @@ from functools import partial import numpy as np -from scipy import sparse from ..defaults import HEAD_SIZE_DEFAULT, _handle_default from ..transforms import _frame_to_str from ..utils import (verbose, logger, warn, - _check_preload, _validate_type, fill_doc, _check_option) + _check_preload, _validate_type, fill_doc, _check_option, + _get_stim_channel, _check_fname) from ..io.compensator import get_current_comp from ..io.constants import FIFF -from ..io.meas_info import anonymize_info, Info, MontageMixin, create_info +from ..io.meas_info import (anonymize_info, Info, MontageMixin, create_info, + _rename_comps) from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, _check_excludes_includes, _contains_ch_type, channel_indices_by_type, pick_channels, _picks_to_idx, - _get_channel_types, get_channel_type_constants) + _get_channel_types, get_channel_type_constants, + _pick_data_channels) +from ..io.tag import _rename_list from ..io.write import DATE_NONE +from ..io.proj import setup_proj from ..io._digitization import _get_data_as_dict_from_dig @@ -82,7 +88,7 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr', - 'ecog', 'seeg'] + 'ecog', 'seeg', 'dbs'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: if isinstance(inst, Info): @@ -257,7 +263,7 @@ def get_montage(self): # get the channel names and chs data structure ch_names, chs = self.info['ch_names'], self.info['chs'] picks = pick_types(self.info, meg=False, eeg=True, - seeg=True, ecog=True) + seeg=True, ecog=True, dbs=True) # channel positions from dig do not match ch_names one to one, # so use loc[:3] instead @@ -410,8 +416,8 @@ def set_channel_types(self, mapping, verbose=None): ----- The following sensor types are accepted: - ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, - hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, + ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, + ecog, hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, fnirs_fd_phase, fnirs_od .. 
versionadded:: 0.9.0 @@ -446,7 +452,7 @@ def set_channel_types(self, mapping, verbose=None): unit_changes[this_change] = list() unit_changes[this_change].append(ch_name) self.info['chs'][c_ind]['unit'] = _human2unit[ch_type] - if ch_type in ['eeg', 'seeg', 'ecog']: + if ch_type in ['eeg', 'seeg', 'ecog', 'dbs']: coil_type = FIFF.FIFFV_COIL_EEG elif ch_type == 'hbo': coil_type = FIFF.FIFFV_COIL_FNIRS_HBO @@ -468,13 +474,14 @@ def set_channel_types(self, mapping, verbose=None): warn(msg.format(", ".join(sorted(names)), *this_change)) return self - @fill_doc - def rename_channels(self, mapping): + @verbose + def rename_channels(self, mapping, allow_duplicates=False, verbose=None): """Rename channels. Parameters ---------- - %(rename_channels_mapping)s + %(rename_channels_mapping_duplicates)s + %(verbose_meth)s Returns ------- @@ -488,7 +495,24 @@ def rename_channels(self, mapping): ----- .. versionadded:: 0.9.0 """ - rename_channels(self.info, mapping) + from ..io import BaseRaw + + ch_names_orig = list(self.info['ch_names']) + rename_channels(self.info, mapping, allow_duplicates) + + # Update self._orig_units for Raw + if isinstance(self, BaseRaw): + # whatever mapping was provided, now we can just use a dict + mapping = dict(zip(ch_names_orig, self.info['ch_names'])) + if self._orig_units is not None: + for old_name, new_name in mapping.items(): + if old_name != new_name: + self._orig_units[new_name] = self._orig_units[old_name] + del self._orig_units[old_name] + ch_names = self.annotations.ch_names + for ci, ch in enumerate(ch_names): + ch_names[ci] = tuple(mapping.get(name, name) for name in ch) + return self @verbose @@ -509,9 +533,9 @@ def plot_sensors(self, kind='topomap', ch_type=None, title=None, figure instance. Defaults to 'topomap'. ch_type : None | str The channel type to plot. Available options 'mag', 'grad', 'eeg', - 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, - eeg, seeg and ecog channels are plotted. If None (default), then - channels are chosen in the order given above. + 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag, + grad, eeg, seeg, dbs, and ecog channels are plotted. If + None (default), then channels are chosen in the order given above. title : str | None Title for the figure. If None (default), equals to ``'Sensor positions (%%s)' %% ch_type``. @@ -656,9 +680,9 @@ class UpdateChannelsMixin(object): def pick_types(self, meg=False, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=False, dipole=False, gof=False, bio=False, ecog=False, - fnirs=False, csd=False, include=(), exclude='bads', - selection=None, verbose=None): + seeg=False, dipole=False, gof=False, bio=False, + ecog=False, fnirs=False, csd=False, dbs=False, include=(), + exclude='bads', selection=None, verbose=None): """Pick some channels by type and names. Parameters @@ -685,8 +709,7 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, misc : bool If True include miscellaneous analog channels. resp : bool - If True include response-trigger channel. For some MEG systems this - is separate from the stim channel. + If ``True`` include respiratory channels. chpi : bool If True include continuous HPI coil channels. exci : bool @@ -712,6 +735,8 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, include channels measuring deoxyhemoglobin). csd : bool EEG-CSD channels. + dbs : bool + Deep brain stimulation channels. 
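# Editor's illustration (not part of the patch): the new 'dbs' type threads
# through set_channel_types and pick_types just like the existing seeg/ecog
# types. The channel names and data below are made up.
import numpy as np
import mne

info = mne.create_info(['LFP1', 'LFP2', 'EEG1'], 1000., 'eeg')
raw = mne.io.RawArray(np.zeros((3, 1000)), info)
raw.set_channel_types({'LFP1': 'dbs', 'LFP2': 'dbs'})
dbs_picks = mne.pick_types(raw.info, meg=False, eeg=False, dbs=True)  # -> array([0, 1])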
include : list of str List of additional channels to include. If empty do not include any. @@ -739,7 +764,7 @@ def pick_types(self, meg=False, eeg=False, stim=False, eog=False, self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci, ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio, - ecog=ecog, fnirs=fnirs, include=include, exclude=exclude, + ecog=ecog, fnirs=fnirs, dbs=dbs, include=include, exclude=exclude, selection=selection) self._pick_drop_channels(idx) @@ -930,6 +955,9 @@ def _pick_drop_channels(self, idx): else: assert isinstance(self, BaseRaw) and not self.preload + if isinstance(self, BaseRaw): + self.annotations._prune_ch_names(self.info, on_missing='ignore') + self._pick_projs() return self @@ -1043,15 +1071,44 @@ def add_channels(self, add_list, force_update_info=False): self._read_picks = [ np.concatenate([r, extra_idx]) for r in self._read_picks] assert all(len(r) == self.info['nchan'] for r in self._read_picks) + elif isinstance(self, BaseEpochs): + self.picks = np.arange(self._data.shape[1]) + if hasattr(self, '_projector'): + activate = False if self._do_delayed_proj else self.proj + self._projector, self.info = setup_proj(self.info, False, + activate=activate) + return self + @fill_doc + def add_reference_channels(self, ref_channels): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during + recording. This is useful when you need to re-reference your data + to different channels. These added channels will consist of all zeros. + + Parameters + ---------- + %(ref_channels)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. + """ + from ..io.reference import add_reference_channels + + return add_reference_channels(self, ref_channels, copy=False) + class InterpolationMixin(object): """Mixin class for Raw, Evoked, Epochs.""" @verbose def interpolate_bads(self, reset_bads=True, mode='accurate', - origin='auto', method=None, verbose=None): + origin='auto', method=None, exclude=(), + verbose=None): """Interpolate bad MEG and EEG channels. Operates in place. @@ -1086,6 +1143,9 @@ def interpolate_bads(self, reset_bads=True, mode='accurate', method=dict(meg="MNE", eeg="spline", fnirs="nearest") .. versionadded:: 0.21 + exclude : list | tuple + The channels to exclude from interpolation. If excluded a bad + channel will stay in bads. %(verbose_meth)s Returns @@ -1115,30 +1175,31 @@ def interpolate_bads(self, reset_bads=True, mode='accurate', logger.info('Interpolating bad channels') origin = _check_origin(origin, self.info) if method['eeg'] == 'spline': - _interpolate_bads_eeg(self, origin=origin) + _interpolate_bads_eeg(self, origin=origin, exclude=exclude) eeg_mne = False else: eeg_mne = True - _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne) - _interpolate_bads_nirs(self) + _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne, + exclude=exclude) + _interpolate_bads_nirs(self, exclude=exclude) if reset_bads is True: - self.info['bads'] = [] + self.info['bads'] = \ + [ch for ch in self.info['bads'] if ch in exclude] return self -@fill_doc -def rename_channels(info, mapping): +@verbose +def rename_channels(info, mapping, allow_duplicates=False, verbose=None): """Rename channels. - .. warning:: The channel names must have at most 15 characters - Parameters ---------- info : dict Measurement info to modify. 
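# Editor's illustration (not part of the patch): ``exclude`` keeps selected bad
# channels out of the interpolation (they remain in info['bads']), and the new
# add_reference_channels method appends an all-zero reference channel in place.
# The tiny EEG recording below is synthetic.
import numpy as np
import mne

montage = mne.channels.make_standard_montage('standard_1020')
names = montage.ch_names[:10]
raw = mne.io.RawArray(np.random.RandomState(0).randn(10, 1000),
                      mne.create_info(names, 100., 'eeg'))
raw.set_montage(montage)
raw.info['bads'] = [names[0], names[1]]
raw.interpolate_bads(exclude=[names[1]])   # only names[0] is interpolated
assert raw.info['bads'] == [names[1]]      # the excluded channel stays bad
raw.add_reference_channels('REF')          # zero-filled reference channel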
- %(rename_channels_mapping)s + %(rename_channels_mapping_duplicates)s + %(verbose)s """ _validate_type(info, Info, 'info') info._check_consistency() @@ -1165,12 +1226,6 @@ def rename_channels(info, mapping): for new_name in new_names: _validate_type(new_name[1], 'str', 'New channel mappings') - bad_new_names = [name for _, name in new_names if len(name) > 15] - if len(bad_new_names): - raise ValueError('Channel names cannot be longer than 15 ' - 'characters. These channel names are not ' - 'valid : %s' % new_names) - # do the remapping locally for c_ind, new_name in new_names: for bi, bad in enumerate(bads): @@ -1179,13 +1234,21 @@ def rename_channels(info, mapping): ch_names[c_ind] = new_name # check that all the channel names are unique - if len(ch_names) != len(np.unique(ch_names)): + if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates: raise ValueError('New channel names are not unique, renaming failed') # do the remapping in info info['bads'] = bads + ch_names_mapping = dict() for ch, ch_name in zip(info['chs'], ch_names): + ch_names_mapping[ch['ch_name']] = ch_name ch['ch_name'] = ch_name + # .get b/c fwd info omits it + _rename_comps(info.get('comps', []), ch_names_mapping) + if 'projs' in info: # fwd might omit it + for proj in info['projs']: + proj['data']['col_names'][:] = \ + _rename_list(proj['data']['col_names'], ch_names_mapping) info._update_redundant() info._check_consistency() @@ -1279,6 +1342,7 @@ def _ch_neighbor_adjacency(ch_names, neighbors): ch_adjacency : scipy.sparse matrix The adjacency matrix. """ + from scipy import sparse if len(ch_names) != len(neighbors): raise ValueError('`ch_names` and `neighbors` must ' 'have the same length') @@ -1354,8 +1418,6 @@ def find_ch_adjacency(info, ch_type): conn_name = 'neuromag306mag' elif has_vv_grad and ch_type == 'grad': conn_name = 'neuromag306planar' - elif has_neuromag_122_grad: - conn_name = 'neuromag122' elif has_4D_mag: if 'MEG 248' in info['ch_names']: idx = info['ch_names'].index('MEG 248') @@ -1406,11 +1468,16 @@ def _compute_ch_adjacency(info, ch_type): ch_names : list The list of channel names present in adjacency matrix. """ + from scipy import sparse from scipy.spatial import Delaunay from .. 
import spatial_tris_adjacency from ..channels.layout import _find_topomap_coords, _pair_grad_sensors - combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in - np.unique([ch['coil_type'] for ch in info['chs']])) + combine_grads = (ch_type == 'grad' + and any([coil_type in [ch['coil_type'] + for ch in info['chs']] + for coil_type in + [FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_NM_122]])) picks = dict(_picks_by_type(info, exclude=[]))[ch_type] ch_names = [info['ch_names'][pick] for pick in picks] @@ -1742,3 +1809,163 @@ def combine_channels(inst, groups, method='mean', keep_stim=False, verbose=inst.verbose) return combined_inst + + +# NeuroMag channel groupings +_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal', + 'Right-parietal', 'Left-occipital', 'Right-occipital', + 'Left-frontal', 'Right-frontal'] +_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128'] + + +def _divide_to_regions(info, add_stim=True): + """Divide channels to regions by positions.""" + from scipy.stats import zscore + picks = _pick_data_channels(info, exclude=[]) + chs_in_lobe = len(picks) // 4 + pos = np.array([ch['loc'][:3] for ch in info['chs']]) + x, y, z = pos.T + + frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]] + picks = np.setdiff1d(picks, frontal) + + occipital = picks[np.argsort(y[picks])[:chs_in_lobe]] + picks = np.setdiff1d(picks, occipital) + + temporal = picks[np.argsort(z[picks])[:chs_in_lobe]] + picks = np.setdiff1d(picks, temporal) + + lt, rt = _divide_side(temporal, x) + lf, rf = _divide_side(frontal, x) + lo, ro = _divide_side(occipital, x) + lp, rp = _divide_side(picks, x) # Parietal lobe from the remaining picks. + + # Because of the way the sides are divided, there may be outliers in the + # temporal lobes. Here we switch the sides for these outliers. For other + # lobes it is not a big problem because of the vicinity of the lobes. + with np.errstate(invalid='ignore'): # invalid division, greater compare + zs = np.abs(zscore(x[rt])) + outliers = np.array(rt)[np.where(zs > 2.)[0]] + rt = list(np.setdiff1d(rt, outliers)) + + with np.errstate(invalid='ignore'): # invalid division, greater compare + zs = np.abs(zscore(x[lt])) + outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]])) + lt = list(np.setdiff1d(lt, outliers)) + + l_mean = np.mean(x[lt]) + r_mean = np.mean(x[rt]) + for outlier in outliers: + if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]): + lt.append(outlier) + else: + rt.append(outlier) + + if add_stim: + stim_ch = _get_stim_channel(None, info, raise_error=False) + if len(stim_ch) > 0: + for region in [lf, rf, lo, ro, lp, rp, lt, rt]: + region.append(info['ch_names'].index(stim_ch[0])) + return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf), + ('Left-parietal', lp), ('Right-parietal', rp), + ('Left-occipital', lo), ('Right-occipital', ro), + ('Left-temporal', lt), ('Right-temporal', rt)]) + + +def _divide_side(lobe, x): + """Make a separation between left and right lobe evenly.""" + lobe = np.asarray(lobe) + median = np.median(x[lobe]) + + left = lobe[np.where(x[lobe] < median)[0]] + right = lobe[np.where(x[lobe] > median)[0]] + medians = np.where(x[lobe] == median)[0] + + left = np.sort(np.concatenate([left, lobe[medians[1::2]]])) + right = np.sort(np.concatenate([right, lobe[medians[::2]]])) + return list(left), list(right) + + +@verbose +def read_vectorview_selection(name, fname=None, info=None, verbose=None): + """Read Neuromag Vector View channel selection from a file. 
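# Editor's illustration (not part of the patch): read_vectorview_selection is
# now importable from mne.channels; passing a list combines selections, and the
# optional ``info`` switches between old ('MEG 0111') and new ('MEG0111')
# channel-name spacing.
from mne.channels import read_vectorview_selection

left = read_vectorview_selection('Left-temporal')
both = read_vectorview_selection(['Left-parietal', 'Right-parietal'])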
+ + Parameters + ---------- + name : str | list of str + Name of the selection. If a list, the selections are combined. + Supported selections are: ``'Vertex'``, ``'Left-temporal'``, + ``'Right-temporal'``, ``'Left-parietal'``, ``'Right-parietal'``, + ``'Left-occipital'``, ``'Right-occipital'``, ``'Left-frontal'`` and + ``'Right-frontal'``. Selections can also be matched and combined by + specifying common substrings. For example, ``name='temporal'`` will + produce a combination of ``'Left-temporal'`` and ``'Right-temporal'``. + fname : str + Filename of the selection file (if ``None``, built-in selections are + used). + info : instance of Info + Measurement info file, which will be used to determine the spacing + of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag + systems and ``'MEG0111'`` for new ones. + %(verbose)s + + Returns + ------- + sel : list of str + List with channel names in the selection. + """ + # convert name to list of string + if not isinstance(name, (list, tuple)): + name = [name] + if isinstance(info, Info): + picks = pick_types(info, meg=True, exclude=()) + if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]: + spacing = 'new' + else: + spacing = 'old' + elif info is not None: + raise TypeError('info must be an instance of Info or None, not %s' + % (type(info),)) + else: # info is None + spacing = 'old' + + # use built-in selections by default + if fname is None: + fname = op.join(op.dirname(__file__), '..', 'data', 'mne_analyze.sel') + + fname = _check_fname(fname, must_exist=True, overwrite='read') + + # use this to make sure we find at least one match for each name + name_found = {n: False for n in name} + with open(fname, 'r') as fid: + sel = [] + for line in fid: + line = line.strip() + # skip blank lines and comments + if len(line) == 0 or line[0] == '#': + continue + # get the name of the selection in the file + pos = line.find(':') + if pos < 0: + logger.info('":" delimiter not found in selections file, ' + 'skipping line') + continue + sel_name_file = line[:pos] + # search for substring match with name provided + for n in name: + if sel_name_file.find(n) >= 0: + sel.extend(line[pos + 1:].split('|')) + name_found[n] = True + break + + # make sure we found at least one match for each name + for n, found in name_found.items(): + if not found: + raise ValueError('No match for selection name "%s" found' % n) + + # make the selection a sorted list with unique elements + sel = list(set(sel)) + sel.sort() + if spacing == 'new': # "new" or "old" by now, "old" is default + sel = [s.replace('MEG ', 'MEG') for s in sel] + return sel diff --git a/mne/channels/data/montages/artinis-brite23.elc b/mne/channels/data/montages/artinis-brite23.elc new file mode 100644 index 00000000000..1e14b0a0a2b --- /dev/null +++ b/mne/channels/data/montages/artinis-brite23.elc @@ -0,0 +1,48 @@ +# ASA optode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 21 +Positions +-4.62 82.33 -45.74 +79.66 -18.72 -45.89 +-81.41 -17.18 -45.56 +65.18 27.28 35.31 +48.62 59.71 22.68 +18.95 72.41 38.32 +-3.97 79.74 30.28 +-25.96 72.19 35.16 +-52.51 60.53 14.54 +-66.37 32.04 31.08 +76.10 -0.29 31.24 +65.61 -0.26 56.15 +64.93 42.43 8.29 +43.32 46.36 50.77 +21.58 82.45 1.06 +-2.91 59.57 61.59 +-29.62 79.35 2.38 +-48.13 44.76 49.15 +-67.68 43.26 -3.18 +-65.37 4.89 56.36 +-77.24 5.88 27.58 +Labels +Nz +RPA +LPA +D1 +D2 +D3 +D4 +D5 +D6 +D7 +S1 +S2 +S3 +S4 +S5 +S6 +S7 +S8 +S9 +S10 +S11 diff --git a/mne/channels/data/montages/artinis-octamon.elc
b/mne/channels/data/montages/artinis-octamon.elc new file mode 100644 index 00000000000..748a19d04e1 --- /dev/null +++ b/mne/channels/data/montages/artinis-octamon.elc @@ -0,0 +1,32 @@ +# ASA optode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 13 +Positions +0.96 83.56 -48.63 +80.25 -19.67 -43.88 +-82.58 -20.09 -43.10 +47.77 65.28 7.28 +-46.45 67.76 8.81 +63.88 34.84 28.34 +64.96 45.02 -10.31 +22.07 74.86 31.03 +17.84 84.96 -10.84 +-10.81 77.96 32.10 +-15.96 85.24 -7.41 +-61.78 40.78 29.92 +-65.28 48.14 -10.73 +Labels +Nz +RPA +LPA +D1 +D2 +S1 +S2 +S3 +S4 +S5 +S6 +S7 +S8 diff --git a/mne/channels/data/neighbors/neuromag122_neighb.mat b/mne/channels/data/neighbors/neuromag122_neighb.mat deleted file mode 100755 index e8bbb753600..00000000000 Binary files a/mne/channels/data/neighbors/neuromag122_neighb.mat and /dev/null differ diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py index 7f9d00af9f3..18a800d704f 100644 --- a/mne/channels/interpolation.py +++ b/mne/channels/interpolation.py @@ -4,7 +4,6 @@ import numpy as np from numpy.polynomial.legendre import legval -from scipy import linalg from ..utils import logger, warn, verbose from ..io.meas_info import _simplify_info @@ -83,6 +82,7 @@ def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): Spherical splines for scalp potential and current density mapping. Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7. """ + from scipy import linalg pos_from = pos_from.copy() pos_to = pos_to.copy() n_from = pos_from.shape[0] @@ -123,20 +123,13 @@ def _do_interp_dots(inst, interpolation, goods_idx, bads_idx): @verbose -def _interpolate_bads_eeg(inst, origin, verbose=None): - """Interpolate bad EEG channels. - - Operates in place. - - Parameters - ---------- - inst : mne.io.Raw, mne.Epochs or mne.Evoked - The data to interpolate. Must be preloaded. - """ +def _interpolate_bads_eeg(inst, origin, exclude=None, verbose=None): + if exclude is None: + exclude = list() bads_idx = np.zeros(len(inst.ch_names), dtype=bool) goods_idx = np.zeros(len(inst.ch_names), dtype=bool) - picks = pick_types(inst.info, meg=False, eeg=True, exclude=[]) + picks = pick_types(inst.info, meg=False, eeg=True, exclude=exclude) inst.info._check_consistency() bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks] @@ -177,29 +170,8 @@ def _interpolate_bads_meg(inst, mode='accurate', origin=(0., 0., 0.04), @verbose def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), - meg=True, eeg=True, ref_meg=False, verbose=None): - """Interpolate bad channels from data in good channels. - - Parameters - ---------- - inst : mne.io.Raw, mne.Epochs or mne.Evoked - The data to interpolate. Must be preloaded. - mode : str - Either `'accurate'` or `'fast'`, determines the quality of the - Legendre polynomial expansion used for interpolation. `'fast'` should - be sufficient for most applications. - origin : array-like, shape (3,) | str - Origin of the sphere in the head coordinate frame and in meters. - Can be ``'auto'``, which means a head-digitization-based origin - fit. Default is ``(0., 0., 0.04)``. - %(verbose)s - ref_meg : bool - Should always be False; only exists for testing purpose. - meg : bool - If True, interpolate bad MEG channels. - eeg : bool - If True, interpolate bad EEG channels. 
- """ + meg=True, eeg=True, ref_meg=False, + exclude=(), verbose=None): bools = dict(meg=meg, eeg=eeg) info = _simplify_info(inst.info) for ch_type, do in bools.items(): @@ -207,7 +179,7 @@ def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), continue kw = dict(meg=False, eeg=False) kw[ch_type] = True - picks_type = pick_types(info, ref_meg=ref_meg, exclude=[], **kw) + picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **kw) picks_good = pick_types(info, ref_meg=ref_meg, exclude='bads', **kw) use_ch_names = [inst.info['ch_names'][p] for p in picks_type] bads_type = [ch for ch in inst.info['bads'] if ch in use_ch_names] @@ -231,29 +203,19 @@ def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04), @verbose -def _interpolate_bads_nirs(inst, method='nearest', verbose=None): - """Interpolate bad nirs channels. Simply replaces by closest non bad. - - Parameters - ---------- - inst : mne.io.Raw, mne.Epochs or mne.Evoked - The data to interpolate. Must be preloaded. - method : str - Only the method 'nearest' is currently available. This method replaces - each bad channel with the nearest non bad channel. - %(verbose)s - """ +def _interpolate_bads_nirs(inst, method='nearest', exclude=(), verbose=None): from scipy.spatial.distance import pdist, squareform from mne.preprocessing.nirs import _channel_frequencies,\ _check_channels_ordered # Returns pick of all nirs and ensures channels are correctly ordered - freqs = np.unique(_channel_frequencies(inst)) - picks_nirs = _check_channels_ordered(inst, freqs) + freqs = np.unique(_channel_frequencies(inst.info)) + picks_nirs = _check_channels_ordered(inst.info, freqs) if len(picks_nirs) == 0: return nirs_ch_names = [inst.info['ch_names'][p] for p in picks_nirs] + nirs_ch_names = [ch for ch in nirs_ch_names if ch not in exclude] bads_nirs = [ch for ch in inst.info['bads'] if ch in nirs_ch_names] if len(bads_nirs) == 0: return @@ -280,6 +242,6 @@ def _interpolate_bads_nirs(inst, method='nearest', verbose=None): closest_idx = np.argmin(dists_to_bad) + (bad % 2) inst._data[bad] = inst._data[closest_idx] - inst.info['bads'] = [] + inst.info['bads'] = [ch for ch in inst.info['bads'] if ch in exclude] return inst diff --git a/mne/channels/layout.py b/mne/channels/layout.py index 6a20cbf9ed9..6a2e566bfab 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -36,7 +36,8 @@ class directly if you're constructing a new layout. box : tuple of length 4 The box dimension (x_min, x_max, y_min, y_max). pos : array, shape=(n_channels, 4) - The positions of the channels in 2d (x, y, width, height). + The unit-normalized positions of the channels in 2d + (x, y, width, height). names : list The channel names. ids : list @@ -91,12 +92,14 @@ def __repr__(self): ', '.join(self.names[:3])) @fill_doc - def plot(self, picks=None, show=True): + def plot(self, picks=None, show_axes=False, show=True): """Plot the sensor positions. Parameters ---------- %(picks_nostr)s + show_axes : bool + Show layout axes if True. Defaults to False. show : bool Show figure if True. Defaults to True. @@ -110,7 +113,7 @@ def plot(self, picks=None, show=True): .. 
versionadded:: 0.12.0 """ from ..viz.topomap import plot_layout - return plot_layout(self, picks=picks, show=show) + return plot_layout(self, picks=picks, show_axes=show_axes, show=show) def _read_lout(fname): diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 5dc268a062c..4c9341a8b4c 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -20,6 +20,7 @@ import numpy as np from ..defaults import HEAD_SIZE_DEFAULT +from ..source_space import get_mni_fiducials from ..viz import plot_montage from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart, _topo_to_sph, _frame_to_str, Transform, @@ -32,8 +33,8 @@ _get_data_as_dict_from_dig) from ..io.meas_info import create_info from ..io.open import fiff_open -from ..io.pick import pick_types -from ..io.constants import FIFF +from ..io.pick import pick_types, _picks_to_idx +from ..io.constants import FIFF, CHANNEL_LOC_ALIASES from ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose, _check_option, _validate_type, _check_fname, _on_missing, fill_doc) @@ -51,7 +52,8 @@ 'easycap-M1', 'easycap-M10', 'mgh60', 'mgh70', 'standard_1005', 'standard_1020', 'standard_alphabetic', - 'standard_postfixed', 'standard_prefixed', 'standard_primed' + 'standard_postfixed', 'standard_prefixed', 'standard_primed', + 'artinis-octamon', 'artinis-brite23' ] @@ -118,6 +120,12 @@ def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None, read_dig_egi read_dig_fif read_dig_polhemus_isotrak + + Notes + ----- + Valid ``coord_frame`` arguments are 'meg', 'mri', 'mri_voxel', 'head', + 'mri_tal', 'ras', 'fs_tal', 'ctf_head', 'ctf_meg', 'unknown'. For custom + montages without fiducials this parameter has to be set to 'head'. """ _validate_type(ch_pos, (dict, None), 'ch_pos') if ch_pos is None: @@ -195,12 +203,12 @@ def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True, sphere=sphere) @fill_doc - def rename_channels(self, mapping): + def rename_channels(self, mapping, allow_duplicates=False): """Rename the channels. Parameters ---------- - %(rename_channels_mapping)s + %(rename_channels_mapping_duplicates)s Returns ------- @@ -209,7 +217,7 @@ def rename_channels(self, mapping): """ from .channels import rename_channels temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg') - rename_channels(temp_info, mapping) + rename_channels(temp_info, mapping, allow_duplicates) self.ch_names = temp_info['ch_names'] def save(self, fname): @@ -322,6 +330,7 @@ def get_positions(self): { 'ch_pos': {'EEG061': [0, 0, 0]}, 'nasion': [0, 0, 1], + 'coord_frame': 'mni_tal', 'lpa': [0, 1, 0], 'rpa': [1, 0, 0], 'hsp': None, @@ -331,7 +340,6 @@ def get_positions(self): # get channel positions as dict ch_pos = self._get_ch_pos() - # _get_fid_coords(self.dig) # get coordframe and fiducial coordinates montage_bunch = _get_data_as_dict_from_dig(self.dig) coord_frame = _frame_to_str.get(montage_bunch.coord_frame) @@ -348,6 +356,58 @@ def get_positions(self): ) return positions + @verbose + def add_estimated_fiducials(self, subject, subjects_dir=None, + verbose=None): + """Estimate fiducials based on FreeSurfer ``fsaverage`` subject. + + This takes a montage with the ``mri`` coordinate frame, + corresponding to the FreeSurfer RAS (xyz in the volume) T1w + image of the specific subject. It will call + :func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and + Nasion fiducial points. 
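# Editor's sketch (not part of the patch): a montage defined in FreeSurfer
# surface RAS ('mri') coordinates can have fiducials estimated from fsaverage
# prepended and can then be converted to a head transform. The positions are
# hypothetical and a FreeSurfer subjects directory containing 'fsaverage' is
# assumed to be configured.
import mne

ch_pos = {'EEG1': [0.02, 0.08, 0.04], 'EEG2': [-0.02, 0.08, 0.04]}  # meters, MRI RAS
montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame='mri')
montage.add_estimated_fiducials('fsaverage', subjects_dir=None)
mri_head_t = mne.channels.compute_native_head_t(montage)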
+ + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + See Also + -------- + :ref:`plot_source_alignment` + + Notes + ----- + Since MNE uses the FIF data structure, it relies on the ``head`` + coordinate frame. Any coordinate frame can be transformed + to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are + defined. One can use this function to estimate those fiducials + and then use ``montage.get_native_head_t()`` to get the + head <-> MRI transform. + """ + # get coordframe and fiducial coordinates + montage_bunch = _get_data_as_dict_from_dig(self.dig) + + # get the coordinate frame as a string and check that it's MRI + if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + f'Montage should be in mri coordinate frame to call ' + f'`add_estimated_fiducials`. The current coordinate ' + f'frame is {montage_bunch.coord_frame}') + + # estimate LPA, nasion, RPA from FreeSurfer fsaverage + fids_mri = list(get_mni_fiducials(subject, subjects_dir)) + + # add those digpoints to front of montage + self.dig = fids_mri + self.dig + return self + VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1) @@ -666,8 +726,50 @@ def _get_montage_in_head(montage): return transform_to_head(montage.copy()) +def _set_montage_fnirs(info, montage): + """Set the montage for fNIRS data. + + This needs to be different to electrodes as each channel has three + coordinates that need to be set. For each channel there is a source optode + location, a detector optode location, and a channel midpoint that must be + stored. This function modifies info['chs'][#]['loc'] and info['dig'] in + place. + """ + from ..preprocessing.nirs import (_channel_frequencies, + _channel_chromophore, + _check_channels_ordered) + # Validate that the fNIRS info is correctly formatted + freqs = np.unique(_channel_frequencies(info)) + if freqs.size > 0: + picks = _check_channels_ordered(info, freqs) + else: + picks = _check_channels_ordered(info, + np.unique(_channel_chromophore(info))) + + # Modify info['chs'][#]['loc'] in place + num_ficiduals = len(montage.dig) - len(montage.ch_names) + for ch_idx in picks: + ch = info['chs'][ch_idx]['ch_name'] + source, detector = ch.split(' ')[0].split('_') + source_pos = montage.dig[montage.ch_names.index(source) + + num_ficiduals]['r'] + detector_pos = montage.dig[montage.ch_names.index(detector) + + num_ficiduals]['r'] + + info['chs'][ch_idx]['loc'][3:6] = source_pos + info['chs'][ch_idx]['loc'][6:9] = detector_pos + midpoint = (source_pos + detector_pos) / 2 + info['chs'][ch_idx]['loc'][:3] = midpoint + + # Modify info['dig'] in place + info['dig'] = montage.dig + + return info + + @fill_doc -def _set_montage(info, montage, match_case=True, on_missing='raise'): +def _set_montage(info, montage, match_case=True, match_alias=False, + on_missing='raise'): """Apply montage to data. With a DigMontage, this function will replace the digitizer info with @@ -682,6 +784,7 @@ def _set_montage(info, montage, match_case=True, on_missing='raise'): The measurement info to update. 
%(montage)s %(match_case)s + %(match_alias)s %(on_missing_montage)s Notes @@ -697,6 +800,7 @@ def _set_montage(info, montage, match_case=True, on_missing='raise'): if isinstance(montage, DigMontage): mnt_head = _get_montage_in_head(montage) + del montage def _backcompat_value(pos, ref_pos): if any(np.isnan(pos)): @@ -707,16 +811,16 @@ def _backcompat_value(pos, ref_pos): # get the channels in the montage in head ch_pos = mnt_head._get_ch_pos() - # only get the eeg, seeg, ecog channels + # only get the eeg, seeg, dbs, ecog channels _pick_chs = partial( - pick_types, exclude=[], eeg=True, seeg=True, ecog=True, meg=False, - ) + pick_types, exclude=[], eeg=True, seeg=True, dbs=True, ecog=True, + meg=False) # get the reference position from the loc[3:6] chs = info['chs'] ref_pos = [chs[ii]['loc'][3:6] for ii in _pick_chs(info)] - # keep reference location from EEG/ECoG/SEEG channels if they + # keep reference location from EEG/ECoG/SEEG/DBS channels if they # already exist and are all the same. custom_eeg_ref_dig = False # Note: ref position is an empty list for fieldtrip data @@ -758,6 +862,27 @@ def _backcompat_value(pos, ref_pos): raise ValueError('Cannot use match_case=False as %s channel ' 'name(s) require case sensitivity' % n_dup) + # use lookup table to match unrecognized channel names to known aliases + if match_alias: + alias_dict = (match_alias if isinstance(match_alias, dict) else + CHANNEL_LOC_ALIASES) + if not match_case: + alias_dict = { + ch_name.lower(): ch_alias.lower() + for ch_name, ch_alias in alias_dict.items() + } + + # excluded ch_alias not in info, to prevent unnecessary mapping and + # warning messages based on aliases. + alias_dict = { + ch_name: ch_alias + for ch_name, ch_alias in alias_dict.items() + if ch_alias in ch_pos_use + } + info_names_use = [ + alias_dict.get(ch_name, ch_name) for ch_name in info_names_use + ] + # warn user if there is not a full overlap of montage with info_chs not_in_montage = [name for name, use in zip(info_names, info_names_use) if use not in ch_pos_use] @@ -780,6 +905,7 @@ def _backcompat_value(pos, ref_pos): for name, use in zip(info_names, info_names_use): _loc_view = info['chs'][info['ch_names'].index(name)]['loc'] + # Next line modifies info['chs'][#]['loc'] in place _loc_view[:6] = _backcompat_value(ch_pos_use[use], eeg_ref_pos) del ch_pos_use @@ -808,14 +934,23 @@ def _backcompat_value(pos, ref_pos): # in the old dig if ref_dig_point in old_dig: digpoints.append(ref_dig_point) + # Next line modifies info['dig'] in place info['dig'] = _format_dig_points(digpoints, enforce_order=True) if mnt_head.dev_head_t is not None: + # Next line modifies info['dev_head_t'] in place info['dev_head_t'] = Transform('meg', 'head', mnt_head.dev_head_t) + # Handle fNIRS with source, detector and channel + fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True) + if len(fnirs_picks) > 0: + info = _set_montage_fnirs(info, mnt_head) + else: # None case + # Next line modifies info['dig'] in place info['dig'] = None for ch in info['chs']: + # Next line modifies info['chs'][#]['loc'] in place ch['loc'] = np.full(12, np.nan) @@ -1031,7 +1166,8 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): '.loc' or '.locs' or '.eloc' (for EEGLAB files), '.sfp' (BESA/EGI files), '.csd', '.elc', '.txt', '.csd', '.elp' (BESA spherical), - '.bvef' (BrainVision files). + '.bvef' (BrainVision files), + '.csv', '.tsv', '.xyz' (XYZ coordinates). head_size : float | None The size of the head (radius, in [m]). 
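A sketch of the ``match_alias`` lookup implemented above, mirroring the alias test added later in this diff ('Cb1'/'Cb2' are aliases of 'POO7'/'POO8' in the built-in alias table; the positions are arbitrary)::

    import mne

    montage = mne.channels.make_dig_montage(
        ch_pos={'POO7': [-0.03, -0.09, 0.01], 'POO8': [0.03, -0.09, 0.01]},
        coord_frame='head')
    info = mne.create_info(['Cb1', 'Cb2'], sfreq=256., ch_types='eeg')
    info.set_montage(montage, match_alias=True)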
If ``None``, returns the values read from the montage file with no modification. Defaults to 0.095m. @@ -1065,7 +1201,7 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): """ from ._standard_montage_utils import ( _read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc, - _read_elp_besa, _read_brainvision + _read_elp_besa, _read_brainvision, _read_xyz ) SUPPORTED_FILE_EXT = { 'eeglab': ('.loc', '.locs', '.eloc', ), @@ -1075,6 +1211,7 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): 'generic (Theta-phi in degrees)': ('.txt', ), 'standard BESA spherical': ('.elp', ), # XXX: not same as polhemus elp 'brainvision': ('.bvef', ), + 'xyz': ('.csv', '.tsv', '.xyz'), } _, ext = op.splitext(fname) @@ -1115,6 +1252,9 @@ def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None): elif ext in SUPPORTED_FILE_EXT['brainvision']: montage = _read_brainvision(fname, head_size) + elif ext in SUPPORTED_FILE_EXT['xyz']: + montage = _read_xyz(fname) + if coord_frame is not None: coord_frame = _coord_frame_const(coord_frame) for d in montage.dig: @@ -1274,6 +1414,10 @@ def make_standard_montage(kind, head_size=HEAD_SIZE_DEFAULT): MGH (60+3 locations) mgh70 The (newer) 70-channel BrainVision cap used at MGH (70+3 locations) + + artinis-octamon Artinis OctaMon fNIRS (8 sources, 2 detectors) + + artinis-brite23 Artinis Brite23 fNIRS (11 sources, 7 detectors) =================== ===================================================== .. versionadded:: 0.19.0 diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 06dec3155ec..0ca3f32112b 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -29,7 +29,7 @@ io_dir = op.join(op.dirname(__file__), '..', '..', 'io') base_dir = op.join(io_dir, 'tests', 'data') raw_fname = op.join(base_dir, 'test_raw.fif') -eve_fname = op .join(base_dir, 'test-eve.fif') +eve_fname = op.join(base_dir, 'test-eve.fif') fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd') @@ -83,10 +83,6 @@ def test_rename_channels(): # Test bad input pytest.raises(ValueError, rename_channels, info, 1.) pytest.raises(ValueError, rename_channels, info, 1.) 
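A sketch of reading one of the newly supported plain-text position formats; the file name is hypothetical and its layout follows the CSV test case added later in this diff::

    import mne

    # electrodes.csv (hypothetical), e.g.:
    #   ch_name, x, y, z
    #   Fp1, -95.0, -3., -3.
    montage = mne.channels.read_custom_montage('electrodes.csv')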
- # Test name too long (channel names must be less than 15 characters) - A16 = 'A' * 16 - mapping = {'MEG 2641': A16} - pytest.raises(ValueError, rename_channels, info, mapping) # Test successful changes # Test ch_name and ch_names are changed @@ -107,6 +103,15 @@ def test_rename_channels(): rename_channels(info2, mapping) assert_array_equal(['EEG060', 'EEG060'], info2['bads']) + # test that keys in Raw._orig_units will be renamed, too + raw = read_raw_fif(raw_fname).crop(0, 0.1) + old, new = 'EEG 060', 'New' + raw._orig_units = {old: 'V'} + + raw.rename_channels({old: new}) + assert old not in raw._orig_units + assert new in raw._orig_units + def test_set_channel_types(): """Test set_channel_types.""" @@ -121,9 +126,9 @@ def test_set_channel_types(): with pytest.raises(ValueError, match='cannot change to this channel type'): raw.set_channel_types(mapping) # Test changing type if in proj - mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog', - 'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg', - 'MEG 2442': 'hbo'} + mapping = {'EEG 057': 'dbs', 'EEG 058': 'ecog', 'EEG 059': 'ecg', + 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', + 'MEG 2443': 'eeg', 'MEG 2442': 'hbo', 'EEG 001': 'resp'} raw2 = read_raw_fif(raw_fname) raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): @@ -132,6 +137,10 @@ def test_set_channel_types(): with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): raw2 = raw2.set_channel_types(mapping) info = raw2.info + assert info['chs'][371]['ch_name'] == 'EEG 057' + assert info['chs'][371]['kind'] == FIFF.FIFFV_DBS_CH + assert info['chs'][371]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][371]['coil_type'] == FIFF.FIFFV_COIL_EEG assert info['chs'][372]['ch_name'] == 'EEG 058' assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V @@ -157,6 +166,12 @@ def test_set_channel_types(): assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO + # resp channel type + idx = pick_channels(raw.ch_names, ['EEG 001'])[0] + assert info['chs'][idx]['kind'] == FIFF.FIFFV_RESP_CH + assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V + assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_NONE + # Test meaningful error when setting channel type with unknown unit raw.info['chs'][0]['unit'] = 0. 
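A sketch of the new channel types exercised above ('dbs' and 'resp'); the file path is a placeholder and the channel names are assumed to exist in the recording::

    import mne

    raw = mne.io.read_raw_fif('test_raw.fif')  # placeholder path
    # may warn that the unit for a channel has changed
    raw.set_channel_types({'EEG 057': 'dbs', 'EEG 001': 'resp'})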
ch_types = {raw.ch_names[0]: 'misc'} @@ -329,6 +344,18 @@ def test_find_ch_adjacency(): assert ch_names[0] == 'MEG 001' +@testing.requires_testing_data +def test_neuromag122_adjacency(): + """Test computing the adjacency matrix of Neuromag122-Data.""" + nm122_fname = op.join(testing.data_path(), 'misc', + 'neuromag122_test_file-raw.fif') + raw = read_raw_fif(nm122_fname, preload=True) + conn, ch_names = find_ch_adjacency(raw.info, 'grad') + assert conn.getnnz() == 1564 + assert len(ch_names) == 122 + assert conn.shape == (122, 122) + + def test_drop_channels(): """Test if dropping channels works with various arguments.""" raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) @@ -339,6 +366,31 @@ def test_drop_channels(): pytest.raises(ValueError, raw.drop_channels, 5) # must be list or str +def test_add_reference_channels(): + """Test if there is a new reference channel that consist of all zeros.""" + raw = read_raw_fif(raw_fname, preload=True) + n_raw_original_channels = len(raw.ch_names) + epochs = Epochs(raw, read_events(eve_fname)) + epochs.load_data() + epochs_original_shape = epochs._data.shape[1] + evoked = epochs.average() + n_evoked_original_channels = len(evoked.ch_names) + + # Raw object + raw.add_reference_channels(['REF 123']) + assert len(raw.ch_names) == n_raw_original_channels + 1 + assert np.all(raw.get_data()[-1] == 0) + + # Epochs object + epochs.add_reference_channels(['REF 123']) + assert epochs._data.shape[1] == epochs_original_shape + 1 + + # Evoked object + evoked.add_reference_channels(['REF 123']) + assert len(evoked.ch_names) == n_evoked_original_channels + 1 + assert np.all(evoked._data[-1] == 0) + + def test_equalize_channels(): """Test equalizing channels and their ordering.""" # This function only tests the generic functionality of equalize_channels. 
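A sketch of the ``add_reference_channels`` behavior checked above: the added reference channel is appended as an all-zero channel (the file path is a placeholder)::

    import mne

    raw = mne.io.read_raw_fif('test_raw.fif', preload=True)  # placeholder path
    raw.add_reference_channels(['REF 123'])
    assert (raw.get_data()[-1] == 0).all()  # flat reference, as in the test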
diff --git a/mne/channels/tests/test_interpolation.py b/mne/channels/tests/test_interpolation.py index f81ed1c8d07..858aec94201 100644 --- a/mne/channels/tests/test_interpolation.py +++ b/mne/channels/tests/test_interpolation.py @@ -96,8 +96,31 @@ def test_interpolation_eeg(offset, avg_proj, ctol, atol, method): good_picks = pick_types(evoked_eeg.info, meg=False, eeg=True) assert_allclose(evoked_eeg.data[good_picks].mean(0), 0., atol=1e-20) evoked_eeg_bad = evoked_eeg.copy() - evoked_eeg_bad.data[ - evoked_eeg.ch_names.index(epochs_eeg.info['bads'][0])] = 1e10 + bads_picks = pick_channels( + epochs_eeg.ch_names, include=epochs_eeg.info['bads'], ordered=True + ) + evoked_eeg_bad.data[bads_picks, :] = 1e10 + + # Test first the exclude parameter + evoked_eeg_2_bads = evoked_eeg_bad.copy() + evoked_eeg_2_bads.info['bads'] = ['EEG 004', 'EEG 012'] + evoked_eeg_2_bads.data[ + pick_channels(evoked_eeg_bad.ch_names, ['EEG 004', 'EEG 012']) + ] = 1e10 + evoked_eeg_interp = evoked_eeg_2_bads.interpolate_bads( + origin=(0., 0., 0.), exclude=['EEG 004'], **kw) + assert evoked_eeg_interp.info['bads'] == ['EEG 004'] + assert np.all( + evoked_eeg_interp.data[evoked_eeg_interp.ch_names.index('EEG 004'), :] + == 1e10 + ) + assert np.all( + evoked_eeg_interp.data[evoked_eeg_interp.ch_names.index('EEG 012'), :] + != 1e10 + ) + + # Now test without exclude parameter + evoked_eeg_bad.info['bads'] = ['EEG 012'] evoked_eeg_interp = evoked_eeg_bad.copy().interpolate_bads( origin=(0., 0., 0.), **kw) if avg_proj: @@ -203,6 +226,18 @@ def test_interpolation_meg(): data2 = evoked.interpolate_bads(origin='auto').data[pick] assert np.corrcoef(data1, data2)[0, 1] > thresh + # MEG -- with exclude + evoked.info['bads'] = ['MEG 0141', 'MEG 0121'] + pick = pick_channels(evoked.ch_names, evoked.info['bads'], ordered=True) + evoked.data[pick[-1]] = 1e10 + data1 = evoked.data[pick] + evoked.info.normalize_proj() + data2 = evoked.interpolate_bads( + origin='auto', exclude=['MEG 0121'] + ).data[pick] + assert np.corrcoef(data1[0], data2[0])[0, 1] > thresh + assert np.all(data2[1] == 1e10) + def _this_interpol(inst, ref_meg=False): from mne.channels.interpolation import _interpolate_bads_meg @@ -263,6 +298,9 @@ def test_interpolation_nirs(): bad_0 = np.where([name == raw_od.info['bads'][0] for name in raw_od.ch_names])[0][0] bad_0_std_pre_interp = np.std(raw_od._data[bad_0]) + bads_init = list(raw_od.info['bads']) + raw_od.interpolate_bads(exclude=bads_init[:1]) + assert raw_od.info['bads'] == bads_init[:1] raw_od.interpolate_bads() assert raw_od.info['bads'] == [] assert bad_0_std_pre_interp > np.std(raw_od._data[bad_0]) diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index bca551db139..208fdc2b323 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -326,6 +326,59 @@ def test_documented(): None, id='legacy mne-c'), + pytest.param( + read_custom_montage, + ('ch_name, x, y, z\n' + 'Fp1, -95.0, -3., -3.\n' + 'AF7, -1, -1, -3\n' + 'A3, -2, -2, 2\n' + 'A, 0, 0, 0'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'csv', + None, + id='CSV file'), + + pytest.param( + read_custom_montage, + ('1\t-95.0\t-3.\t-3.\tFp1\n' + '2\t-1\t-1\t-3\tAF7\n' + '3\t-2\t-2\t2\tA3\n' + '4\t0\t0\t0\tA'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, 
rpa=None, + ), + 'xyz', + None, + id='XYZ file'), + + pytest.param( + read_custom_montage, + ('ch_name\tx\ty\tz\n' + 'Fp1\t-95.0\t-3.\t-3.\n' + 'AF7\t-1\t-1\t-3\n' + 'A3\t-2\t-2\t2\n' + 'A\t0\t0\t0'), + make_dig_montage( + ch_pos={ + 'A': [0., 0., 0.], 'A3': [-2., -2., 2.], + 'AF7': [-1., -1., -3.], 'Fp1': [-95., -3., -3.], + }, + nasion=None, lpa=None, rpa=None, + ), + 'tsv', + None, + id='TSV file'), + pytest.param( partial(read_custom_montage, head_size=None), ('\n' @@ -1199,9 +1252,38 @@ def test_set_montage_with_sub_super_set_of_ch_names(): assert exc.match('on_missing') +def test_set_montage_with_known_aliases(): + """Test matching unrecognized channel locations to known aliases.""" + # montage and info match + mock_montage_ch_names = ['POO7', 'POO8'] + n_channels = len(mock_montage_ch_names) + + montage = make_dig_montage(ch_pos=dict( + zip( + mock_montage_ch_names, + np.arange(n_channels * 3).reshape(n_channels, 3), + )), + coord_frame='head') + + mock_info_ch_names = ['Cb1', 'Cb2'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage, match_alias=True) + + # work with match_case + mock_info_ch_names = ['cb1', 'cb2'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + info.set_montage(montage, match_case=False, match_alias=True) + + # should warn user T1 instead of its alias T9 + mock_info_ch_names = ['Cb1', 'T1'] + info = create_info(ch_names=mock_info_ch_names, sfreq=1, ch_types='eeg') + with pytest.raises(ValueError, match='T1'): + info.set_montage(montage, match_case=False, match_alias=True) + + def test_heterogeneous_ch_type(): """Test ch_names matching criteria with heterogeneous ch_type.""" - VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg') + VALID_MONTAGE_NAMED_CHS = ('eeg', 'ecog', 'seeg', 'dbs') montage = _make_toy_dig_montage( n_channels=len(VALID_MONTAGE_NAMED_CHS), @@ -1210,7 +1292,7 @@ def test_heterogeneous_ch_type(): # Montage and info match info = create_info(montage.ch_names, 1., list(VALID_MONTAGE_NAMED_CHS)) - RawArray(np.zeros((3, 1)), info, copy=None).set_montage(montage) + RawArray(np.zeros((4, 1)), info, copy=None).set_montage(montage) def test_set_montage_coord_frame_in_head_vs_unknown(): @@ -1416,7 +1498,7 @@ def test_read_dig_hpts(): def test_get_builtin_montages(): """Test help function to obtain builtin montages.""" - EXPECTED_NUM = 24 + EXPECTED_NUM = 26 assert len(get_builtin_montages()) == EXPECTED_NUM @@ -1429,4 +1511,36 @@ def test_plot_montage(): plt.close('all') +@testing.requires_testing_data +def test_montage_add_estimated_fiducials(): + """Test montage can add estimated fiducials for rpa, lpa, nas.""" + # get the fiducials from test file + subjects_dir = op.join(data_path, 'subjects') + subject = 'sample' + fid_fname = op.join(subjects_dir, subject, 'bem', + 'sample-fiducials.fif') + test_fids, test_coord_frame = read_fiducials(fid_fname) + test_fids = np.array([f['r'] for f in test_fids]) + + # create test montage and add estimated fiducials + test_ch_pos = {'A1': [0, 0, 0]} + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mri') + montage.add_estimated_fiducials(subject=subject, subjects_dir=subjects_dir) + + # check that these fiducials are close to the estimated fiducials + ch_pos = montage.get_positions() + fids_est = [ch_pos['lpa'], ch_pos['nasion'], ch_pos['rpa']] + + dists = np.linalg.norm(test_fids - fids_est, axis=-1) * 1000. 
# -> mm + assert (dists < 8).all(), dists + + # an error should be raised if the montage is not in `mri` coord_frame + # which is the FreeSurfer RAS + montage = make_dig_montage(ch_pos=test_ch_pos, coord_frame='mni_tal') + with pytest.raises(RuntimeError, match='Montage should be in mri ' + 'coordinate frame'): + montage.add_estimated_fiducials(subject=subject, + subjects_dir=subjects_dir) + + run_tests_if_main() diff --git a/mne/channels/tests/test_standard_montage.py b/mne/channels/tests/test_standard_montage.py index eed6a2670ca..f8928584c20 100644 --- a/mne/channels/tests/test_standard_montage.py +++ b/mne/channels/tests/test_standard_montage.py @@ -8,12 +8,17 @@ import numpy as np -from numpy.testing import assert_allclose +from numpy.testing import (assert_allclose, assert_array_almost_equal, + assert_raises) -from mne.channels import make_standard_montage -from mne.io._digitization import _get_dig_eeg, _get_fid_coords +from mne import create_info +from mne.channels import make_standard_montage, compute_native_head_t from mne.channels.montage import get_builtin_montages, HEAD_SIZE_DEFAULT +from mne.io import RawArray +from mne.io._digitization import _get_dig_eeg, _get_fid_coords from mne.io.constants import FIFF +from mne.preprocessing.nirs import optical_density, beer_lambert_law +from mne.transforms import _get_trans, _angle_between_quats, rot_to_quat @pytest.mark.parametrize('kind', get_builtin_montages()) @@ -24,7 +29,10 @@ def test_standard_montages_have_fids(kind): for k, v in fids.items(): assert v is not None, k for d in montage.dig: - assert d['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN + if kind == 'artinis-octamon' or kind == 'artinis-brite23': + assert d['coord_frame'] == FIFF.FIFFV_COORD_MRI + else: + assert d['coord_frame'] == FIFF.FIFFV_COORD_UNKNOWN def test_standard_montage_errors(): @@ -73,3 +81,164 @@ def test_standard_superset(): for key, value in m_1020._get_ch_pos().items(): if key not in ('O10', 'O9'): assert_allclose(c_1005[key], value, atol=1e-4, err_msg=key) + + +def _simulate_artinis_octamon(): + """Simulate artinis OctaMon channel data from numpy data. + + This is to test data that is imported with missing or incorrect montage + info. This data can then be used to test the set_montage function. + """ + np.random.seed(42) + data = np.absolute(np.random.normal(size=(16, 100))) + ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760', 'S2_D1 850', + 'S3_D1 760', 'S3_D1 850', 'S4_D1 760', 'S4_D1 850', + 'S5_D2 760', 'S5_D2 850', 'S6_D2 760', 'S6_D2 850', + 'S7_D2 760', 'S7_D2 850', 'S8_D2 760', 'S8_D2 850'] + ch_types = ['fnirs_cw_amplitude' for _ in ch_names] + sfreq = 10. # Hz + info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) + for i, ch_name in enumerate(ch_names): + info['chs'][i]['loc'][9] = int(ch_name.split(' ')[1]) + raw = RawArray(data, info) + + return raw + + +def _simulate_artinis_brite23(): + """Simulate artinis Brite23 channel data from numpy data. + + This is to test data that is imported with missing or incorrect montage + info. This data can then be used to test the set_montage function. 
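A minimal sketch modeled on the simulation helper above: an fNIRS ``Raw`` whose channel names encode source/detector pairs and whose wavelength is stored in ``loc[9]``, so the new Artinis montage can be applied::

    import numpy as np
    import mne

    ch_names = ['S1_D1 760', 'S1_D1 850']
    info = mne.create_info(ch_names, sfreq=10., ch_types='fnirs_cw_amplitude')
    for i, name in enumerate(ch_names):
        info['chs'][i]['loc'][9] = float(name.split(' ')[1])  # wavelength
    raw = mne.io.RawArray(np.zeros((2, 100)), info)
    raw.set_montage('artinis-octamon')  # fills source, detector and midpoint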
+ """ + np.random.seed(0) + data = np.random.normal(size=(46, 100)) + sd_names = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S3_D2', 'S4_D2', 'S5_D2', + 'S4_D3', 'S5_D3', 'S6_D3', 'S5_D4', 'S6_D4', 'S7_D4', 'S6_D5', + 'S7_D5', 'S8_D5', 'S7_D6', 'S8_D6', 'S9_D6', 'S8_D7', 'S9_D7', + 'S10_D7', 'S11_D7'] + ch_names = [] + ch_types = [] + for name in sd_names: + ch_names.append(name + ' hbo') + ch_types.append('hbo') + ch_names.append(name + ' hbr') + ch_types.append('hbr') + sfreq = 10. # Hz + info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) + raw = RawArray(data, info) + + return raw + + +@pytest.mark.parametrize('kind', ('octamon', 'brite23')) +def test_set_montage_artinis_fsaverage(kind): + """Test that artinis montages match fsaverage's head<->MRI transform.""" + # Compare OctaMon and Brite23 to fsaverage + trans_fs, _ = _get_trans('fsaverage') + montage = make_standard_montage(f'artinis-{kind}') + trans = compute_native_head_t(montage) + assert trans['to'] == trans_fs['to'] + assert trans['from'] == trans_fs['from'] + translation = 1000 * np.linalg.norm(trans['trans'][:3, 3] - + trans_fs['trans'][:3, 3]) + # TODO: This is actually quite big... + assert 15 < translation < 18 # mm + rotation = np.rad2deg( + _angle_between_quats(rot_to_quat(trans['trans'][:3, :3]), + rot_to_quat(trans_fs['trans'][:3, :3]))) + assert 3 < rotation < 7 # degrees + + +def test_set_montage_artinis_basic(): + """Test that OctaMon and Brite23 montages are set properly.""" + # Test OctaMon montage + montage_octamon = make_standard_montage('artinis-octamon') + montage_brite23 = make_standard_montage('artinis-brite23') + raw = _simulate_artinis_octamon() + raw_od = optical_density(raw) + old_info = raw.info.copy() + old_info_od = raw_od.info.copy() + raw.set_montage(montage_octamon) + raw_od.set_montage(montage_octamon) + raw_hb = beer_lambert_law(raw_od) # montage needed for Beer Lambert law + # Check that the montage was actually modified + assert_raises(AssertionError, assert_array_almost_equal, + old_info['chs'][0]['loc'][:9], + raw.info['chs'][0]['loc'][:9]) + assert_raises(AssertionError, assert_array_almost_equal, + old_info_od['chs'][0]['loc'][:9], + raw_od.info['chs'][0]['loc'][:9]) + + # Check a known location + assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], + [0.0616, 0.075398, 0.07347]) + assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], + [-0.033875, 0.101276, 0.077291]) + assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], + [-0.062749, 0.080417, 0.074884]) + assert_array_almost_equal(raw_od.info['chs'][12]['loc'][:3], + [-0.062749, 0.080417, 0.074884]) + assert_array_almost_equal(raw_hb.info['chs'][12]['loc'][:3], + [-0.062749, 0.080417, 0.074884]) + # Check that locations are identical for a pair of channels (all elements + # except the 10th which is the wavelength if not hbo and hbr type) + assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], + raw.info['chs'][1]['loc'][:9]) + assert_array_almost_equal(raw_od.info['chs'][0]['loc'][:9], + raw_od.info['chs'][1]['loc'][:9]) + assert_array_almost_equal(raw_hb.info['chs'][0]['loc'][:9], + raw_hb.info['chs'][1]['loc'][:9]) + + # Test Brite23 montage + raw = _simulate_artinis_brite23() + old_info = raw.info.copy() + raw.set_montage(montage_brite23) + # Check that the montage was actually modified + assert_raises(AssertionError, assert_array_almost_equal, + old_info['chs'][0]['loc'][:9], + raw.info['chs'][0]['loc'][:9]) + # Check a known location + assert_array_almost_equal(raw.info['chs'][0]['loc'][:3], + [0.085583, 
0.036275, 0.089426]) + assert_array_almost_equal(raw.info['chs'][8]['loc'][:3], + [0.069555, 0.078579, 0.069305]) + assert_array_almost_equal(raw.info['chs'][12]['loc'][:3], + [0.044861, 0.100952, 0.065175]) + # Check that locations are identical for a pair of channels (all elements + # except the 10th which is the wavelength if not hbo and hbr type) + assert_array_almost_equal(raw.info['chs'][0]['loc'][:9], + raw.info['chs'][1]['loc'][:9]) + + # Test channel variations + raw_old = _simulate_artinis_brite23() + # Raw missing some channels that are in the montage: pass + raw = raw_old.copy() + raw.pick(['S1_D1 hbo', 'S1_D1 hbr']) + raw.set_montage('artinis-brite23') + + # Unconventional channel pair: pass + raw = raw_old.copy() + info_new = create_info(['S11_D1 hbo', 'S11_D1 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + raw.set_montage('artinis-brite23') + + # Source not in montage: fail + raw = raw_old.copy() + info_new = create_info(['S12_D7 hbo', 'S12_D7 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + with pytest.raises(ValueError, match='is not in list'): + raw.set_montage('artinis-brite23') + + # Detector not in montage: fail + raw = raw_old.copy() + info_new = create_info(['S11_D8 hbo', 'S11_D8 hbr'], raw.info['sfreq'], + ['hbo', 'hbr']) + new = RawArray(np.random.normal(size=(2, len(raw))), info_new) + raw.add_channels([new], force_update_info=True) + with pytest.raises(ValueError, match='is not in list'): + raw.set_montage('artinis-brite23') diff --git a/mne/chpi.py b/mne/chpi.py index 972cb400bd8..7bcbb80913c 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -22,11 +22,13 @@ from functools import partial import numpy as np -from scipy import linalg import itertools +from .event import find_events from .io.base import BaseRaw -from .io.meas_info import _simplify_info +from .io.kit.constants import KIT +from .io.kit.kit import RawKIT as _RawKIT +from .io.meas_info import _simplify_info, Info from .io.pick import (pick_types, pick_channels, pick_channels_regexp, pick_info) from .io.proj import Projection, setup_proj @@ -41,9 +43,9 @@ _regularize_out, _get_mf_picks_fix_mags) from .transforms import (apply_trans, invert_transform, _angle_between_quats, quat_to_rot, rot_to_quat, _fit_matched_points, - _quat_to_affine) + _quat_to_affine, als_ras_trans) from .utils import (verbose, logger, use_log_level, _check_fname, warn, - _validate_type, ProgressBar, _check_option) + _validate_type, ProgressBar, _check_option, _pl) # Eventually we should add: # hpicons @@ -212,6 +214,85 @@ def extract_chpi_locs_ctf(raw, verbose=None): return dict(rrs=rrs, gofs=gofs, times=times, moments=moments) +@verbose +def extract_chpi_locs_kit(raw, stim_channel='MISC 064', *, verbose=None): + """Extract cHPI locations from KIT data. + + Parameters + ---------- + raw : instance of RawKIT + Raw data with KIT cHPI information. + stim_channel : str + The stimulus channel that encodes HPI measurement intervals. + %(verbose)s + + Returns + ------- + %(chpi_locs)s + + Notes + ----- + .. 
versionadded:: 0.23 + """ + _validate_type(raw, (_RawKIT,), 'raw') + stim_chs = [ + raw.info['ch_names'][pick] for pick in pick_types( + raw.info, stim=True, misc=True, ref_meg=False)] + _validate_type(stim_channel, str, 'stim_channel') + _check_option('stim_channel', stim_channel, stim_chs) + idx = raw.ch_names.index(stim_channel) + events_on = find_events( + raw, stim_channel=raw.ch_names[idx], output='onset', + verbose=False)[:, 0] + events_off = find_events( + raw, stim_channel=raw.ch_names[idx], output='offset', + verbose=False)[:, 0] + bad = False + if len(events_on) == 0 or len(events_off) == 0: + bad = True + else: + if events_on[-1] > events_off[-1]: + events_on = events_on[:-1] + if events_on.size != events_off.size or not \ + (events_on < events_off).all(): + bad = True + if bad: + raise RuntimeError( + f'Could not find appropriate cHPI intervals from {stim_channel}') + # use the midpoint for times + times = (events_on + events_off) / (2 * raw.info['sfreq']) + del events_on, events_off + # XXX remove first two rows. It is unknown currently if there is a way to + # determine from the con file the number of initial pulses that + # indicate the start of reading. The number is shown by opening the con + # file in MEG160, but I couldn't find the value in the .con file, so it + # may just always be 2... + times = times[2:] + n_coils = 5 # KIT always has 5 (hard-coded in reader) + header = raw._raw_extras[0]['dirs'][KIT.DIR_INDEX_CHPI_DATA] + dtype = np.dtype([('good', ' 0.9999 - # For symmetrical arrangements, flips can produce roughly - # equivalent g values. To avoid this, heavily penalize - # large rotations. - rotation = _angle_between_quats(this_quat[:3], np.zeros(3)) - check_g = g * max(1. - rotation / np.pi, 0) ** 0.25 + if bias: + # For symmetrical arrangements, flips can produce roughly + # equivalent g values. To avoid this, heavily penalize + # large rotations. + rotation = _angle_between_quats(this_quat[:3], np.zeros(3)) + check_g = g * max(1. - rotation / np.pi, 0) ** 0.25 + else: + check_g = g if check_g > best_g: out_g = g best_g = check_g @@ -487,7 +571,7 @@ def _setup_hpi_amplitude_fitting(info, t_window, remove_aliased=False, model += [np.sin(l_t), np.cos(l_t)] # line freqs model += [slope, np.ones(slope.shape)] model = np.concatenate(model, axis=1) - inv_model = linalg.pinv(model) + inv_model = np.linalg.pinv(model) inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs)) proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order) @@ -508,6 +592,7 @@ def _reorder_inv_model(inv_model, n_freqs): def _setup_ext_proj(info, ext_order): + from scipy import linalg meg_picks = pick_types(info, meg=True, eeg=False, exclude='bads') info = pick_info(_simplify_info(info), meg_picks) # makes a copy _, _, _, _, mag_or_fine = _get_mf_picks_fix_mags( @@ -672,6 +757,7 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, .. versionadded:: 0.20 """ _check_chpi_param(chpi_locs, 'chpi_locs') + _validate_type(info, Info, 'info') hpi_dig_head_rrs = _get_hpi_initial_fit(info, adjust=adjust_dig, verbose='error') n_coils = len(hpi_dig_head_rrs) @@ -693,7 +779,7 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, # if len(use_idx) < 3: msg = (_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' - 'determine the transformation (%s)!' + 'determine the transformation (%s GOF)!' 
% (len(use_idx), n_coils, ', '.join('%0.2f' % g for g in g_coils))) warn(msg) @@ -718,8 +804,10 @@ def compute_head_pos(info, chpi_locs, dist_limit=0.005, gof_limit=0.98, n_good = ((g_coils >= gof_limit) & (errs < dist_limit)).sum() if n_good < 3: warn(_time_prefix(fit_time) + '%s/%s good HPI fits, cannot ' - 'determine the transformation (%s)!' - % (n_good, n_coils, ', '.join('%0.2f' % g for g in g_coils))) + 'determine the transformation (%s mm/GOF)!' + % (n_good, n_coils, + ', '.join(f'{1000 * e:0.1f}::{g:0.2f}' + for e, g in zip(errs, g_coils)))) continue # velocities, in device coords, of HPI coils @@ -858,8 +946,8 @@ def compute_chpi_amplitudes(raw, t_step_min=0.01, t_window='auto', % (len(hpi['freqs']), len(fit_idxs), tmax - tmin)) del tmin, tmax sin_fits = dict() - sin_fits['times'] = (fit_idxs + raw.first_samp - - hpi['n_window'] / 2.) / raw.info['sfreq'] + sin_fits['times'] = np.round(fit_idxs + raw.first_samp - + hpi['n_window'] / 2.) / raw.info['sfreq'] sin_fits['proj'] = hpi['proj'] sin_fits['slopes'] = np.empty( (len(sin_fits['times']), @@ -930,6 +1018,7 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', # Set up magnetic dipole fits _check_option('too_close', too_close, ['raise', 'warning', 'info']) _check_chpi_param(chpi_amplitudes, 'chpi_amplitudes') + _validate_type(info, Info, 'info') sin_fits = chpi_amplitudes # use the old name below del chpi_amplitudes proj = sin_fits['proj'] @@ -965,6 +1054,7 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', _get_hpi_initial_fit(info, adjust=adjust_dig)) last = dict(sin_fit=None, coil_fit_time=sin_fits['times'][0] - 1, coil_dev_rrs=hpi_dig_dev_rrs) + n_hpi = len(hpi_dig_dev_rrs) del hpi_dig_dev_rrs for fit_time, sin_fit in ProgressBar(iter_, mesg='cHPI locations '): # skip this window if bad @@ -1000,8 +1090,15 @@ def compute_chpi_locs(info, chpi_amplitudes, t_step_max=1., too_close='raise', chpi_locs['moments'].append(moments) last['coil_fit_time'] = fit_time last['coil_dev_rrs'] = rrs + n_times = len(chpi_locs['times']) + shapes = dict( + times=(n_times,), + rrs=(n_times, n_hpi, 3), + gofs=(n_times, n_hpi), + moments=(n_times, n_hpi, 3), + ) for key, val in chpi_locs.items(): - chpi_locs[key] = np.array(val, float) + chpi_locs[key] = np.array(val, float).reshape(shapes[key]) return chpi_locs @@ -1095,7 +1192,7 @@ def filter_chpi(raw, include_line=True, t_step=0.01, t_window='auto', this_recon = recon else: # first or last window model = hpi['model'][:this_len] - inv_model = linalg.pinv(model) + inv_model = np.linalg.pinv(model) this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T this_data = raw._data[meg_picks, time_sl] subt_pt = min(midpt + n_step, n_times) diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py index 020f02b3578..2d1a443fd3a 100755 --- a/mne/commands/mne_browse_raw.py +++ b/mne/commands/mne_browse_raw.py @@ -1,6 +1,9 @@ #!/usr/bin/env python r"""Browse raw data. +This uses :func:`mne.io.read_raw` so it supports the same formats +(without keyword arguments). + Examples -------- .. 
code-block:: console @@ -31,6 +34,9 @@ def run(): parser.add_option("--proj", dest="proj_in", help="Projector file", metavar="FILE", default='') + parser.add_option("--projoff", dest="proj_off", + help="Disable all projectors", + default=False, action="store_true") parser.add_option("--eve", dest="eve_in", help="Events file", metavar="FILE", default='') @@ -84,6 +90,7 @@ def run(): preload = options.preload show_options = options.show_options proj_in = options.proj_in + proj_off = options.proj_off eve_in = options.eve_in maxshield = options.maxshield highpass = options.highpass @@ -105,8 +112,10 @@ def run(): parser.print_help() sys.exit(1) - raw = mne.io.read_raw_fif(raw_in, preload=preload, - allow_maxshield=maxshield) + kwargs = dict(preload=preload) + if maxshield: + kwargs.update(allow_maxshield='yes') + raw = mne.io.read_raw(raw_in, **kwargs) if len(proj_in) > 0: projs = mne.read_proj(proj_in) raw.info['projs'] = projs @@ -126,7 +135,7 @@ def run(): raw.plot(duration=duration, start=start, n_channels=n_channels, group_by=group_by, show_options=show_options, events=events, highpass=highpass, lowpass=lowpass, filtorder=filtorder, - clipping=clipping, verbose=verbose) + clipping=clipping, proj=not proj_off, verbose=verbose) plt.show(block=True) diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py index 649aa621dda..b6528dff893 100755 --- a/mne/commands/mne_make_scalp_surfaces.py +++ b/mne/commands/mne_make_scalp_surfaces.py @@ -112,7 +112,7 @@ def check_seghead(surf_path=op.join(subj_path, 'surf')): surf = mne.bem._surfaces_to_bem( [surf], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], incomplete=incomplete, extra=msg)[0] - mne.write_bem_surfaces(dense_fname, surf) + mne.write_bem_surfaces(dense_fname, surf, overwrite=overwrite) levels = 'medium', 'sparse' tris = [] if no_decimate else [30000, 2500] if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true': @@ -131,7 +131,7 @@ def check_seghead(surf_path=op.join(subj_path, 'surf')): [dict(rr=points, tris=tris)], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False, incomplete=incomplete, extra=msg) - mne.write_bem_surfaces(dec_fname, dec_surf) + mne.write_bem_surfaces(dec_fname, dec_surf, overwrite=overwrite) mne.utils.run_command_if_main() diff --git a/mne/commands/mne_report.py b/mne/commands/mne_report.py index 7464704e7f0..2d96570f26f 100755 --- a/mne/commands/mne_report.py +++ b/mne/commands/mne_report.py @@ -14,7 +14,8 @@ ============ ============================================================== Data object Filename convention (ends with) ============ ============================================================== -raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), _meg.fif +raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), + _meg.fif(.gz), _eeg.fif(.gz), _ieeg.fif(.gz) events -eve.fif(.gz) epochs -epo.fif(.gz) evoked -ave.fif(.gz) diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py index 6512630a2e0..a7adaab594f 100755 --- a/mne/commands/mne_surf2bem.py +++ b/mne/commands/mne_surf2bem.py @@ -30,7 +30,7 @@ def run(): parser.add_option("-f", "--fif", dest="fif", help="FIF file produced", metavar="FILE") parser.add_option("-i", "--id", dest="id", default=4, - help=("Surface Id (e.g. 4 sur head surface)")) + help=("Surface Id (e.g. 
4 for head surface)")) options, args = parser.parse_args() diff --git a/mne/commands/tests/test_commands.py b/mne/commands/tests/test_commands.py index c9141584f75..d646421f2bb 100644 --- a/mne/commands/tests/test_commands.py +++ b/mne/commands/tests/test_commands.py @@ -331,6 +331,7 @@ def test_setup_source_space(tmpdir): assert mne_setup_source_space.run() +@pytest.mark.slowtest @testing.requires_testing_data def test_setup_forward_model(tmpdir): """Test mne setup_forward_model.""" diff --git a/mne/conftest.py b/mne/conftest.py index eb5a54dfe28..ec4f14f2093 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -3,6 +3,7 @@ # # License: BSD (3-clause) +from contextlib import contextmanager from distutils.version import LooseVersion import gc import os @@ -12,29 +13,21 @@ import sys import warnings import pytest -# For some unknown reason, on Travis-xenial there are segfaults caused on -# the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an -# import here seems to prevent them (!?). This suggests a potential problem -# with some other library stepping on memory where it shouldn't. It only -# seems to happen on the Linux runs that install Mayavi. Anectodally, -# @larsoner has had problems a couple of years ago where a mayavi import -# seemed to corrupt SciPy linalg function results (!), likely due to the -# associated VTK import, so this could be another manifestation of that. -try: - import readline # noqa -except Exception: - pass import numpy as np + import mne from mne.datasets import testing -from mne.utils import _pl, _assert_no_instances +from mne.fixes import has_numba +from mne.stats import cluster_level +from mne.utils import _pl, _assert_no_instances, numerics test_path = testing.data_path(download=False) s_path = op.join(test_path, 'MEG', 'sample') fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif') fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif') fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') +fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif') bem_path = op.join(test_path, 'subjects', 'sample', 'bem') fname_bem = op.join(bem_path, 'sample-1280-bem.fif') fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz') @@ -102,6 +95,9 @@ def pytest_configure(config): ignore:.*tostring.*is deprecated.*:DeprecationWarning ignore:.*QDesktopWidget\.availableGeometry.*:DeprecationWarning ignore:Unable to enable faulthandler.*:UserWarning + ignore:Fetchers from the nilearn.*:FutureWarning + ignore:SelectableGroups dict interface is deprecated\. Use select\.:DeprecationWarning + ignore:Call to deprecated class vtk.*:DeprecationWarning always:.*get_data.* is deprecated in favor of.*:DeprecationWarning always::ResourceWarning """ # noqa: E501 @@ -240,7 +236,8 @@ def bias_params_free(evoked, noise_cov): def bias_params_fixed(evoked, noise_cov): """Provide inputs for fixed bias functions.""" fwd = mne.read_forward_solution(fname_fwd) - fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True) + mne.convert_forward_solution( + fwd, force_fixed=True, surf_ori=True, copy=False) return _bias_params(evoked, noise_cov, fwd) @@ -248,14 +245,23 @@ def _bias_params(evoked, noise_cov, fwd): evoked.pick_types(meg=True, eeg=True, exclude=()) # restrict to limited set of verts (small src here) and one hemi for speed vertices = [fwd['src'][0]['vertno'].copy(), []] - stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)), - vertices, 0., 1.) 
+ stc = mne.SourceEstimate( + np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1) fwd = mne.forward.restrict_forward_to_stc(fwd, stc) assert fwd['sol']['row_names'] == noise_cov['names'] assert noise_cov['names'] == evoked.ch_names evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info) data_cov = noise_cov.copy() - data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T) + data = fwd['sol']['data'] @ fwd['sol']['data'].T + data *= 1e-14 # 100 nAm at each source, effectively (1e-18 would be 1 nAm) + # This is rank-deficient, so let's make it actually positive semidefinite + # by regularizing a tiny bit + data.flat[::data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)['data'] + # Do our projection + proj, _, _ = mne.io.proj.make_projector( + data_cov['projs'], data_cov['names']) + data = proj @ data @ proj.T + data_cov['data'][:] = data assert data_cov['data'].shape[0] == len(noise_cov['names']) want = np.arange(fwd['sol']['data'].shape[1]) if not mne.forward.is_fixed_orient(fwd): @@ -263,76 +269,83 @@ def _bias_params(evoked, noise_cov, fwd): return evoked, fwd, noise_cov, data_cov, want -@pytest.fixture(scope="module", params=[ - "mayavi", - "pyvista", -]) -def backend_name(request): - """Get the backend name.""" - yield request.param +@pytest.fixture +def garbage_collect(): + """Garbage collect on exit.""" + yield + gc.collect() -@pytest.yield_fixture -def renderer(backend_name, garbage_collect): +@pytest.fixture(params=["mayavi", "pyvista"]) +def renderer(request, garbage_collect): """Yield the 3D backends.""" - from mne.viz.backends.renderer import _use_test_3d_backend - _check_skip_backend(backend_name) - with _use_test_3d_backend(backend_name): - from mne.viz.backends import renderer + with _use_backend(request.param, interactive=False) as renderer: yield renderer - renderer.backend._close_all() -@pytest.yield_fixture -def garbage_collect(): - """Garbage collect on exit.""" - yield - gc.collect() +@pytest.fixture(params=["pyvista"]) +def renderer_pyvista(request, garbage_collect): + """Yield the PyVista backend.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer -@pytest.fixture(scope="module", params=[ - "pyvista", - "mayavi", -]) -def backend_name_interactive(request): - """Get the backend name.""" - yield request.param +@pytest.fixture(params=["notebook"]) +def renderer_notebook(request): + """Yield the 3D notebook renderer.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer -@pytest.yield_fixture -def renderer_interactive(backend_name_interactive): - """Yield the 3D backends.""" +@pytest.fixture(scope="module", params=["pyvista"]) +def renderer_interactive_pyvista(request): + """Yield the interactive PyVista backend.""" + with _use_backend(request.param, interactive=True) as renderer: + yield renderer + + +@pytest.fixture(scope="module", params=["pyvista", "mayavi"]) +def renderer_interactive(request): + """Yield the interactive 3D backends.""" + with _use_backend(request.param, interactive=True) as renderer: + if renderer._get_3d_backend() == 'mayavi': + with warnings.catch_warnings(record=True): + try: + from surfer import Brain # noqa: 401 analysis:ignore + except Exception: + pytest.skip('Requires PySurfer') + yield renderer + + +@contextmanager +def _use_backend(backend_name, interactive): from mne.viz.backends.renderer import _use_test_3d_backend - _check_skip_backend(backend_name_interactive) - with _use_test_3d_backend(backend_name_interactive, interactive=True): + 
_check_skip_backend(backend_name) + with _use_test_3d_backend(backend_name, interactive=interactive): from mne.viz.backends import renderer - yield renderer - renderer.backend._close_all() + try: + yield renderer + finally: + renderer.backend._close_all() def _check_skip_backend(name): from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista, has_pyqt5, has_imageio_ffmpeg) + check_pyvista = name in ('pyvista', 'notebook') + check_pyqt5 = name in ('mayavi', 'pyvista') if name == 'mayavi': if not has_mayavi(): pytest.skip("Test skipped, requires mayavi.") elif name == 'pyvista': - if not has_pyvista(): - pytest.skip("Test skipped, requires pyvista.") if not has_imageio_ffmpeg(): pytest.skip("Test skipped, requires imageio-ffmpeg") - if not has_pyqt5(): + if check_pyvista and not has_pyvista(): + pytest.skip("Test skipped, requires pyvista.") + if check_pyqt5 and not has_pyqt5(): pytest.skip("Test skipped, requires PyQt5.") -@pytest.fixture() -def renderer_notebook(): - """Verify that pytest_notebook is installed.""" - from mne.viz.backends import renderer - with renderer._use_test_3d_backend('notebook'): - yield renderer - - @pytest.fixture(scope='session') def pixel_ratio(): """Get the pixel ratio.""" @@ -486,7 +499,14 @@ def download_is_error(monkeypatch): @pytest.fixture() def brain_gc(request): """Ensure that brain can be properly garbage collected.""" - keys = ('renderer_interactive', 'renderer', 'renderer_notebook') + keys = ( + 'renderer_interactive', + 'renderer_interactive_pyvista', + 'renderer_interactive_pysurfer', + 'renderer', + 'renderer_pyvista', + 'renderer_notebook', + ) assert set(request.fixturenames) & set(keys) != set() for key in keys: if key in request.fixturenames: @@ -501,10 +521,16 @@ def brain_gc(request): yield return from mne.viz import Brain - _assert_no_instances(Brain, 'before') ignore = set(id(o) for o in gc.get_objects()) yield close_func() + # no need to warn if the test itself failed, pytest-harvest helps us here + try: + outcome = request.node.harvest_rep_call + except Exception: + outcome = 'failed' + if outcome != 'passed': + return _assert_no_instances(Brain, 'after') # We only check VTK for PyVista -- Mayavi/PySurfer is not as strict objs = gc.get_objects() @@ -560,3 +586,21 @@ def pytest_sessionfinish(session, exitstatus): timings = [timing.rjust(rjust) for timing in timings] for name, timing in zip(names, timings): writer.line(f'{timing.ljust(15)}{name}') + + +@pytest.fixture(scope="function", params=('Numba', 'NumPy')) +def numba_conditional(monkeypatch, request): + """Test both code paths on machines that have Numba.""" + assert request.param in ('Numba', 'NumPy') + if request.param == 'NumPy' and has_numba: + monkeypatch.setattr( + cluster_level, '_get_buddies', cluster_level._get_buddies_fallback) + monkeypatch.setattr( + cluster_level, '_get_selves', cluster_level._get_selves_fallback) + monkeypatch.setattr( + cluster_level, '_where_first', cluster_level._where_first_fallback) + monkeypatch.setattr( + numerics, '_arange_div', numerics._arange_div_fallback) + if request.param == 'Numba' and not has_numba: + pytest.skip('Numba not installed') + yield request.param diff --git a/mne/connectivity/envelope.py b/mne/connectivity/envelope.py index 4d813ed4984..193a5477ec5 100644 --- a/mne/connectivity/envelope.py +++ b/mne/connectivity/envelope.py @@ -13,7 +13,7 @@ @verbose def envelope_correlation(data, combine='mean', orthogonalize="pairwise", - verbose=None): + log=False, absolute=True, verbose=None): """Compute the envelope correlation. 
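A sketch of the two keyword arguments documented just below (``log`` and ``absolute``), i.e. the square-log-norm variant that the FieldTrip comparison test in this diff exercises; the data are random and only illustrate the shapes::

    import numpy as np
    from mne.connectivity import envelope_correlation

    rng = np.random.RandomState(0)
    data = rng.randn(5, 4, 200)  # (n_epochs, n_signals, n_times)
    corr = envelope_correlation(data, combine=None, log=True, absolute=False)
    print(corr.shape)  # (5, 4, 4), one symmetric matrix per epoch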
Parameters @@ -40,6 +40,18 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", absolute values. .. versionadded:: 0.19 + log : bool + If True (default False), square and take the log before orthonalizing + envelopes or computing correlations. + + .. versionadded:: 0.22 + absolute : bool + If True (default), then take the absolute value of correlation + coefficients before making each epoch's correlation matrix + symmetric (and thus before combining matrices across epochs). + Only used when ``orthogonalize=True``. + + .. versionadded:: 0.22 %(verbose)s Returns @@ -52,16 +64,15 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", Notes ----- This function computes the power envelope correlation between - orthogonalized signals [1]_ [2]_. + orthogonalized signals :footcite:`HippEtAl2012,KhanEtAl2018`. + + .. versionchanged:: 0.22 + Computations fixed for ``orthogonalize=True`` and diagonal entries are + set explicitly to zero. References ---------- - .. [1] Hipp JF, Hawellek DJ, Corbetta M, Siegel M, Engel AK (2012) - Large-scale cortical correlation structure of spontaneous - oscillatory activity. Nature Neuroscience 15:884–890 - .. [2] Khan S et al. (2018). Maturation trajectories of cortical - resting-state networks depend on the mediating frequency band. - Neuroimage 174:57–68 + .. footbibliography:: """ _check_option('orthogonalize', orthogonalize, (False, 'pairwise')) from scipy.signal import hilbert @@ -99,6 +110,9 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", data_mag = np.abs(epoch_data) data_conj_scaled = epoch_data.conj() data_conj_scaled /= data_mag + if log: + data_mag *= data_mag + np.log(data_mag, out=data_mag) # subtract means data_mag_nomean = data_mag - np.mean(data_mag, axis=-1, keepdims=True) # compute variances using linalg.norm (square, sum, sqrt) since mean=0 @@ -107,21 +121,29 @@ def envelope_correlation(data, combine='mean', orthogonalize="pairwise", corr = np.empty((n_nodes, n_nodes)) for li, label_data in enumerate(epoch_data): if orthogonalize is False: # the new code - label_data_orth = data_mag - label_data_orth_std = data_mag_std + label_data_orth = data_mag[li] + label_data_orth_std = data_mag_std[li] else: label_data_orth = (label_data * data_conj_scaled).imag + np.abs(label_data_orth, out=label_data_orth) + # protect against invalid value -- this will be zero + # after (log and) mean subtraction + label_data_orth[li] = 1. + if log: + label_data_orth *= label_data_orth + np.log(label_data_orth, out=label_data_orth) label_data_orth -= np.mean(label_data_orth, axis=-1, keepdims=True) label_data_orth_std = np.linalg.norm(label_data_orth, axis=-1) label_data_orth_std[label_data_orth_std == 0] = 1 # correlation is dot product divided by variances - corr[li] = np.dot(label_data_orth, data_mag_nomean[li]) - corr[li] /= data_mag_std[li] + corr[li] = np.sum(label_data_orth * data_mag_nomean, axis=1) + corr[li] /= data_mag_std corr[li] /= label_data_orth_std if orthogonalize is not False: # Make it symmetric (it isn't at this point) - corr = np.abs(corr) + if absolute: + corr = np.abs(corr) corr = (corr.T + corr) / 2. 
corrs.append(corr) del corr diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py index 2aa99f423ee..b900a09a93d 100644 --- a/mne/connectivity/spectral.py +++ b/mne/connectivity/spectral.py @@ -10,7 +10,7 @@ from .utils import check_indices from ..utils import _check_option -from ..fixes import _get_args, rfftfreq +from ..fixes import _get_args, _import_fft from ..parallel import parallel_func from ..source_estimate import _BaseSourceEstimate from ..epochs import BaseEpochs @@ -18,7 +18,7 @@ _psd_from_mt, _csd_from_mt, _psd_from_mt_adaptive) from ..time_frequency.tfr import morlet, cwt -from ..utils import logger, verbose, _time_mask, warn +from ..utils import logger, verbose, _time_mask, warn, _arange_div ######################################################################## # Various connectivity estimators @@ -475,11 +475,15 @@ def _check_method(method): return True, None -def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None): +def _get_and_verify_data_sizes(data, sfreq, n_signals=None, n_times=None, + times=None, warn_times=True): """Get and/or verify the data sizes and time scales.""" if not isinstance(data, (list, tuple)): raise ValueError('data has to be a list or tuple') n_signals_tot = 0 + # Sometimes data can be (ndarray, SourceEstimate) groups so in the case + # where ndarray comes first, don't use it for times + times_inferred = False for this_data in data: this_n_signals, this_n_times = this_data.shape if n_times is not None: @@ -491,12 +495,19 @@ def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None): n_signals_tot += this_n_signals if hasattr(this_data, 'times'): + assert isinstance(this_data, _BaseSourceEstimate) this_times = this_data.times - if times is not None: - if np.any(times != this_times): - warn('time scales of input time series do not match') + if times is not None and not times_inferred: + if warn_times and not np.allclose(times, this_times): + with np.printoptions(threshold=4, linewidth=120): + warn('time scales of input time series do not match:\n' + f'{this_times}\n{times}') + warn_times = False else: times = this_times + elif times is None: + times_inferred = True + times = _arange_div(n_times, sfreq) if n_signals is not None: if n_signals != n_signals_tot: @@ -504,7 +515,7 @@ def _get_and_verify_data_sizes(data, n_signals=None, n_times=None, times=None): 'each epoch') n_signals = n_signals_tot - return n_signals, n_times, times + return n_signals, n_times, times, warn_times # map names to estimator types @@ -688,60 +699,47 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi, C = --------------------- sqrt(E[Sxx] * E[Syy]) - 'imcoh' : Imaginary coherence [1]_ given by:: + 'imcoh' : Imaginary coherence :footcite:`NolteEtAl2004` given by:: Im(E[Sxy]) C = ---------------------- sqrt(E[Sxx] * E[Syy]) - 'plv' : Phase-Locking Value (PLV) [2]_ given by:: + 'plv' : Phase-Locking Value (PLV) :footcite:`LachauxEtAl1999` given + by:: PLV = |E[Sxy/|Sxy|]| - 'ciplv' : corrected imaginary PLV (icPLV) [3]_ given by:: + 'ciplv' : corrected imaginary PLV (icPLV) + :footcite:`BrunaEtAl2018` given by:: |E[Im(Sxy/|Sxy|)]| ciPLV = ------------------------------------ sqrt(1 - |E[real(Sxy/|Sxy|)]| ** 2) 'ppc' : Pairwise Phase Consistency (PPC), an unbiased estimator - of squared PLV [4]_. + of squared PLV :footcite:`VinckEtAl2010`. 
- 'pli' : Phase Lag Index (PLI) [5]_ given by:: + 'pli' : Phase Lag Index (PLI) :footcite:`StamEtAl2007` given by:: PLI = |E[sign(Im(Sxy))]| - 'pli2_unbiased' : Unbiased estimator of squared PLI [6]_. + 'pli2_unbiased' : Unbiased estimator of squared PLI + :footcite:`VinckEtAl2011`. - 'wpli' : Weighted Phase Lag Index (WPLI) [6]_ given by:: + 'wpli' : Weighted Phase Lag Index (WPLI) :footcite:`VinckEtAl2011` + given by:: |E[Im(Sxy)]| WPLI = ------------------ E[|Im(Sxy)|] - 'wpli2_debiased' : Debiased estimator of squared WPLI [6]_. + 'wpli2_debiased' : Debiased estimator of squared WPLI + :footcite:`VinckEtAl2011`. References ---------- - .. [1] Nolte et al. "Identifying true brain interaction from EEG data using - the imaginary part of coherency" Clinical neurophysiology, vol. 115, - no. 10, pp. 2292-2307, Oct. 2004. - .. [2] Lachaux et al. "Measuring phase synchrony in brain signals" Human - brain mapping, vol. 8, no. 4, pp. 194-208, Jan. 1999. - .. [3] Bruña et al. "Phase locking value revisited: teaching new tricks to - an old dog" Journal of Neural Engineering, vol. 15, no. 5, pp. - 056011 , Jul. 2018. - .. [4] Vinck et al. "The pairwise phase consistency: a bias-free measure of - rhythmic neuronal synchronization" NeuroImage, vol. 51, no. 1, - pp. 112-122, May 2010. - .. [5] Stam et al. "Phase lag index: assessment of functional connectivity - from multi channel EEG and MEG with diminished bias from common - sources" Human brain mapping, vol. 28, no. 11, pp. 1178-1193, - Nov. 2007. - .. [6] Vinck et al. "An improved index of phase-synchronization for - electro-physiological data in the presence of volume-conduction, - noise and sample-size bias" NeuroImage, vol. 55, no. 4, - pp. 1548-1565, Apr. 2011. + .. footbibliography:: """ if n_jobs != 1: parallel, my_epoch_spectral_connectivity, _ = \ @@ -772,20 +770,23 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi, if isinstance(data, BaseEpochs): times_in = data.times # input times for Epochs input type sfreq = data.info['sfreq'] + else: + times_in = None # loop over data; it could be a generator that returns # (n_signals x n_times) arrays or SourceEstimates epoch_idx = 0 logger.info('Connectivity computation...') + warn_times = True for epoch_block in _get_n_epochs(data, n_jobs): if epoch_idx == 0: # initialize everything times and frequencies (n_cons, times, n_times, times_in, n_times_in, tmin_idx, tmax_idx, n_freqs, freq_mask, freqs, freqs_bands, freq_idx_bands, - n_signals, indices_use) = _prepare_connectivity( - epoch_block=epoch_block, tmin=tmin, tmax=tmax, fmin=fmin, - fmax=fmax, sfreq=sfreq, indices=indices, mode=mode, - fskip=fskip, n_bands=n_bands, + n_signals, indices_use, warn_times) = _prepare_connectivity( + epoch_block=epoch_block, times_in=times_in, + tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, sfreq=sfreq, + indices=indices, mode=mode, fskip=fskip, n_bands=n_bands, cwt_freqs=cwt_freqs, faverage=faverage) # get the window function, wavelets, etc for different modes @@ -823,8 +824,9 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi, # check dimensions and time scale for this_epoch in epoch_block: - _get_and_verify_data_sizes(this_epoch, n_signals, n_times_in, - times_in) + _, _, _, warn_times = _get_and_verify_data_sizes( + this_epoch, sfreq, n_signals, n_times_in, times_in, + warn_times=warn_times) call_params = dict( sig_idx=sig_idx, tmin_idx=tmin_idx, @@ -906,8 +908,7 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi, if indices is 
None: # return all-to-all connectivity matrices - logger.info(' assembling connectivity matrix ' - '(filling the upper triangular region of the matrix)') + logger.info(' assembling connectivity matrix') con_flat = con con = list() for this_con_flat in con_flat: @@ -930,21 +931,27 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi, return con, freqs, times, n_epochs, n_tapers -def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, +def _prepare_connectivity(epoch_block, times_in, tmin, tmax, + fmin, fmax, sfreq, indices, mode, fskip, n_bands, cwt_freqs, faverage): """Check and precompute dimensions of results data.""" + rfftfreq = _import_fft('rfftfreq') first_epoch = epoch_block[0] # get the data size and time scale - n_signals, n_times_in, times_in = _get_and_verify_data_sizes(first_epoch) - - if times_in is None: - # we are not using Epochs or SourceEstimate(s) as input - times_in = np.linspace(0.0, n_times_in / sfreq, n_times_in, - endpoint=False) + n_signals, n_times_in, times_in, warn_times = _get_and_verify_data_sizes( + first_epoch, sfreq, times=times_in) n_times_in = len(times_in) + + if tmin is not None and tmin < times_in[0]: + warn('start time tmin=%0.2f s outside of the time scope of the data ' + '[%0.2f s, %0.2f s]' % (tmin, times_in[0], times_in[-1])) + if tmax is not None and tmax > times_in[-1]: + warn('stop time tmax=%0.2f s outside of the time scope of the data ' + '[%0.2f s, %0.2f s]' % (tmax, times_in[0], times_in[-1])) + mask = _time_mask(times_in, tmin, tmax, sfreq=sfreq) tmin_idx, tmax_idx = np.where(mask)[0][[0, -1]] tmax_idx += 1 @@ -1044,7 +1051,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, return (n_cons, times, n_times, times_in, n_times_in, tmin_idx, tmax_idx, n_freqs, freq_mask, freqs, freqs_bands, freq_idx_bands, - n_signals, indices_use) + n_signals, indices_use, warn_times) def _assemble_spectral_params(mode, n_times, mt_adaptive, mt_bandwidth, sfreq, diff --git a/mne/connectivity/tests/test_envelope.py b/mne/connectivity/tests/test_envelope.py index 5220a3f65f9..41d0930510b 100644 --- a/mne/connectivity/tests/test_envelope.py +++ b/mne/connectivity/tests/test_envelope.py @@ -15,19 +15,23 @@ def _compute_corrs_orig(data): # This is the version of the code by Sheraz and Denis. # For this version (epochs, labels, time) must be -> (labels, time, epochs) - data = np.transpose(data, (1, 2, 0)) - corr_mats = np.empty((data.shape[0], data.shape[0], data.shape[2])) - for index, label_data in enumerate(data): - label_data_orth = np.imag(label_data * (data.conj() / np.abs(data))) - label_data_orig = np.abs(label_data) - label_data_cont = np.transpose( - np.dstack((label_data_orig, np.transpose(label_data_orth, - (1, 2, 0)))), (1, 2, 0)) - corr_mats[index] = np.array([np.corrcoef(dat) - for dat in label_data_cont])[:, 0, 1:].T - corr_mats = np.transpose(corr_mats, (2, 0, 1)) - corr = np.mean(np.array([(np.abs(corr_mat) + np.abs(corr_mat).T) / 2. 
- for corr_mat in corr_mats]), axis=0) + n_epochs, n_labels, _ = data.shape + corr = np.zeros((n_labels, n_labels)) + for epoch_data in data: + for ii in range(n_labels): + for jj in range(n_labels): + # Get timeseries for each pair + x, y = epoch_data[ii], epoch_data[jj] + x_mag = np.abs(x) + x_conj_scaled = x.conj() + x_conj_scaled /= x_mag + # Calculate orthogonalization + y_orth_x = (y * x_conj_scaled).imag + y_orth_x_mag = np.abs(y_orth_x) + # Estimate correlation + corr[ii, jj] += np.abs(np.corrcoef(x_mag, y_orth_x_mag)[0, 1]) + corr = (corr + corr.T) / (2. * n_epochs) + corr.flat[::n_labels + 1] = 0. return corr @@ -37,7 +41,7 @@ def test_envelope_correlation(): data = rng.randn(2, 4, 64) data_hilbert = hilbert(data, axis=-1) corr_orig = _compute_corrs_orig(data_hilbert) - assert (0 < corr_orig).all() + assert (0 <= corr_orig).all() assert (corr_orig < 1).all() # using complex data corr = envelope_correlation(data_hilbert) @@ -72,3 +76,25 @@ def test_envelope_correlation(): assert_allclose(np.diag(corr_plain_mean), 1) np_corr = np.array([np.corrcoef(np.abs(x)) for x in data_hilbert]) assert_allclose(corr_plain, np_corr) + + # check against FieldTrip, which uses the square-log-norm version + # from scipy.io import savemat + # savemat('data.mat', dict(data_hilbert=data_hilbert)) + # matlab + # load data + # ft_connectivity_powcorr_ortho(reshape(data_hilbert(1,:,:), [4, 64])) + # ft_connectivity_powcorr_ortho(reshape(data_hilbert(2,:,:), [4, 64])) + ft_vals = np.array([ + [[np.nan, 0.196734553900236, 0.063173148355451, -0.242638384630448], + [0.196734553900236, np.nan, 0.041799775495150, -0.088205187548542], + [0.063173148355451, 0.041799775495150, np.nan, 0.090331428512317], + [-0.242638384630448, -0.088205187548542, 0.090331428512317, np.nan]], + [[np.nan, -0.013270857462890, 0.185200598081295, 0.140284351572544], + [-0.013270857462890, np.nan, 0.150981508043722, -0.000671809276372], + [0.185200598081295, 0.150981508043722, np.nan, 0.137460244313337], + [0.140284351572544, -0.000671809276372, 0.137460244313337, np.nan]], + ], float) + ft_vals[np.isnan(ft_vals)] = 0 + corr_log = envelope_correlation( + data, combine=None, log=True, absolute=False) + assert_allclose(corr_log, ft_vals) diff --git a/mne/connectivity/tests/test_spectral.py b/mne/connectivity/tests/test_spectral.py index 76000d5af43..6a4dc6d8c4d 100644 --- a/mne/connectivity/tests/test_spectral.py +++ b/mne/connectivity/tests/test_spectral.py @@ -1,12 +1,10 @@ import numpy as np -from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_array_almost_equal, assert_allclose import pytest +from mne import EpochsArray, SourceEstimate, create_info from mne.connectivity import spectral_connectivity from mne.connectivity.spectral import _CohEst, _get_n_epochs - -from mne import SourceEstimate -from mne.utils import run_tests_if_main from mne.filter import filter_data @@ -212,4 +210,63 @@ def test_spectral_connectivity(method, mode): assert (out_lens[0] == 10) -run_tests_if_main() +@pytest.mark.parametrize('kind', ('epochs', 'ndarray', 'stc', 'combo')) +def test_epochs_tmin_tmax(kind): + """Test spectral.spectral_connectivity with epochs and arrays.""" + rng = np.random.RandomState(0) + n_epochs, n_chs, n_times, sfreq, f = 10, 2, 2000, 1000., 20. 
+ data = rng.randn(n_epochs, n_chs, n_times) + sig = np.sin(2 * np.pi * f * np.arange(1000) / sfreq) * np.hanning(1000) + data[:, :, 500:1500] += sig + info = create_info(n_chs, sfreq, 'eeg') + if kind == 'epochs': + tmin = -1 + X = EpochsArray(data, info, tmin=tmin) + elif kind == 'stc': + tmin = -1 + X = [SourceEstimate(d, [[0], [0]], tmin, 1. / sfreq) for d in data] + elif kind == 'combo': + tmin = -1 + X = [(d[[0]], SourceEstimate(d[[1]], [[0], []], tmin, 1. / sfreq)) + for d in data] + else: + assert kind == 'ndarray' + tmin = 0 + X = data + want_times = np.arange(n_times) / sfreq + tmin + + # Parameters for computing connectivity + fmin, fmax = f - 2, f + 2 + kwargs = {'method': 'coh', 'mode': 'multitaper', 'sfreq': sfreq, + 'fmin': fmin, 'fmax': fmax, 'faverage': True, + 'mt_adaptive': False, 'n_jobs': 1} + + # Check the entire interval + conn = spectral_connectivity(X, **kwargs) + assert 0.89 < conn[0][1, 0] < 0.91 + assert_allclose(conn[2], want_times) + # Check a time interval before the sinusoid + conn = spectral_connectivity(X, tmax=tmin + 0.5, **kwargs) + assert 0 < conn[0][1, 0] < 0.15 + # Check a time during the sinusoid + conn = spectral_connectivity(X, tmin=tmin + 0.5, tmax=tmin + 1.5, **kwargs) + assert 0.93 < conn[0][1, 0] <= 0.94 + # Check a time interval after the sinusoid + conn = spectral_connectivity(X, tmin=tmin + 1.5, tmax=tmin + 1.9, **kwargs) + assert 0 < conn[0][1, 0] < 0.15 + + # Check for warning if tmin, tmax is outside of the time limits of data + with pytest.warns(RuntimeWarning, match='start time tmin'): + spectral_connectivity(X, **kwargs, tmin=tmin - 0.1) + + with pytest.warns(RuntimeWarning, match='stop time tmax'): + spectral_connectivity(X, **kwargs, tmax=tmin + 2.5) + + # make one with mismatched times + if kind != 'combo': + return + X = [(SourceEstimate(d[[0]], [[0], []], tmin - 1, 1. / sfreq), + SourceEstimate(d[[1]], [[0], []], tmin, 1. / sfreq)) for d in data] + with pytest.warns(RuntimeWarning, match='time scales of input') as w: + spectral_connectivity(X, **kwargs) + assert len(w) == 1 # just one even though there were multiple epochs diff --git a/mne/coreg.py b/mne/coreg.py index e5d9a23380f..045026dbb2e 100644 --- a/mne/coreg.py +++ b/mne/coreg.py @@ -21,13 +21,14 @@ from .io import read_fiducials, write_fiducials, read_info from .io.constants import FIFF from .label import read_label, Label -from .source_space import (add_source_space_distances, read_source_spaces, - write_source_spaces, read_talxfm, _read_mri_info) +from .source_space import (add_source_space_distances, read_source_spaces, # noqa: E501,F401 + write_source_spaces, _read_mri_info, + get_mni_fiducials) from .surface import read_surface, write_surface, _normalize_vectors from .bem import read_bem_surfaces, write_bem_surfaces from .transforms import (rotation, rotation3d, scaling, translation, Transform, _read_fs_xfm, _write_fs_xfm, invert_transform, - combine_transforms, apply_trans, _quat_to_euler, + combine_transforms, _quat_to_euler, _fit_matched_points) from .utils import (get_config, get_subjects_dir, logger, pformat, verbose, warn, has_nibabel) @@ -1218,54 +1219,3 @@ def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, F_mri_ras, 'ras', 'ras'), F_ras_mni, 'ras', 'mni_tal') _write_fs_xfm(fname_to, T_ras_mni['trans'], kind) - - -@verbose -def get_mni_fiducials(subject, subjects_dir=None, verbose=None): - """Estimate fiducials for a subject. 
- - Parameters - ---------- - subject : str - Name of the mri subject - subjects_dir : None | str - Override the SUBJECTS_DIR environment variable - (sys.environ['SUBJECTS_DIR']) - %(verbose)s - - Returns - ------- - fids_mri : list - List of estimated fiducials (each point in a dict), in the order - LPA, nasion, RPA. - - Notes - ----- - This takes the ``fsaverage-fiducials.fif`` file included with MNE—which - contain the LPA, nasion, and RPA for the ``fsaverage`` subject—and - transforms them to the given FreeSurfer subject's MRI space. - The MRI of ``fsaverage`` is already in MNI Talairach space, so applying - the inverse of the given subject's MNI Talairach affine transformation - (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is used - to estimate the subject's fiducial locations. - - For more details about the coordinate systems and transformations involved, - see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and - :ref:`plot_source_alignment`. - """ - # Eventually we might want to allow using the MNI Talairach with-skull - # transformation rather than the standard brain-based MNI Talaranch - # transformation, and/or project the points onto the head surface - # (if available). - fname_fids_fs = os.path.join(os.path.dirname(__file__), 'data', - 'fsaverage', 'fsaverage-fiducials.fif') - - # Read fsaverage fiducials file and subject Talairach. - fids, coord_frame = read_fiducials(fname_fids_fs) - assert coord_frame == FIFF.FIFFV_COORD_MRI - if subject == 'fsaverage': - return fids # special short-circuit for fsaverage - mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir)) - for f in fids: - f['r'] = apply_trans(mni_mri_t, f['r']) - return fids diff --git a/mne/cov.py b/mne/cov.py index 72e33bec09f..aa76cb60dd0 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -11,7 +11,6 @@ import os import numpy as np -from scipy import linalg, sparse from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS from .io.write import start_file, end_file @@ -25,7 +24,7 @@ _DATA_CH_TYPES_SPLIT) from .io.constants import FIFF -from .io.meas_info import read_bad_channels, create_info +from .io.meas_info import _read_bad_channels, create_info from .io.tag import find_tag from .io.tree import dir_tree_find from .io.write import (start_block, end_block, write_int, write_name_list, @@ -38,7 +37,8 @@ from .utils import (check_fname, logger, verbose, check_version, _time_mask, warn, copy_function_doc_to_method_doc, _pl, _undo_scaling_cov, _scaled_array, _validate_type, - _check_option, eigh, fill_doc) + _check_option, eigh, fill_doc, _on_missing, + _check_on_missing) from . 
import viz from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet, @@ -198,6 +198,14 @@ def as_diag(self): self['eigvec'] = None return self + def _as_square(self): + # This is a hack but it works because np.diag() behaves nicely + if self['diag']: + self['diag'] = False + self.as_diag() + self['diag'] = False + return self + def _get_square(self): if self['diag'] != (self.data.ndim == 1): raise RuntimeError( @@ -872,7 +880,7 @@ def _unpack_epochs(epochs): 'matrix may be inaccurate') orig = epochs[0].info['dev_head_t'] - _check_option('on_mismatch', on_mismatch, ['raise', 'warn', 'ignore']) + _check_on_missing(on_mismatch, 'on_mismatch') for ei, epoch in enumerate(epochs): epoch.info._check_consistency() if (orig is None) != (epoch.info['dev_head_t'] is None) or \ @@ -882,10 +890,7 @@ def _unpack_epochs(epochs): msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n' 'and epochs[%s]:\n%s' % (orig, ei, epoch.info['dev_head_t'])) - if on_mismatch == 'raise': - raise ValueError(msg) - elif on_mismatch == 'warn': - warn(msg) + _on_missing(on_mismatch, msg, 'on_mismatch') bads = epochs[0].info['bads'] if projs is None: @@ -1252,10 +1257,11 @@ def _auto_low_rank_model(data, mode, n_jobs, method_params, cv, class _RegCovariance(BaseEstimator): """Aux class.""" - def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, + def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, + ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, - csd=0.1, store_precision=False, assume_centered=False): + csd=0.1, dbs=0.1, store_precision=False, + assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) be combined into # a single dictionary @@ -1263,6 +1269,7 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.mag = mag self.eeg = eeg self.seeg = seeg + self.dbs = dbs self.ecog = ecog self.hbo = hbo self.hbr = hbr @@ -1289,7 +1296,7 @@ def fit(self, X): cov_ = regularize( cov_, self.info, proj=False, exclude='bads', grad=self.grad, mag=self.mag, eeg=self.eeg, - ecog=self.ecog, seeg=self.seeg, + ecog=self.ecog, seeg=self.seeg, dbs=self.dbs, hbo=self.hbo, hbr=self.hbr, rank='full') self.estimator_.covariance_ = self.covariance_ = cov_.data return self @@ -1426,7 +1433,7 @@ def _get_ch_whitener(A, pca, ch_type, rank): @verbose def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, - scalings=None, verbose=None): + scalings=None, on_rank_mismatch='ignore', verbose=None): """Prepare noise covariance matrix. 
Parameters @@ -1447,6 +1454,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, If dict, it will override the following dict (default if None):: dict(mag=1e12, grad=1e11, eeg=1e5) + %(on_rank_mismatch)s %(verbose)s Returns @@ -1478,7 +1486,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, loglik=noise_cov.get('loglik', None)) eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs, - ch_names) + ch_names, on_rank_mismatch=on_rank_mismatch) noise_cov.update(eig=eig, eigvec=eigvec) return noise_cov @@ -1486,7 +1494,7 @@ def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None, @verbose def _smart_eigh(C, info, rank, scalings=None, projs=None, ch_names=None, proj_subspace=False, do_compute_rank=True, - verbose=None): + on_rank_mismatch='ignore', verbose=None): """Compute eigh of C taking into account rank and ch_type scalings.""" scalings = _handle_default('scalings_cov_rank', scalings) projs = info['projs'] if projs is None else projs @@ -1508,7 +1516,8 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, noise_cov = Covariance(C, ch_names, [], projs, 0) if do_compute_rank: # if necessary - rank = compute_rank(noise_cov, rank, scalings, info) + rank = compute_rank( + noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch) assert C.ndim == 2 and C.shape[0] == C.shape[1] # time saving short-circuit @@ -1549,7 +1558,7 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, - fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1600,6 +1609,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', Regularization factor for fNIRS optical density signals. csd : float (default 0.1) Regularization factor for EEG-CSD signals. + dbs : float (default 0.1) + Regularization factor for DBS signals. %(rank_None)s .. versionadded:: 0.17 @@ -1622,10 +1633,11 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', -------- mne.compute_covariance """ # noqa: E501 + from scipy import linalg cov = cov.copy() info._check_consistency() scalings = _handle_default('scalings_cov_rank', scalings) - regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, + regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) @@ -1763,7 +1775,8 @@ def _regularized_covariance(data, reg=None, method_params=None, info=None, @verbose def compute_whitener(noise_cov, info=None, picks=None, rank=None, scalings=None, return_rank=False, pca=False, - return_colorer=False, verbose=None): + return_colorer=False, on_rank_mismatch='warn', + verbose=None): """Compute whitening matrix. Parameters @@ -1801,6 +1814,7 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, .. versionadded:: 0.18 return_colorer : bool If True, return the colorer as well. 
+ %(on_rank_mismatch)s %(verbose)s Returns @@ -1829,7 +1843,8 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, ch_names = [info['ch_names'][k] for k in picks] del picks noise_cov = prepare_noise_cov( - noise_cov, info, ch_names, rank, scalings) + noise_cov, info, ch_names, rank, scalings, + on_rank_mismatch=on_rank_mismatch) n_chan = len(ch_names) assert n_chan == len(noise_cov['eig']) @@ -1919,6 +1934,7 @@ def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None, def _read_cov(fid, node, cov_kind, limited=False, verbose=None): """Read a noise covariance matrix.""" # Find all covariance matrices + from scipy import sparse covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV) if len(covs) == 0: raise ValueError('No covariance matrices found') @@ -2006,7 +2022,7 @@ def _read_cov(fid, node, cov_kind, limited=False, verbose=None): projs = _read_proj(fid, this) # Read the bad channel list - bads = read_bad_channels(fid, this) + bads = _read_bad_channels(fid, this, None) # Put it together assert dim == len(data) diff --git a/mne/cuda.py b/mne/cuda.py index 07abf3df231..6b59b8dfa91 100644 --- a/mne/cuda.py +++ b/mne/cuda.py @@ -4,7 +4,7 @@ import numpy as np -from .fixes import rfft, irfft +from .fixes import _import_fft from .utils import (sizeof_fmt, logger, get_config, warn, _explain_exception, verbose) @@ -154,6 +154,7 @@ def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, ----- This function is designed to be used with fft_multiply_repeated(). """ + rfft, irfft = _import_fft(('rfft', 'irfft')) cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, h_fft=rfft(h, n=n_fft)) if n_jobs == 'cuda': @@ -246,6 +247,7 @@ def _setup_cuda_fft_resample(n_jobs, W, new_len): ----- This function is designed to be used with fft_resample(). """ + rfft, irfft = _import_fft(('rfft', 'irfft')) cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft) rfft_len_x = len(W) // 2 + 1 # fold the window onto inself (should be symmetric) and truncate
diff --git a/mne/data/html_templates.py b/mne/data/html_templates.py
new file mode 100644
index 00000000000..e1943fe0e0d
--- /dev/null
+++ b/mne/data/html_templates.py
@@ -0,0 +1,102 @@
+from ..externals.tempita import Template
+
+
+info_template = Template("""
+<table>
+    <tr>
+        <th>Measurement date</th>
+        {{if meas_date is not None}}
+        <td>{{meas_date}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Experimenter</th>
+        {{if info['experimenter'] is not None}}
+        <td>{{info['experimenter']}}</td>
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Participant</th>
+        {{if info['subject_info'] is not None}}
+        {{if 'his_id' in info['subject_info'].keys()}}
+        <td>{{info['subject_info']['his_id']}}</td>
+        {{endif}}
+        {{else}}<td>Unknown</td>{{endif}}
+    </tr>
+    <tr>
+        <th>Digitized points</th>
+        {{if info['dig'] is not None}}
+        <td>{{len(info['dig'])}} points</td>
+        {{else}}
+        <td>Not available</td>
+        {{endif}}
+    </tr>
+    <tr>
+        <th>Good channels</th>
+        <td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
+            and {{n_eeg}} EEG channels</td>
+    </tr>
+    <tr>
+        <th>Bad channels</th>
+        {{if info['bads'] is not None}}
+        <td>{{', '.join(info['bads'])}}</td>
+        {{else}}<td>None</td>{{endif}}
+    </tr>
+    <tr>
+        <th>EOG channels</th>
+        <td>{{eog}}</td>
+    </tr>
+    <tr>
+        <th>ECG channels</th>
+        <td>{{ecg}}</td>
+    </tr>
+    <tr>
+        <th>Sampling frequency</th>
+        <td>{{u'%0.2f' % info['sfreq']}} Hz</td>
+    </tr>
+    <tr>
+        <th>Highpass</th>
+        <td>{{u'%0.2f' % info['highpass']}} Hz</td>
+    </tr>
+    <tr>
+        <th>Lowpass</th>
+        <td>{{u'%0.2f' % info['lowpass']}} Hz</td>
+    </tr>
+</table>
+""")
+
+raw_template = Template("""
+{{info_repr[:-9]}}
+    <tr>
+        <th>Filenames</th>
+        <td>{{', '.join(filenames)}}</td>
+    </tr>
+    <tr>
+        <th>Duration</th>
+        <td>{{duration}} (HH:MM:SS)</td>
+    </tr>
+</table>
+""")
+
+epochs_template = Template("""
+<table>
+    <tr>
+        <th>Number of events</th>
+        <td>{{len(epochs.events)}}</td>
+    </tr>
+    <tr>
+        <th>Events</th>
+        {{if events is not None}}
+        <td>{{events}}</td>
+        {{else}}
+        <td>Not available</td>
+        {{endif}}
+    </tr>
+    <tr>
+        <th>Time range</th>
+        <td>{{f'{epochs.tmin:.3f} – {epochs.tmax:.3f} sec'}}</td>
+    </tr>
+    <tr>
+        <th>Baseline</th>
+        <td>{{baseline}}</td>
+    </tr>
+</table>
+""") diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 98ac5679ab3..d7d29926a5d 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -23,15 +23,19 @@ from . import sleep_physionet from . import limo from . import refmeg_noise +from . import ssvep +from . import erp_core +from . import epilepsy_ecog from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation, fetch_aparc_sub_parcellation) from ._fsaverage.base import fetch_fsaverage +from ._infant.base import fetch_infant_template __all__ = [ '_download_all_example_data', '_fake', 'brainstorm', 'eegbci', - 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', + 'fetch_aparc_sub_parcellation', 'fetch_fsaverage', 'fetch_infant_template', 'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword', 'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample', - 'sleep_physionet', 'somato', 'spm_face', 'testing', 'visual_92_categories', - 'limo', + 'sleep_physionet', 'somato', 'spm_face', 'ssvep', 'testing', + 'visual_92_categories', 'limo', 'erp_core', 'epilepsy_ecog' ] diff --git a/mne/datasets/_fsaverage/base.py b/mne/datasets/_fsaverage/base.py index cfdff437b94..7a412b00e9c 100644 --- a/mne/datasets/_fsaverage/base.py +++ b/mne/datasets/_fsaverage/base.py @@ -6,6 +6,7 @@ import os.path as op +from ..utils import _manifest_check_download, _get_path from ...utils import (verbose, get_subjects_dir, set_config) FSAVERAGE_MANIFEST_PATH = op.dirname(__file__) @@ -64,38 +65,26 @@ def fetch_fsaverage(subjects_dir=None, verbose=None): # with open('fsaverage.txt', 'w') as fid: # fid.write('\n'.join(names)) # - from ..utils import _manifest_check_download subjects_dir = _set_montage_coreg_path(subjects_dir) subjects_dir = op.abspath(subjects_dir) fs_dir = op.join(subjects_dir, 'fsaverage') os.makedirs(fs_dir, exist_ok=True) - - fsaverage_data_parts = { - 'root.zip': dict( - url='https://osf.io/3bxqt/download?revision=2', - hash_='5133fe92b7b8f03ae19219d5f46e4177', - manifest=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), - destination=op.join(subjects_dir), - ), - 'bem.zip': dict( - url='https://osf.io/7ve8g/download?revision=4', - hash_='b31509cdcf7908af6a83dc5ee8f49fb1', - manifest=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), - destination=op.join(subjects_dir, 'fsaverage'), - ), - } - for fname, data in fsaverage_data_parts.items(): - _manifest_check_download( - destination=data['destination'], - manifest_path=data['manifest'], - url=data['url'], - hash_=data['hash_'], - ) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'root.txt'), + destination=op.join(subjects_dir), + url='https://osf.io/3bxqt/download?revision=2', + hash_='5133fe92b7b8f03ae19219d5f46e4177', + ) + _manifest_check_download( + manifest_path=op.join(FSAVERAGE_MANIFEST_PATH, 'bem.txt'), + destination=op.join(subjects_dir, 'fsaverage'), + url='https://osf.io/7ve8g/download?revision=4', + hash_='b31509cdcf7908af6a83dc5ee8f49fb1', + ) return fs_dir def _get_create_subjects_dir(subjects_dir): - from ..utils import _get_path subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) if subjects_dir is None: subjects_dir = _get_path(None, 'MNE_DATA', 'montage coregistration') diff --git a/mne/datasets/_infant/ANTS1-0Months3T.txt b/mne/datasets/_infant/ANTS1-0Months3T.txt new file mode 100644 index 00000000000..fc77acedae1 --- /dev/null +++ b/mne/datasets/_infant/ANTS1-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS1-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS1-0Months3T-5120-5120-5120-bem.fif 
+bem/ANTS1-0Months3T-fiducials.fif +bem/ANTS1-0Months3T-head.fif +bem/ANTS1-0Months3T-oct-6-src.fif +bem/ANTS1-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS10-5Months3T.txt b/mne/datasets/_infant/ANTS10-5Months3T.txt new file mode 100644 index 00000000000..cec0a3e735a --- /dev/null +++ b/mne/datasets/_infant/ANTS10-5Months3T.txt @@ -0,0 +1,115 @@ +bem/ANTS10-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS10-5Months3T-5120-5120-5120-bem.fif +bem/ANTS10-5Months3T-fiducials.fif +bem/ANTS10-5Months3T-head.fif +bem/ANTS10-5Months3T-oct-6-src.fif +bem/ANTS10-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv 
+montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS12-0Months3T.txt b/mne/datasets/_infant/ANTS12-0Months3T.txt new file mode 100644 index 00000000000..d1fdbbc7bb0 --- /dev/null +++ b/mne/datasets/_infant/ANTS12-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS12-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS12-0Months3T-5120-5120-5120-bem.fif +bem/ANTS12-0Months3T-fiducials.fif +bem/ANTS12-0Months3T-head.fif +bem/ANTS12-0Months3T-oct-6-src.fif +bem/ANTS12-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz 
+mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS15-0Months3T.txt b/mne/datasets/_infant/ANTS15-0Months3T.txt new file mode 100644 index 00000000000..50487c06c73 --- /dev/null +++ b/mne/datasets/_infant/ANTS15-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS15-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS15-0Months3T-5120-5120-5120-bem.fif +bem/ANTS15-0Months3T-fiducials.fif +bem/ANTS15-0Months3T-head.fif +bem/ANTS15-0Months3T-oct-6-src.fif +bem/ANTS15-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj 
+surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS18-0Months3T.txt b/mne/datasets/_infant/ANTS18-0Months3T.txt new file mode 100644 index 00000000000..8f386c820f2 --- /dev/null +++ b/mne/datasets/_infant/ANTS18-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS18-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS18-0Months3T-5120-5120-5120-bem.fif +bem/ANTS18-0Months3T-fiducials.fif +bem/ANTS18-0Months3T-head.fif +bem/ANTS18-0Months3T-oct-6-src.fif +bem/ANTS18-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv 
+surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Months3T.txt b/mne/datasets/_infant/ANTS2-0Months3T.txt new file mode 100644 index 00000000000..2a6b9c22c31 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Months3T-5120-5120-5120-bem.fif +bem/ANTS2-0Months3T-fiducials.fif +bem/ANTS2-0Months3T-head.fif +bem/ANTS2-0Months3T-oct-6-src.fif +bem/ANTS2-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Weeks3T.txt b/mne/datasets/_infant/ANTS2-0Weeks3T.txt new file mode 100644 index 00000000000..e940f24b147 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Weeks3T.txt @@ -0,0 +1,117 @@ 
+bem/ANTS2-0Weeks3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Weeks3T-5120-5120-5120-bem.fif +bem/ANTS2-0Weeks3T-fiducials.fif +bem/ANTS2-0Weeks3T-head.fif +bem/ANTS2-0Weeks3T-oct-6-src.fif +bem/ANTS2-0Weeks3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Years3T.txt b/mne/datasets/_infant/ANTS2-0Years3T.txt new file mode 100644 index 00000000000..776396919ec --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Years3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS2-0Years3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Years3T-5120-5120-5120-bem.fif +bem/ANTS2-0Years3T-fiducials.fif +bem/ANTS2-0Years3T-head.fif +bem/ANTS2-0Years3T-oct-6-src.fif +bem/ANTS2-0Years3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label 
+label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS3-0Months3T.txt b/mne/datasets/_infant/ANTS3-0Months3T.txt new file mode 100644 index 00000000000..29a7148010b --- /dev/null +++ b/mne/datasets/_infant/ANTS3-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS3-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS3-0Months3T-5120-5120-5120-bem.fif +bem/ANTS3-0Months3T-fiducials.fif +bem/ANTS3-0Months3T-head.fif +bem/ANTS3-0Months3T-oct-6-src.fif +bem/ANTS3-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz 
+mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS4-5Months3T.txt b/mne/datasets/_infant/ANTS4-5Months3T.txt new file mode 100644 index 00000000000..b9188492d50 --- /dev/null +++ b/mne/datasets/_infant/ANTS4-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS4-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS4-5Months3T-5120-5120-5120-bem.fif +bem/ANTS4-5Months3T-fiducials.fif +bem/ANTS4-5Months3T-head.fif +bem/ANTS4-5Months3T-oct-6-src.fif +bem/ANTS4-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm 
+mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS6-0Months3T.txt b/mne/datasets/_infant/ANTS6-0Months3T.txt new file mode 100644 index 00000000000..3235de4c576 --- /dev/null +++ b/mne/datasets/_infant/ANTS6-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS6-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS6-0Months3T-5120-5120-5120-bem.fif +bem/ANTS6-0Months3T-fiducials.fif +bem/ANTS6-0Months3T-head.fif +bem/ANTS6-0Months3T-oct-6-src.fif +bem/ANTS6-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt 
+surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS7-5Months3T.txt b/mne/datasets/_infant/ANTS7-5Months3T.txt new file mode 100644 index 00000000000..8b38563c5b1 --- /dev/null +++ b/mne/datasets/_infant/ANTS7-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS7-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS7-5Months3T-5120-5120-5120-bem.fif +bem/ANTS7-5Months3T-fiducials.fif +bem/ANTS7-5Months3T-head.fif +bem/ANTS7-5Months3T-oct-6-src.fif +bem/ANTS7-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere 
+surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS9-0Months3T.txt b/mne/datasets/_infant/ANTS9-0Months3T.txt new file mode 100644 index 00000000000..8d37f25d0ba --- /dev/null +++ b/mne/datasets/_infant/ANTS9-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS9-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS9-0Months3T-5120-5120-5120-bem.fif +bem/ANTS9-0Months3T-fiducials.fif +bem/ANTS9-0Months3T-head.fif +bem/ANTS9-0Months3T-oct-6-src.fif +bem/ANTS9-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/base.py b/mne/datasets/_infant/base.py new file mode 100644 index 00000000000..ff502d19619 --- /dev/null +++ b/mne/datasets/_infant/base.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson +# 
License: BSD Style. + +import os +import os.path as op + +from ..utils import _manifest_check_download +from ...utils import verbose, get_subjects_dir, _check_option, _validate_type + +_AGES = '2wk 1mo 2mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr' +# https://github.com/christian-oreilly/infant_template_paper/releases +_ORIGINAL_URL = 'https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/{subject}.zip' # noqa: E501 +# Formatted the same way as md5sum *.zip on Ubuntu: +_ORIGINAL_HASHES = """ +851737d5f8f246883f2aef9819c6ec29 ANTS10-5Months3T.zip +32ab6d025f4311433a82e81374f1a045 ANTS1-0Months3T.zip +48ef349e7cc542fdf63ff36d7958ab57 ANTS12-0Months3T.zip +bba22c95aa97988c6e8892d6169ed317 ANTS15-0Months3T.zip +e1bfe5e3ef380592822ced446a4008c7 ANTS18-0Months3T.zip +fa7bee6c0985b9cd15ba53820cd72ccd ANTS2-0Months3T.zip +2ad90540cdf42837c09f8ce829458a35 ANTS2-0Weeks3T.zip +73e6a8b2579b7959a96f7d294ffb7393 ANTS2-0Years3T.zip +cb7b9752894e16a4938ddfe220f6286a ANTS3-0Months3T.zip +16b2a6804c7d5443cfba2ad6f7d4ac6a ANTS4-5Months3T.zip +dbdf2a9976121f2b106da96775690da3 ANTS6-0Months3T.zip +75fe37a1bc80ed6793a8abb47681d5ab ANTS7-5Months3T.zip +790f7dba0a264262e6c1c2dfdf216215 ANTS9-0Months3T.zip +""" +_MANIFEST_PATH = op.dirname(__file__) + + +@verbose +def fetch_infant_template(age, subjects_dir=None, verbose=None): + """Fetch and update an infant MRI template. + + Parameters + ---------- + age : str + Age to download. Can be one of ``{'2wk', '1mo', '2mo', '3mo', '4.5mo', + '6mo', '7.5mo', '9mo', '10.5mo', '12mo', '15mo', '18mo', '2yr'}``. + subjects_dir : str | None + The path to download the template data to. + %(verbose)s + + Returns + ------- + subject : str + The standard subject name, e.g. ``ANTS4-5Month3T``. + + Notes + ----- + If you use these templates in your work, please cite + :footcite:`OReillyEtAl2021` and :footcite:`RichardsEtAl2016`. + + .. versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + # Code used to create the lists: + # + # $ for name in 2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years; do wget https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/ANTS${name}3T.zip; done # noqa: E501 + # $ md5sum ANTS*.zip + # $ python + # >>> import os.path as op + # >>> import zipfile + # >>> names = [f'ANTS{name}3T' for name in '2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years'.split()] # noqa: E501 + # >>> for name in names: + # ... with zipfile.ZipFile(f'{name}.zip', 'r') as zip: + # ... names = sorted(name for name in zip.namelist() if not zipfile.Path(zip, name).is_dir()) # noqa: E501 + # ... with open(f'{name}.txt', 'w') as fid: + # ... 
fid.write('\n'.join(names)) + _validate_type(age, str, 'age') + _check_option('age', age, _AGES.split()) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subjects_dir = op.abspath(subjects_dir) + unit = dict(wk='Weeks', mo='Months', yr='Years')[age[-2:]] + first = age[:-2].split('.')[0] + dash = '-5' if '.5' in age else '-0' + subject = f'ANTS{first}{dash}{unit}3T' + # Actually get and create the files + subj_dir = op.join(subjects_dir, subject) + os.makedirs(subj_dir, exist_ok=True) + # .zip -> hash mapping + orig_hashes = dict(line.strip().split()[::-1] + for line in _ORIGINAL_HASHES.strip().splitlines()) + _manifest_check_download( + manifest_path=op.join(_MANIFEST_PATH, f'{subject}.txt'), + destination=subj_dir, + url=_ORIGINAL_URL.format(subject=subject), + hash_=orig_hashes[f'{subject}.zip'], + ) + return subject diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py index 8b355158b57..64d1e5cc1d2 100644 --- a/mne/datasets/eegbci/eegbci.py +++ b/mne/datasets/eegbci/eegbci.py @@ -17,7 +17,7 @@ def data_path(url, path=None, force_update=False, update_path=None, """Get path to local copy of EEGMMI dataset URL. This is a low-level function useful for getting a local copy of a - remote EEGBCI dataset [1]_ which is available at PhysioNet [2]_. + remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`. Parameters ---------- @@ -57,14 +57,7 @@ def data_path(url, path=None, force_update=False, update_path=None, References ---------- - .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N., - Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer - Interface (BCI) System. IEEE TBME 51(6):1034-1043 - .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, - Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) - PhysioBank, PhysioToolkit, and PhysioNet: Components of a New - Research Resource for Complex Physiologic Signals. - Circulation 101(23):e215-e220 + .. footbibliography:: """ # noqa: E501 key = 'MNE_DATASETS_EEGBCI_PATH' name = 'EEGBCI' @@ -90,27 +83,15 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, base_url=EEGMI_URL, verbose=None): # noqa: D301 """Get paths to local copies of EEGBCI dataset files. - This will fetch data for the EEGBCI dataset [1]_, which is also - available at PhysioNet [2]_. + This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also + available at PhysioNet :footcite:`GoldbergerEtAl2000`. Parameters ---------- subject : int The subject to use. Can be in the range of 1-109 (inclusive). runs : int | list of int - The runs to use. The runs correspond to: - - ========= =================================== - run task - ========= =================================== - 1 Baseline, eyes open - 2 Baseline, eyes closed - 3, 7, 11 Motor execution: left vs right hand - 4, 8, 12 Motor imagery: left vs right hand - 5, 9, 13 Motor execution: hands vs feet - 6, 10, 14 Motor imagery: hands vs feet - ========= =================================== - + The runs to use. See Notes for details. path : None | str Location of where to look for the EEGBCI data storing location. If None, the environment variable or config parameter @@ -123,6 +104,8 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. 
+ base_url : str + The URL root for the data. %(verbose)s Returns @@ -132,11 +115,23 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, Notes ----- - For example, one could do: + The run numbers correspond to: + + ========= =================================== + run task + ========= =================================== + 1 Baseline, eyes open + 2 Baseline, eyes closed + 3, 7, 11 Motor execution: left vs right hand + 4, 8, 12 Motor imagery: left vs right hand + 5, 9, 13 Motor execution: hands vs feet + 6, 10, 14 Motor imagery: hands vs feet + ========= =================================== + + For example, one could do:: >>> from mne.datasets import eegbci - >>> eegbci.load_data(1, [4, 10, 14],\ - os.getenv('HOME') + '/datasets') # doctest:+SKIP + >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the @@ -145,15 +140,8 @@ def load_data(subject, runs, path=None, force_update=False, update_path=None, References ---------- - .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N., - Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer - Interface (BCI) System. IEEE TBME 51(6):1034-1043 - .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, - Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) - PhysioBank, PhysioToolkit, and PhysioNet: Components of a New - Research Resource for Complex Physiologic Signals. - Circulation 101(23):e215-e220 - """ + .. footbibliography:: + """ # noqa: E501 if not hasattr(runs, '__iter__'): runs = [runs] diff --git a/mne/datasets/epilepsy_ecog/__init__.py b/mne/datasets/epilepsy_ecog/__init__.py new file mode 100644 index 00000000000..81d519983d9 --- /dev/null +++ b/mne/datasets/epilepsy_ecog/__init__.py @@ -0,0 +1,3 @@ +"""Clinical epilepsy datasets.""" + +from ._data import data_path, has_epilepsy_ecog_data, get_version diff --git a/mne/datasets/epilepsy_ecog/_data.py b/mne/datasets/epilepsy_ecog/_data.py new file mode 100644 index 00000000000..09b1ce52ee9 --- /dev/null +++ b/mne/datasets/epilepsy_ecog/_data.py @@ -0,0 +1,31 @@ +# Authors: Adam Li +# Alex Rockhill +# License: BSD Style. 
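For reference, a minimal usage sketch of the fetch_infant_template helper added earlier in this patch (mne/datasets/_infant/base.py); the subjects_dir location below is hypothetical and the first call assumes a working network connection:

    import os.path as op
    from mne.datasets import fetch_infant_template

    # Hypothetical download location; any writable directory works.
    subjects_dir = op.expanduser('~/mne_data/infant_subjects')
    # Fetch (or update) the 6-month infant template MRIs/surfaces and return
    # the FreeSurfer-style subject name, e.g. 'ANTS6-0Months3T'.
    subject = fetch_infant_template('6mo', subjects_dir=subjects_dir)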
+ +from functools import partial + +from ...utils import verbose +from ..utils import (has_dataset, _data_path, _data_path_doc, + _get_version, _version_doc) + +has_epilepsy_ecog_data = partial(has_dataset, name='epilepsy_ecog') + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, + download=True, verbose=None): # noqa: D103 + return _data_path(path=path, force_update=force_update, + update_path=update_path, name='epilepsy_ecog', + download=download) + + +data_path.__doc__ = _data_path_doc.format( + name='epilepsy_ecog', conf='MNE_DATASETS_EPILEPSY_ECOG_PATH') + + +def get_version(): # noqa: D103 + return _get_version('epilepsy_ecog') + + +get_version.__doc__ = _version_doc.format(name='epilepsy_ecog') diff --git a/mne/datasets/erp_core/__init__.py b/mne/datasets/erp_core/__init__.py new file mode 100644 index 00000000000..9e2588347da --- /dev/null +++ b/mne/datasets/erp_core/__init__.py @@ -0,0 +1,3 @@ +"""ERP-CORE EEG dataset.""" + +from .erp_core import data_path, get_version diff --git a/mne/datasets/erp_core/erp_core.py b/mne/datasets/erp_core/erp_core.py new file mode 100644 index 00000000000..1338308ffbb --- /dev/null +++ b/mne/datasets/erp_core/erp_core.py @@ -0,0 +1,26 @@ +from functools import partial + +from ...utils import verbose +from ..utils import (has_dataset, _data_path, _data_path_doc, + _get_version, _version_doc) + +has_erp_core_data = partial(has_dataset, name='erp_core') + + +@verbose +def data_path(path=None, force_update=False, update_path=True, + download=True, verbose=None): # noqa: D103 + return _data_path(path=path, force_update=force_update, + update_path=update_path, name='erp_core', + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='erp_core', + conf='MNE_DATASETS_ERP_CORE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('erp_core') + + +get_version.__doc__ = _version_doc.format(name='erp_core') diff --git a/mne/datasets/hf_sef/hf_sef.py b/mne/datasets/hf_sef/hf_sef.py index 569f8138dfa..edbccfddee9 100644 --- a/mne/datasets/hf_sef/hf_sef.py +++ b/mne/datasets/hf_sef/hf_sef.py @@ -16,7 +16,8 @@ def data_path(dataset='evoked', path=None, force_update=False, update_path=True, verbose=None): u"""Get path to local copy of the high frequency SEF dataset. - Gets a local copy of the high frequency SEF MEG dataset [1]_. + Gets a local copy of the high frequency SEF MEG dataset + :footcite:`NurminenEtAl2017`. Parameters ---------- @@ -44,8 +45,7 @@ def data_path(dataset='evoked', path=None, force_update=False, References ---------- - .. [1] Nurminen, J., Paananen, H., Mäkelä, J. (2017): High frequency - somatosensory MEG dataset. https://doi.org/10.5281/zenodo.889234 + .. footbibliography:: """ key = 'MNE_DATASETS_HF_SEF_PATH' name = 'HF_SEF' diff --git a/mne/datasets/kiloword/kiloword.py b/mne/datasets/kiloword/kiloword.py index f72310a1ea3..07d619fa2b8 100644 --- a/mne/datasets/kiloword/kiloword.py +++ b/mne/datasets/kiloword/kiloword.py @@ -7,10 +7,9 @@ @verbose def data_path(path=None, force_update=False, update_path=True, download=True, verbose=None): - """ - Get path to local copy of the kiloword dataset. + """Get path to local copy of the kiloword dataset. - This is the dataset from [1]_. + This is the dataset from :footcite:`DufauEtAl2015`. Parameters ---------- @@ -27,6 +26,11 @@ def data_path(path=None, force_update=False, update_path=True, download=True, update_path : bool | None If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python config to the given path. 
If None, the user is prompted. + download : bool + If False and the kiloword dataset has not been downloaded yet, + it will not be downloaded and the path will be returned as + '' (empty string). This is mostly used for debugging purposes + and can be safely ignored by most users. %(verbose)s Returns @@ -37,9 +41,7 @@ def data_path(path=None, force_update=False, update_path=True, download=True, References ---------- - .. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand - words are worth a picture: Snapshots of printed-word processing in an - event-related potential megastudy. Psychological science, 2015 + .. footbibliography:: """ return _data_path(path=path, force_update=force_update, update_path=update_path, name='kiloword', diff --git a/mne/datasets/limo/limo.py b/mne/datasets/limo/limo.py index 34ca802b4ce..c02e8f73a68 100644 --- a/mne/datasets/limo/limo.py +++ b/mne/datasets/limo/limo.py @@ -49,8 +49,8 @@ def data_path(subject, path=None, force_update=False, update_path=None, """Get path to local copy of LIMO dataset URL. This is a low-level function useful for getting a local copy of the - remote LIMO dataset [1]_. The complete dataset is available at - datashare.is.ed.ac.uk/ [2]_. + remote LIMO dataset :footcite:`Rousselet2016`. The complete dataset is + available at datashare.is.ed.ac.uk/. Parameters ---------- @@ -88,10 +88,7 @@ def data_path(subject, path=None, force_update=False, update_path=None, References ---------- - .. [1] Guillaume, Rousselet. (2016). LIMO EEG Dataset, [dataset]. - University of Edinburgh, Centre for Clinical Brain Sciences. - https://doi.org/10.7488/ds/1556. - .. [2] https://datashare.is.ed.ac.uk/handle/10283/2189?show=full + .. footbibliography:: """ # noqa: E501 # set destination path for download key = 'MNE_DATASETS_LIMO_PATH' diff --git a/mne/datasets/sleep_physionet/_utils.py b/mne/datasets/sleep_physionet/_utils.py index 7c6b2d97ea9..6bdb588f85f 100644 --- a/mne/datasets/sleep_physionet/_utils.py +++ b/mne/datasets/sleep_physionet/_utils.py @@ -45,7 +45,8 @@ def _data_path(path=None, force_update=False, update_path=None, verbose=None): """Get path to local copy of EEG Physionet age Polysomnography dataset URL. This is a low-level function useful for getting a local copy of a - remote Polysomnography dataset [1]_ which is available at PhysioNet [2]_. + remote Polysomnography dataset :footcite:`KempEtAl2000` which is available + at PhysioNet :footcite:`GoldbergerEtAl2000`. Parameters ---------- @@ -71,14 +72,7 @@ def _data_path(path=None, force_update=False, update_path=None, verbose=None): References ---------- - .. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis of - a sleep-dependent neuronal feedback loop: the slow-wave microcontinuity - of the EEG. IEEE-BME 47(9):1185-1194 (2000). - .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, - Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) - PhysioBank, PhysioToolkit, and PhysioNet: Components of a New - Research Resource for Complex Physiologic Signals. - Circulation 101(23):e215-e220 + .. 
footbibliography:: """ # noqa: E501 key = 'PHYSIONET_SLEEP_PATH' name = 'PHYSIONET_SLEEP' diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py index 0700f8c5a3b..d1c9aa05e68 100644 --- a/mne/datasets/sleep_physionet/age.py +++ b/mne/datasets/sleep_physionet/age.py @@ -16,13 +16,14 @@ @verbose -def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, +def fetch_data(subjects, recording=(1, 2), path=None, force_update=False, update_path=None, base_url=BASE_URL, on_missing='raise', verbose=None): # noqa: D301 """Get paths to local copies of PhysioNet Polysomnography dataset files. This will fetch data from the publicly available subjects from PhysioNet's - study of age effects on sleep in healthy subjects [1]_[2]_. This + study of age effects on sleep in healthy subjects + :footcite:`MourtazaevEtAl1995,GoldbergerEtAl2000`. This corresponds to a subset of 153 recordings from 37 males and 41 females that were 25-101 years old at the time of the recordings. There are two night recordings per subject except for subjects 13, 36 and 52 which have one @@ -52,6 +53,8 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. + base_url : str + The URL root. on_missing : 'raise' | 'warn' | 'ignore' What to do if one or several recordings are not available. Valid keys are 'raise' | 'warn' | 'ignore'. Default is 'error'. If on_missing @@ -64,6 +67,10 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, paths : list List of local data paths of the given type. + See Also + -------- + mne.datasets.sleep_physionet.temazepam.fetch_data + Notes ----- For example, one could do: @@ -75,18 +82,7 @@ def fetch_data(subjects, recording=[1, 2], path=None, force_update=False, References ---------- - .. [1] MS Mourtazaev, B Kemp, AH Zwinderman, HAC Kamphuisen. Age and gender - affect different characteristics of slow waves in the sleep EEG. - Sleep 18(7):557–564 (1995). - .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, - Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) - PhysioBank, PhysioToolkit, and PhysioNet: Components of a New - Research Resource for Complex Physiologic Signals. - Circulation 101(23):e215-e220 - - See Also - -------- - :func:`mne.datasets.sleep_physionet.temazepam.fetch_data` + .. footbibliography:: """ # noqa: E501 records = np.loadtxt(AGE_SLEEP_RECORDS, skiprows=1, diff --git a/mne/datasets/sleep_physionet/temazepam.py b/mne/datasets/sleep_physionet/temazepam.py index 9eec4ba5ef3..688aa487418 100644 --- a/mne/datasets/sleep_physionet/temazepam.py +++ b/mne/datasets/sleep_physionet/temazepam.py @@ -16,18 +16,18 @@ @verbose -def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], - path=None, force_update=False, +def fetch_data(subjects, *, path=None, force_update=False, update_path=None, base_url=BASE_URL, verbose=None): """Get paths to local copies of PhysioNet Polysomnography dataset files. This will fetch data from the publicly available subjects from PhysioNet's - study of Temazepam effects on sleep [1]_. This corresponds to - a set of 22 subjects. Subjects had mild difficulty falling asleep - but were otherwise healthy. + study of Temazepam effects on sleep :footcite:`KempEtAl2000`. This + corresponds to a set of 22 subjects. Subjects had mild difficulty falling + asleep but were otherwise healthy. 
See more details in the `physionet website - `_. + `_ + :footcite:`GoldbergerEtAl2000`. Parameters ---------- @@ -45,6 +45,8 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], update_path : bool | None If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python config to the given path. If None, the user is prompted. + base_url : str + The base URL to download from. %(verbose)s Returns @@ -52,6 +54,10 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], paths : list List of local data paths of the given type. + See Also + -------- + mne.datasets.sleep_physionet.age.fetch_data + Notes ----- For example, one could do: @@ -63,18 +69,7 @@ def fetch_data(subjects, recording=[b'Placebo', 'temazepam'], References ---------- - .. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis - of a sleep-dependent neuronal feedback loop: the slow-wave - microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000). - .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, - Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) - PhysioBank, PhysioToolkit, and PhysioNet: Components of a New - Research Resource for Complex Physiologic Signals. - Circulation 101(23):e215-e220 - - See Also - -------- - :func:`mne.datasets.sleep_physionet.age.fetch_data` + .. footbibliography:: """ records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS, skiprows=1, diff --git a/mne/datasets/ssvep/__init__.py b/mne/datasets/ssvep/__init__.py new file mode 100644 index 00000000000..fc806536a0f --- /dev/null +++ b/mne/datasets/ssvep/__init__.py @@ -0,0 +1,3 @@ +"""SSVEP dataset.""" + +from .ssvep import data_path, has_ssvep_data, get_version diff --git a/mne/datasets/ssvep/ssvep.py b/mne/datasets/ssvep/ssvep.py new file mode 100644 index 00000000000..51315eb6c21 --- /dev/null +++ b/mne/datasets/ssvep/ssvep.py @@ -0,0 +1,30 @@ +# Authors: Dominik Welke +# License: BSD Style. 
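The ssvep module that starts just below follows the same thin data_path/get_version accessor pattern as the other dataset modules added in this patch. A minimal usage sketch (the download location is resolved from the MNE_DATASETS_SSVEP_PATH config, falling back to ~/mne_data):

    from mne.datasets import ssvep

    # Download the SSVEP example dataset if it is not already present and
    # return the local path to the extracted 'ssvep-example-data' folder.
    path = ssvep.data_path()
    print(ssvep.get_version())  # dataset version string, or None if unknown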
+ +from functools import partial + +from ...utils import verbose +from ..utils import (has_dataset, _data_path, _data_path_doc, + _get_version, _version_doc) + +has_ssvep_data = partial(has_dataset, name='ssvep') + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, + download=True, verbose=None): # noqa: D103 + return _data_path(path=path, force_update=force_update, + update_path=update_path, name='ssvep', + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='ssvep', + conf='MNE_DATASETS_SSVEP_PATH') + + +def get_version(): # noqa: D103 + return _get_version('ssvep') + + +get_version.__doc__ = _version_doc.format(name='ssvep') diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index 4b94e419b83..1b2cccb4e35 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -1,5 +1,6 @@ import os from os import path as op +import re import shutil import zipfile import sys @@ -7,11 +8,12 @@ import pytest from mne import datasets, read_labels_from_annot, write_labels_to_annot -from mne.datasets import testing +from mne.datasets import testing, fetch_infant_template +from mne.datasets._infant import base as infant_base from mne.datasets._fsaverage.base import _set_montage_coreg_path from mne.datasets.utils import _manifest_check_download -from mne.utils import (run_tests_if_main, requires_good_network, modified_env, +from mne.utils import (requires_good_network, get_subjects_dir, ArgvSetter, _pl, use_log_level, catch_logging, hashfunc) @@ -19,7 +21,7 @@ subjects_dir = op.join(testing.data_path(download=False), 'subjects') -def test_datasets_basic(tmpdir): +def test_datasets_basic(tmpdir, monkeypatch): """Test simple dataset functions.""" # XXX 'hf_sef' and 'misc' do not conform to these standards for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', @@ -43,27 +45,75 @@ def test_datasets_basic(tmpdir): tempdir = str(tmpdir) # don't let it read from the config file to get the directory, # force it to look for the default - with modified_env(**{'_MNE_FAKE_HOME_DIR': tempdir, 'SUBJECTS_DIR': None}): - assert (datasets.utils._get_path(None, 'foo', 'bar') == - op.join(tempdir, 'mne_data')) - assert get_subjects_dir(None) is None - _set_montage_coreg_path() - sd = get_subjects_dir() - assert sd.endswith('MNE-fsaverage-data') - - -def _fake_fetch_file(url, destination, print_destination=False): - with open(destination, 'w') as fid: - fid.write(url) + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', tempdir) + monkeypatch.delenv('SUBJECTS_DIR', raising=False) + assert (datasets.utils._get_path(None, 'foo', 'bar') == + op.join(tempdir, 'mne_data')) + assert get_subjects_dir(None) is None + _set_montage_coreg_path() + sd = get_subjects_dir() + assert sd.endswith('MNE-fsaverage-data') + monkeypatch.setenv('MNE_DATA', str(tmpdir.join('foo'))) + with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): + testing.data_path(download=False) @requires_good_network -def test_downloads(tmpdir): - """Test dataset URL handling.""" +def test_downloads(tmpdir, monkeypatch, capsys): + """Test dataset URL and version handling.""" # Try actually downloading a dataset - path = datasets._fake.data_path(path=str(tmpdir), update_path=False) + kwargs = dict(path=str(tmpdir), verbose=True) + path = datasets._fake.data_path(update_path=False, **kwargs) + out, _ = capsys.readouterr() + assert 'Downloading' in out + assert op.isdir(path) assert op.isfile(op.join(path, 'bar')) + assert not 
datasets.utils.has_dataset('fake') # not in the desired path assert datasets._fake.get_version() is None + assert datasets.utils._get_version('fake') is None + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir)) + with pytest.warns(RuntimeWarning, match='non-standard config'): + new_path = datasets._fake.data_path(update_path=True, **kwargs) + assert path == new_path + out, _ = capsys.readouterr() + assert 'Downloading' not in out + # No version: shown as existing but unknown version + assert datasets.utils.has_dataset('fake') + # XXX logic bug, should be "unknown" + assert datasets._fake.get_version() == '0.7' + # With a version but no required one: shown as existing and gives version + fname = tmpdir / 'foo' / 'version.txt' + with open(fname, 'w') as fid: + fid.write('0.1') + assert datasets.utils.has_dataset('fake') + assert datasets._fake.get_version() == '0.1' + datasets._fake.data_path(download=False, **kwargs) + out, _ = capsys.readouterr() + assert 'out of date' not in out + # With the required version: shown as existing with the required version + monkeypatch.setattr(datasets.utils, '_FAKE_VERSION', '0.1') + assert datasets.utils.has_dataset('fake') + assert datasets._fake.get_version() == '0.1' + datasets._fake.data_path(download=False, **kwargs) + out, _ = capsys.readouterr() + assert 'out of date' not in out + monkeypatch.setattr(datasets.utils, '_FAKE_VERSION', '0.2') + # With an older version: + # 1. Marked as not actually being present + assert not datasets.utils.has_dataset('fake') + # 2. Will try to update when `data_path` gets called, with logged message + want_msg = 'Correctly trying to download newer version' + + def _error_download(url, full_name, print_destination, hash_, hash_type): + assert 'foo.tgz' in url + assert str(tmpdir) in full_name + raise RuntimeError(want_msg) + + monkeypatch.setattr(datasets.utils, '_fetch_file', _error_download) + with pytest.raises(RuntimeError, match=want_msg): + datasets._fake.data_path(**kwargs) + out, _ = capsys.readouterr() + assert re.match(r'.* 0\.1 .*out of date.* 0\.2.*', out, re.MULTILINE), out @pytest.mark.slowtest @@ -153,4 +203,17 @@ def test_manifest_check_download(tmpdir, n_have, monkeypatch): assert op.isfile(op.join(destination, fname)) -run_tests_if_main() +def _fake_mcd(manifest_path, destination, url, hash_): + name = url.split('/')[-1].split('.')[0] + assert name in manifest_path + assert name in destination + assert name in url + assert len(hash_) == 32 + + +def test_infant(tmpdir, monkeypatch): + """Test fetch_infant_template.""" + monkeypatch.setattr(infant_base, '_manifest_check_download', _fake_mcd) + fetch_infant_template('12mo', subjects_dir=tmpdir) + with pytest.raises(ValueError, match='Invalid value for'): + fetch_infant_template('0mo', subjects_dir=tmpdir) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index c11fc7a1e30..6df444191d2 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -18,7 +18,6 @@ import numpy as np -from ._fsaverage.base import fetch_fsaverage from .. import __version__ as mne_version from ..label import read_labels_from_annot, Label, write_labels_to_annot from ..utils import (get_config, set_config, _fetch_file, logger, warn, @@ -27,6 +26,8 @@ from ..externals.doccer import docformat +_FAKE_VERSION = None # used for monkeypatching while testing versioning + _data_path_doc = """Get path to local copy of {name} dataset. Parameters @@ -177,6 +178,11 @@ def _get_path(path, key, name): # 3. 
get_config('MNE_DATA') path = get_config(key, get_config('MNE_DATA')) if path is not None: + if not op.exists(path): + msg = (f"Download location {path} as specified by MNE_DATA does " + f"not exist. Either create this directory manually and try " + f"again, or set MNE_DATA to an existing directory.") + raise FileNotFoundError(msg) return path # 4. ~/mne_data (but use a fake home during testing so we don't # unnecessarily create ~/mne_data) @@ -240,12 +246,15 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'phantom_4dbti': 'MNE_DATASETS_PHANTOM_4DBTI_PATH', 'limo': 'MNE_DATASETS_LIMO_PATH', 'refmeg_noise': 'MNE_DATASETS_REFMEG_NOISE_PATH', + 'ssvep': 'MNE_DATASETS_SSVEP_PATH', + 'erp_core': 'MNE_DATASETS_ERP_CORE_PATH', + 'epilepsy_ecog': 'MNE_DATASETS_EPILEPSY_ECOG_PATH', }[name] path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.111', misc='0.7') + releases = dict(testing='0.117', misc='0.9') # And also update the "md5_hashes['testing']" variable below. # To update any other dataset, update the data archive itself (upload # an updated version) and update the md5 hash. @@ -278,6 +287,9 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, fieldtrip_cmc='https://osf.io/j9b6s/download?version=1', phantom_4dbti='https://osf.io/v2brw/download?version=2', refmeg_noise='https://osf.io/drt6v/download?version=1', + ssvep='https://osf.io/z8h6k/download?version=5', + erp_core='https://osf.io/rzgba/download?version=1', + epilepsy_ecog='https://osf.io/z4epq/download?revision=1', ) # filename of the resulting downloaded archive (only needed if the URL # name does not match resulting filename) @@ -296,13 +308,17 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, visual_92_categories=['MNE-visual_92_categories-data-part1.tar.gz', 'MNE-visual_92_categories-data-part2.tar.gz'], phantom_4dbti='MNE-phantom-4DBTi.zip', - refmeg_noise='sample_reference_MEG_noise-raw.zip' + refmeg_noise='sample_reference_MEG_noise-raw.zip', + ssvep='ssvep_example_data.zip', + erp_core='MNE-ERP-CORE-data.tar.gz', + epilepsy_ecog='MNE-epilepsy-ecog-data.tar.gz', ) # original folder names that get extracted (only needed if the # archive does not extract the right folder name; e.g., usually GitHub) folder_origs = dict( # not listed means None (no need to move) misc='mne-misc-data-%s' % releases['misc'], testing='mne-testing-data-%s' % releases['testing'], + ssvep='ssvep-example-data' ) # finally, where we want them to extract to (only needed if the folder name # is not the same as the last bit of the archive name without the file @@ -317,7 +333,9 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, visual_92_categories='MNE-visual_92_categories-data', fieldtrip_cmc='MNE-fieldtrip_cmc-data', phantom_4dbti='MNE-phantom-4DBTi', - refmeg_noise='MNE-refmeg-noise-data' + refmeg_noise='MNE-refmeg-noise-data', + ssvep='ssvep-example-data', + erp_core='MNE-ERP-CORE-data', ) md5_hashes = dict( brainstorm=dict( @@ -327,11 +345,11 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, bst_raw='fa2efaaec3f3d462b319bc24898f440c', bst_resting='70fc7bf9c3b97c4f2eab6260ee4a0430'), fake='3194e9f7b46039bb050a74f3e1ae9908', - misc='2b2f2fec9d1197ed459117db1c6341ee', + misc='f832ce9c4c27e83396cc977b293b0aa9', sample='12b75d1cb7df9dfb4ad73ed82f61094f', 
somato='32fd2f6c8c7eb0784a1de6435273c48b', spm='9f43f67150e3b694b523a21eb929ea75', - testing='e7ece4615882b99026edb76fb708a3ce', + testing='d8df35b2e625e213769e97e719de205c', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', @@ -341,9 +359,12 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, mtrf='273a390ebbc48da2c3184b01a82e4636', fieldtrip_cmc='6f9fd6520f9a66e20994423808d2528c', phantom_4dbti='938a601440f3ffa780d20a17bae039ff', - refmeg_noise='779fecd890d98b73a4832e717d7c7c45' + refmeg_noise='779fecd890d98b73a4832e717d7c7c45', + ssvep='af866bbc0f921114ac9d683494fe87d6', + erp_core='5866c0d6213bd7ac97f254c776f6c4b1', + epilepsy_ecog='ffb139174afa0f71ec98adbbb1729dea', ) - assert set(md5_hashes.keys()) == set(urls.keys()) + assert set(md5_hashes) == set(urls) url = urls[name] hash_ = md5_hashes[name] folder_orig = folder_origs.get(name, None) @@ -377,6 +398,15 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, logger.debug('folder_path: %s' % (folder_path,)) need_download = any(not op.exists(f) for f in folder_path) + # additional condition: check for version.txt and parse it + want_version = releases.get(name, None) + want_version = _FAKE_VERSION if name == 'fake' else want_version + if not need_download and want_version is not None: + data_version = _dataset_version(folder_path[0], name) + need_download = LooseVersion(data_version) < LooseVersion(want_version) + if need_download: + logger.info(f'Dataset {name} version {data_version} out of date, ' + f'latest version is {want_version}') if need_download and not download: return '' @@ -561,7 +591,10 @@ def has_dataset(name): 'kiloword': 'MNE-kiloword-data', 'phantom_4dbti': 'MNE-phantom-4DBTi', 'mtrf': 'mTRF_1.5', - 'refmeg_noise': 'MNE-refmeg-noise-data' + 'refmeg_noise': 'MNE-refmeg-noise-data', + 'ssvep': 'ssvep-example-data', + 'erp_core': 'MNE-ERP-CORE-data', + 'epilepsy_ecog': 'MNE-epilepsy-ecog-data' }[name] dp = _data_path(download=False, name=name, check_version=False, archive_name=archive_name) @@ -583,7 +616,8 @@ def _download_all_example_data(verbose=True): from . 
import (sample, testing, misc, spm_face, somato, brainstorm, eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor, refmeg_noise) + fnirs_motor, refmeg_noise, fetch_infant_template, + fetch_fsaverage, ssvep, erp_core, epilepsy_ecog) sample_path = sample.data_path() testing.data_path() misc.data_path() @@ -598,6 +632,8 @@ def _download_all_example_data(verbose=True): kiloword.data_path() phantom_4dbti.data_path() refmeg_noise.data_path() + ssvep.data_path() + epilepsy_ecog.data_path() brainstorm.bst_raw.data_path(accept=True) brainstorm.bst_auditory.data_path(accept=True) brainstorm.bst_resting.data_path(accept=True) @@ -611,10 +647,13 @@ def _download_all_example_data(verbose=True): # If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one # (probably on CircleCI, or otherwise advanced user) fetch_fsaverage(None) + fetch_infant_template('6mo') fetch_hcp_mmp_parcellation( subjects_dir=sample_path + '/subjects', accept=True) limo.load_data(subject=1, update_path=True) + erp_core.data_path() + @verbose def fetch_aparc_sub_parcellation(subjects_dir=None, verbose=None): @@ -799,6 +838,7 @@ def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, *, def _manifest_check_download(manifest_path, destination, url, hash_): with open(manifest_path, 'r') as fid: names = [name.strip() for name in fid.readlines()] + manifest_path = op.basename(manifest_path) need = list() for name in names: if not op.isfile(op.join(destination, name)): diff --git a/mne/decoding/base.py b/mne/decoding/base.py index 2b665a6d0d0..b2183ae76bc 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -144,7 +144,6 @@ def fit_transform(self, X, y): ------- y_pred : array, shape (n_samples,) The predicted targets. - """ return self.fit(X, y).transform(X) diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py index 8414c483a2a..a4d5c47a6e5 100644 --- a/mne/decoding/csp.py +++ b/mne/decoding/csp.py @@ -10,7 +10,6 @@ import copy as cp import numpy as np -from scipy import linalg from .base import BaseEstimator from .mixin import TransformerMixin @@ -160,6 +159,7 @@ def fit(self, X, y): self : instance of CSP Returns the modified instance. """ + from scipy import linalg self._check_Xy(X, y) self._classes = np.unique(y) @@ -532,6 +532,7 @@ def _epoch_cov(self, x_class): return cov, weight def _decompose_covs(self, covs, sample_weights): + from scipy import linalg n_classes = len(covs) if n_classes == 2: eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0)) @@ -761,6 +762,7 @@ def fit(self, X, y): self : instance of SPoC Returns the modified instance. """ + from scipy import linalg self._check_Xy(X, y) if len(np.unique(y)) < 2: diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index b880e176193..c6728d94d40 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -7,7 +7,6 @@ import numbers import numpy as np -from scipy import linalg from .base import get_coef, BaseEstimator, _check_estimator from .time_delaying_ridge import TimeDelayingRidge @@ -170,6 +169,7 @@ def fit(self, X, y): self : instance The instance so you can chain operations. 
""" + from scipy import linalg if self.scoring not in _SCORERS.keys(): raise ValueError('scoring must be one of %s, got' '%s ' % (sorted(_SCORERS.keys()), self.scoring)) diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py index 14f4387a571..8876aeb5057 100644 --- a/mne/decoding/search_light.py +++ b/mne/decoding/search_light.py @@ -114,8 +114,9 @@ def fit_transform(self, X, y, **fit_params): X : array, shape (n_samples, nd_features, n_tasks) The training input samples. For each task, a clone estimator is fitted independently. The feature dimension can be - multidimensional e.g. - X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + multidimensional, e.g.:: + + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) y : array, shape (n_samples,) | (n_samples, n_targets) The target values. **fit_params : dict of string -> object diff --git a/mne/decoding/ssd.py b/mne/decoding/ssd.py index e62f4703552..5d9b57b6f17 100644 --- a/mne/decoding/ssd.py +++ b/mne/decoding/ssd.py @@ -3,7 +3,7 @@ # License: BSD (3-clause) import numpy as np -from scipy.linalg import eigh + from ..filter import filter_data from ..cov import _regularized_covariance from . import TransformerMixin, BaseEstimator @@ -115,11 +115,17 @@ def __init__(self, info, filt_params_signal, filt_params_noise, filt_params_noise['h_freq']) self.filt_params_signal = filt_params_signal self.filt_params_noise = filt_params_noise + # check if boolean + if not isinstance(sort_by_spectral_ratio, (bool)): + raise ValueError('sort_by_spectral_ratio must be boolean') self.sort_by_spectral_ratio = sort_by_spectral_ratio if n_fft is None: self.n_fft = int(self.info['sfreq']) else: self.n_fft = int(n_fft) + # check if boolean + if not isinstance(return_filtered, (bool)): + raise ValueError('return_filtered must be boolean') self.return_filtered = return_filtered self.reg = reg self.n_components = n_components @@ -153,6 +159,7 @@ def fit(self, X, y=None): self : instance of SSD Returns the modified instance. """ + from scipy.linalg import eigh self._check_X(X) X_aux = X[..., self.picks_, :] @@ -178,7 +185,14 @@ def fit(self, X, y=None): self.eigvals_ = eigvals_[ix] self.filters_ = eigvects_[:, ix] self.patterns_ = np.linalg.pinv(self.filters_) - + # We assume that ordering by spectral ratio is more important + # than the initial ordering. This ording should be also learned when + # fitting. + X_ssd = self.filters_.T @ X[..., self.picks_, :] + sorter_spec = Ellipsis + if self.sort_by_spectral_ratio: + _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) + self.sorter_spec = sorter_spec return self def transform(self, X): @@ -199,18 +213,15 @@ def transform(self, X): self._check_X(X) if self.filters_ is None: raise RuntimeError('No filters available. Please first call fit') - + if self.return_filtered: + X_aux = X[..., self.picks_, :] + X = filter_data(X_aux, self.info['sfreq'], + **self.filt_params_signal) X_ssd = self.filters_.T @ X[..., self.picks_, :] - # We assume that ordering by spectral ratio is more important - # than the initial ordering. This is why we apply component picks - # after ordering. 
- sorter_spec = Ellipsis - if self.sort_by_spectral_ratio: - _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) if X.ndim == 2: - X_ssd = X_ssd[sorter_spec][:self.n_components] + X_ssd = X_ssd[self.sorter_spec][:self.n_components] else: - X_ssd = X_ssd[:, sorter_spec, :][:, :self.n_components, :] + X_ssd = X_ssd[:, self.sorter_spec, :][:, :self.n_components, :] return X_ssd def get_spectral_ratio(self, ssd_sources): @@ -222,7 +233,7 @@ def get_spectral_ratio(self, ssd_sources): Parameters ---------- ssd_sources : array - Data projectded to SSD space. + Data projected to SSD space. Returns ------- @@ -278,10 +289,6 @@ def apply(self, X): The processed data. """ X_ssd = self.transform(X) - sorter_spec = Ellipsis - if self.sort_by_spectral_ratio: - _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) - - pick_patterns = self.patterns_[sorter_spec, :self.n_components].T + pick_patterns = self.patterns_[self.sorter_spec][:self.n_components].T X = pick_patterns @ X_ssd return X diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index 418eeed69ec..43a5b0dc818 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -6,9 +6,10 @@ import pytest import numpy as np +from numpy import einsum +from numpy.fft import rfft, irfft from numpy.testing import assert_array_equal, assert_allclose, assert_equal -from mne.fixes import einsum, rfft, irfft from mne.utils import requires_sklearn, run_tests_if_main from mne.decoding import ReceptiveField, TimeDelayingRidge from mne.decoding.receptive_field import (_delay_time_series, _SCORERS, diff --git a/mne/decoding/tests/test_ssd.py b/mne/decoding/tests/test_ssd.py index c3b05b6fdc6..d4af2335438 100644 --- a/mne/decoding/tests/test_ssd.py +++ b/mne/decoding/tests/test_ssd.py @@ -100,6 +100,16 @@ def test_ssd(): pytest.raises(TypeError, ssd.fit, raw) + # check non-boolean return_filtered + with pytest.raises(ValueError, match='return_filtered'): + ssd = SSD(info, filt_params_signal, filt_params_noise, + return_filtered=0) + + # check non-boolean sort_by_spectral_ratio + with pytest.raises(ValueError, match='sort_by_spectral_ratio'): + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=0) + # More than 1 channel type ch_types = np.reshape([['mag'] * 10, ['eeg'] * 10], n_channels) info_2 = create_info(ch_names=n_channels, sfreq=sf, ch_types=ch_types) @@ -135,14 +145,22 @@ def test_ssd(): ssd.fit(X) X_denoised = ssd.apply(X) assert_array_almost_equal(X_denoised, X) + # denoised by low-rank-factorization + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=n_components, sort_by_spectral_ratio=True) + ssd.fit(X) + X_denoised = ssd.apply(X) + assert (np.linalg.matrix_rank(X_denoised) == n_components) # Power ratio ordering - spec_ratio, _ = ssd.get_spectral_ratio(ssd.transform(X)) + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(X) + spec_ratio, sorter_spec = ssd.get_spectral_ratio(ssd.transform(X)) # since we now that the number of true components is 5, the relative # difference should be low for the first 5 components and then increases index_diff = np.argmax(-np.diff(spec_ratio)) assert index_diff == n_components_true - 1 - # Check detected peaks # fit ssd n_components = n_components_true @@ -159,7 +177,6 @@ def test_ssd(): psd_S, _ = psd_array_welch(S[0], sfreq=250, n_fft=250) corr = np.abs(np.corrcoef((psd_out, psd_S))[0, 1]) assert 
np.abs(corr) > 0.95 - # Check pattern estimation # Since there is no exact ordering of the recovered patterns # a pair-wise greedy search will be done @@ -231,3 +248,77 @@ def test_ssd_pipeline(): out = pipe.fit_transform(X_e, y) assert (out.shape == (100, 2)) assert (pipe.get_params()['SSD__n_components'] == 5) + + +def test_sorting(): + """Test sorting learning during training.""" + X, _, _ = simulate_data(n_trials=100, n_channels=20, n_samples=500) + # Epoch length is 1 second + X = np.reshape(X, (100, 20, 500)) + # split data + Xtr, Xte = X[:80], X[80:] + sf = 250 + n_channels = Xtr.shape[1] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=4, h_trans_bandwidth=4) + + # check sort_by_spectral_ratio set to False + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(Xtr) + _, sorter_tr = ssd.get_spectral_ratio(ssd.transform(Xtr)) + _, sorter_te = ssd.get_spectral_ratio(ssd.transform(Xte)) + assert any(sorter_tr != sorter_te) + + # check sort_by_spectral_ratio set to True + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=True) + ssd.fit(Xtr) + + # check sorters + sorter_in = ssd.sorter_spec + ssd = SSD(info, filt_params_signal, filt_params_noise, + n_components=None, sort_by_spectral_ratio=False) + ssd.fit(Xtr) + _, sorter_out = ssd.get_spectral_ratio(ssd.transform(Xtr)) + + assert all(sorter_in == sorter_out) + + +def test_return_filtered(): + """Test return filtered option.""" + # Check return_filtered + # Simulated more noise data and with broader freqquency than the desired + X, _, _ = simulate_data(SNR=0.9, freqs_sig=[4, 13]) + sf = 250 + n_channels = X.shape[0] + info = create_info(ch_names=n_channels, sfreq=sf, ch_types='eeg') + + filt_params_signal = dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + filt_params_noise = dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1], + l_trans_bandwidth=1, h_trans_bandwidth=1) + + # return filtered to true + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=False, return_filtered=True) + ssd.fit(X) + + out = ssd.transform(X) + psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250) + freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1]) + assert (freqs_up == freqs_sig) + + # return filtered to false + ssd = SSD(info, filt_params_signal, filt_params_noise, + sort_by_spectral_ratio=False, return_filtered=False) + ssd.fit(X) + + out = ssd.transform(X) + psd_out, freqs = psd_array_welch(out[0], sfreq=250, n_fft=250) + freqs_up = int(freqs[psd_out > 0.5][0]), int(freqs[psd_out > 0.5][-1]) + assert (freqs_up != freqs_sig) diff --git a/mne/decoding/tests/test_transformer.py b/mne/decoding/tests/test_transformer.py index 12dce4e5b4a..f479cc3f415 100644 --- a/mne/decoding/tests/test_transformer.py +++ b/mne/decoding/tests/test_transformer.py @@ -25,7 +25,13 @@ event_name = op.join(data_dir, 'test-eve.fif') -def test_scaler(): +@pytest.mark.parametrize('info, method', [ + (True, None), + (True, dict(mag=5, grad=10, eeg=20)), + (False, 'mean'), + (False, 'median'), +]) +def test_scaler(info, method): """Test methods of Scaler.""" raw = io.read_raw_fif(raw_fname) events = read_events(event_name) @@ -38,41 +44,42 @@ def 
test_scaler(): epochs_data = epochs.get_data() y = epochs.events[:, -1] - methods = (None, dict(mag=5, grad=10, eeg=20), 'mean', 'median') - infos = (epochs.info, epochs.info, None, None) epochs_data_t = epochs_data.transpose([1, 0, 2]) - for method, info in zip(methods, infos): - if method in ('mean', 'median') and not check_version('sklearn'): + if method in ('mean', 'median'): + if not check_version('sklearn'): with pytest.raises(ImportError, match='No module'): Scaler(info, method) - continue - scaler = Scaler(info, method) - X = scaler.fit_transform(epochs_data, y) - assert_equal(X.shape, epochs_data.shape) - if method is None or isinstance(method, dict): - sd = DEFAULTS['scalings'] if method is None else method - stds = np.zeros(len(picks)) - for key in ('mag', 'grad'): - stds[pick_types(epochs.info, meg=key)] = 1. / sd[key] - stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. / sd['eeg'] - means = np.zeros(len(epochs.ch_names)) - elif method == 'mean': - stds = np.array([np.std(ch_data) for ch_data in epochs_data_t]) - means = np.array([np.mean(ch_data) for ch_data in epochs_data_t]) - else: # median - percs = np.array([np.percentile(ch_data, [25, 50, 75]) - for ch_data in epochs_data_t]) - stds = percs[:, 2] - percs[:, 0] - means = percs[:, 1] - assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis], - epochs_data, rtol=1e-12, atol=1e-20, err_msg=method) - - X2 = scaler.fit(epochs_data, y).transform(epochs_data) - assert_array_equal(X, X2) - - # inverse_transform - Xi = scaler.inverse_transform(X) - assert_array_almost_equal(epochs_data, Xi) + return + + if info: + info = epochs.info + scaler = Scaler(info, method) + X = scaler.fit_transform(epochs_data, y) + assert_equal(X.shape, epochs_data.shape) + if method is None or isinstance(method, dict): + sd = DEFAULTS['scalings'] if method is None else method + stds = np.zeros(len(picks)) + for key in ('mag', 'grad'): + stds[pick_types(epochs.info, meg=key)] = 1. / sd[key] + stds[pick_types(epochs.info, meg=False, eeg=True)] = 1. 
/ sd['eeg'] + means = np.zeros(len(epochs.ch_names)) + elif method == 'mean': + stds = np.array([np.std(ch_data) for ch_data in epochs_data_t]) + means = np.array([np.mean(ch_data) for ch_data in epochs_data_t]) + else: # median + percs = np.array([np.percentile(ch_data, [25, 50, 75]) + for ch_data in epochs_data_t]) + stds = percs[:, 2] - percs[:, 0] + means = percs[:, 1] + assert_allclose(X * stds[:, np.newaxis] + means[:, np.newaxis], + epochs_data, rtol=1e-12, atol=1e-20, err_msg=method) + + X2 = scaler.fit(epochs_data, y).transform(epochs_data) + assert_array_equal(X, X2) + + # inverse_transform + Xi = scaler.inverse_transform(X) + assert_array_almost_equal(epochs_data, Xi) # Test init exception pytest.raises(ValueError, Scaler, None, None) diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py index 243c50585cc..591a9920cb3 100644 --- a/mne/decoding/time_delaying_ridge.py +++ b/mne/decoding/time_delaying_ridge.py @@ -6,7 +6,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from .base import BaseEstimator from ..cuda import _setup_cuda_fft_multiply_repeated @@ -146,6 +145,7 @@ def _toeplitz_dot(a, b): def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct', normed=False): """Compute regularization parameter from neighbors.""" + from scipy import linalg from scipy.sparse.csgraph import laplacian known_types = ('ridge', 'laplacian') if isinstance(reg_type, str): @@ -201,6 +201,7 @@ def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct', def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in): """Fit the model using correlation matrices.""" # do the regularized solving + from scipy import linalg n_ch_out = x_y.shape[1] assert x_y.shape[0] % n_ch_x == 0 n_delays = x_y.shape[0] // n_ch_x diff --git a/mne/defaults.py b/mne/defaults.py index 5b85be312c2..38237176a42 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -9,38 +9,43 @@ DEFAULTS = dict( color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m', emg='k', ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', - exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', - gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', + exci='k', ias='k', syst='k', seeg='saddlebrown', dbs='seagreen', + dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', fnirs_fd_phase='k', fnirs_od='k', csd='k'), + si_units=dict(mag='T', grad='T/m', eeg='V', eog='V', ecg='V', emg='V', + misc='AU', seeg='V', dbs='V', dipole='Am', gof='GOF', + bio='V', ecog='V', hbo='M', hbr='M', ref_meg='T', + fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', + fnirs_fd_phase='rad', fnirs_od='V', csd='V/m²'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', - misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', - ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', + misc='AU', seeg='mV', dbs='µV', dipole='nAm', gof='GOF', + bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, - misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, - hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1., - fnirs_od=1.0, csd=1e3), + misc=1.0, seeg=1e3, dbs=1e6, ecog=1e6, dipole=1e9, gof=1.0, + bio=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, + 
fnirs_cw_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, - seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, + seeg=1e-4, dbs=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings - seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), + seeg=1e1, dbs=1e4, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), - misc=(-5., 5.), seeg=(-20., 20.), dipole=(-100., 100.), - gof=(0., 1.), bio=(-500., 500.), ecog=(-200., 200.), hbo=(0, 20), - hbr=(0, 20), csd=(-50., 50.)), + misc=(-5., 5.), seeg=(-20., 20.), dbs=(-200., 200.), + dipole=(-100., 100.), gof=(0., 1.), bio=(-500., 500.), + ecog=(-200., 200.), hbo=(0, 20), hbr=(0, 20), csd=(-50., 50.)), titles=dict(mag='Magnetometers', grad='Gradiometers', eeg='EEG', eog='EOG', - ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', bio='BIO', - dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', + ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', dbs='DBS', + bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', @@ -55,14 +60,15 @@ markersize=4), coreg=dict( mri_fid_opacity=1.0, - dig_fid_opacity=0.3, + dig_fid_opacity=1.0, - mri_fid_scale=1e-2, - dig_fid_scale=3e-2, + mri_fid_scale=5e-3, + dig_fid_scale=8e-3, extra_scale=4e-3, eeg_scale=4e-3, eegp_scale=20e-3, eegp_height=0.1, ecog_scale=5e-3, seeg_scale=5e-3, + dbs_scale=5e-3, fnirs_scale=5e-3, source_scale=5e-3, detector_scale=5e-3, @@ -73,6 +79,7 @@ extra_color=(1., 1., 1.), eeg_color=(1., 0.596, 0.588), eegp_color=(0.839, 0.15, 0.16), ecog_color=(1., 1., 1.), + dbs_color=(0.82, 0.455, 0.659), seeg_color=(1., 1., .3), fnirs_color=(1., .647, 0.), source_color=(1., .05, 0.), @@ -91,6 +98,8 @@ volume_options=dict( alpha=None, resolution=1., surface_alpha=None, blending='mip', silhouette_alpha=None, silhouette_linewidth=2.), + prefixes={'': 1e0, 'd': 1e1, 'c': 1e2, 'm': 1e3, 'µ': 1e6, 'u': 1e6, + 'n': 1e9, 'p': 1e12, 'f': 1e15} ) diff --git a/mne/dipole.py b/mne/dipole.py index 6361ed79472..5fd4f7c15a7 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -7,11 +7,11 @@ # License: Simplified BSD from copy import deepcopy +import functools from functools import partial import re import numpy as np -from scipy import linalg from .cov import read_cov, compute_whitener from .io.constants import FIFF @@ -28,12 +28,12 @@ from .surface import (transform_surface_to, _compute_nearest, _points_outside_surface) -from .bem import _bem_find_surface, _surf_name -from .source_space import _make_volume_source_space, SourceSpaces +from .bem import _bem_find_surface, _bem_surf_name +from .source_space import _make_volume_source_space, SourceSpaces, head_to_mni from .parallel import parallel_func from .utils import (logger, verbose, _time_mask, warn, _check_fname, check_fname, _pl, fill_doc, _check_option, ShiftTimeMixin, - _svd_lwork, _repeated_svd, ddot, dgemv, dgemm) + _svd_lwork, _repeated_svd, _get_blas_funcs) @fill_doc @@ -103,7 +103,10 @@ def __init__(self, times, pos, amplitude, ori, gof, self.ori = np.array(ori) self.gof = np.array(gof) self.name = name - 
self.conf = deepcopy(conf) if conf is not None else dict() + self.conf = dict() + if conf is not None: + for key, value in conf.items(): + self.conf[key] = np.array(value) self.khi2 = np.array(khi2) if khi2 is not None else None self.nfree = np.array(nfree) if nfree is not None else None self.verbose = verbose @@ -114,17 +117,18 @@ def __repr__(self): # noqa: D105 s += ", tmax : %0.3f" % np.max(self.times) return "" % s - def save(self, fname, overwrite=False): + @verbose + def save(self, fname, overwrite=False, *, verbose=None): """Save dipole in a .dip or .bdip file. Parameters ---------- fname : str The name of the .dip or .bdip file. - overwrite : bool - If True, overwrite the file (if it exists). + %(overwrite)s .. versionadded:: 0.20 + %(verbose_meth)s Notes ----- @@ -264,6 +268,27 @@ def plot_locations(self, trans, subject, subjects_dir=None, show_all, ax, block, show, scale=scale, color=color, fig=fig, title=title) + @verbose + def to_mni(self, subject, trans, subjects_dir=None, + verbose=None): + """Convert dipole location from head coordinate system to MNI coordinates. + + Parameters + ---------- + %(subject)s + %(trans_not_none)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + pos_mni : array, shape (n_pos, 3) + The MNI coordinates (in mm) of pos. + """ + mri_head_t, trans = _get_trans(trans) + return head_to_mni(self.pos, subject, mri_head_t, + subjects_dir=subjects_dir, verbose=verbose) + def plot_amplitudes(self, color='k', show=True): """Plot the dipole amplitudes as a function of time. @@ -328,7 +353,6 @@ def __len__(self): >>> len(dipoles) # doctest: +SKIP 10 - """ return self.pos.shape[0] @@ -336,7 +360,7 @@ def __len__(self): def _read_dipole_fixed(fname): """Read a fixed dipole FIF file.""" logger.info('Reading %s ...' % fname) - info, nave, aspect_kind, comment, times, data = _read_evoked(fname) + info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname) return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment) @@ -635,7 +659,7 @@ def _write_dipole_text(fname, dip): def _read_dipole_bdip(fname): name = None - nfree = 0 + nfree = None with open(fname, 'rb') as fid: # Which dipole in a multi-dipole set times = list() @@ -713,6 +737,7 @@ def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1): B_orig = B.copy() # Apply projection and whiten (cov has projections already) + _, _, dgemm = _get_ddot_dgemv_dgemm() B = dgemm(1., B, whitener.T) # column normalization doesn't affect our fitting, so skip for now @@ -729,7 +754,7 @@ def _make_guesses(surf, grid, exclude, mindist, n_jobs=1, verbose=None): """Make a guess space inside a sphere or BEM surface.""" if 'rr' in surf: logger.info('Guess surface (%s) is in %s coordinates' - % (_surf_name[surf['id']], + % (_bem_surf_name[surf['id']], _coord_frame_name(surf['coord_frame']))) else: logger.info('Making a spherical guess space with radius %7.1f mm...' @@ -758,8 +783,14 @@ def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None, return 1. - gof +@functools.lru_cache(None) +def _get_ddot_dgemv_dgemm(): + return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm')) + + def _dipole_gof(uu, sing, vv, B, B2): """Calculate the goodness of fit from the forward SVD.""" + ddot, dgemv, _ = _get_ddot_dgemv_dgemm() ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) 
> 0.2 else 2 one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B) Bm2 = ddot(one, one) # np.sum(one * one) @@ -769,6 +800,7 @@ def _dipole_gof(uu, sing, vv, B, B2): def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None): """Fit the dipole moment once the location is known.""" + from scipy import linalg if 'fwd' in fwd_data: # should be a single precomputed "guess" (i.e., fixed position) assert rd is None @@ -933,6 +965,7 @@ def _fit_confidence(rd, Q, ori, whitener, fwd_data): # # And then the confidence interval is the diagonal of C, scaled by 1.96 # (for 95% confidence). + from scipy import linalg direction = np.empty((3, 3)) # The coordinate system has the x axis aligned with the dipole orientation, direction[0] = ori @@ -1157,6 +1190,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, ----- .. versionadded:: 0.9.0 """ + from scipy import linalg # This could eventually be adapted to work with other inputs, these # are what is needed: @@ -1212,7 +1246,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, kind = 'rad' else: # MEG-only # Use the minimum distance to the MEG sensors as the radius then - R = np.dot(linalg.inv(info['dev_head_t']['trans']), + R = np.dot(np.linalg.inv(info['dev_head_t']['trans']), np.hstack([r0, [1.]]))[:3] # r0 -> device R = R - [info['chs'][pick]['loc'][:3] for pick in pick_types(info, meg=True, exclude=[])] @@ -1350,7 +1384,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards( fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs) # decompose ahead of time - guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False) + guess_fwd_svd = [linalg.svd(fwd, full_matrices=False) for fwd in np.array_split(guess_fwd, len(guess_src['rr']))] guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd, diff --git a/mne/epochs.py b/mne/epochs.py index 48f975c6917..e3342750fb1 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -11,15 +11,16 @@ # # License: BSD (3-clause) +from functools import partial from collections import Counter from copy import deepcopy import json import operator import os.path as op -import warnings import numpy as np +from .io.utils import _get_als_coords_from_chs from .io.write import (start_file, start_block, end_file, end_block, write_int, write_float, write_float_matrix, write_double_matrix, write_complex_float_matrix, @@ -31,18 +32,19 @@ from .io.tag import read_tag, read_tag_info from .io.constants import FIFF from .io.fiff.raw import _get_fname_rep -from .io.pick import (pick_types, channel_indices_by_type, channel_type, +from .io.pick import (channel_indices_by_type, channel_type, pick_channels, pick_info, _pick_data_channels, - _pick_aux_channels, _DATA_CH_TYPES_SPLIT, - _picks_to_idx) + _DATA_CH_TYPES_SPLIT, _picks_to_idx) from .io.proj import setup_proj, ProjMixin, _proj_equal from .io.base import BaseRaw, TimeMixin from .bem import _check_origin from .evoked import EvokedArray, _check_decim -from .baseline import rescale, _log_rescale +from .baseline import rescale, _log_rescale, _check_baseline from .channels.channels import (ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, InterpolationMixin) -from .filter import detrend, FilterMixin +from .filter import detrend, FilterMixin, _check_fun +from .parallel import parallel_func + from .event import _read_events_fif, make_fixed_length_events from .fixes import _get_args, rng_uniform from .viz import (plot_epochs, plot_epochs_psd, 
plot_epochs_psd_topomap, @@ -50,14 +52,17 @@ from .utils import (_check_fname, check_fname, logger, verbose, _time_mask, check_random_state, warn, _pl, sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc, - _check_pandas_installed, _check_preload, GetEpochsMixin, + _check_pandas_installed, _check_eeglabio_installed, + _check_preload, GetEpochsMixin, _prepare_read_metadata, _prepare_write_metadata, _check_event_id, _gen_events, _check_option, _check_combine, ShiftTimeMixin, _build_data_frame, _check_pandas_index_arguments, _convert_times, _scale_dataframe_data, _check_time_format, object_size, - _on_missing) + _on_missing, _validate_type, _ensure_events, + _infer_check_export_fmt) from .utils.docs import fill_doc +from .data.html_templates import epochs_template def _pack_reject_params(epochs): @@ -316,51 +321,34 @@ def _handle_event_repeated(events, event_id, event_repeated, selection, class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin, SetChannelsMixin, InterpolationMixin, FilterMixin, TimeMixin, SizeMixin, GetEpochsMixin): - """Abstract base class for Epochs-type classes. + """Abstract base class for `~mne.Epochs`-type classes. - This class provides basic functionality and should never be instantiated - directly. See Epochs below for an explanation of the parameters. + .. warning:: This class provides basic functionality and should never be + instantiated directly. Parameters ---------- info : dict - A copy of the info dict from the raw object. + A copy of the `~mne.Info` dictionary from the raw object. data : ndarray | None If ``None``, data will be read from the Raw object. If ndarray, must be of shape (n_epochs, n_channels, n_times). - events : array of int, shape (n_events, 3) - See `Epochs` docstring. - event_id : int | list of int | dict | None - See `Epochs` docstring. - tmin : float - See `Epochs` docstring. - tmax : float - See `Epochs` docstring. - baseline : None | tuple of length 2 - See `Epochs` docstring. - raw : Raw object - An instance of Raw. - %(picks_header)s - See `Epochs` docstring. + %(epochs_events_event_id)s + %(epochs_tmin_tmax)s + %(baseline_epochs)s + Defaults to ``(None, 0)``, i.e. beginning of the the data until + time point zero. + %(epochs_raw)s + %(picks_all)s %(reject_epochs)s - flat : dict | None - See `Epochs` docstring. - decim : int - See `Epochs` docstring. - reject_tmin : scalar | None - See `Epochs` docstring. - reject_tmax : scalar | None - See `Epochs` docstring. - detrend : int | None - See `Epochs` docstring. - proj : bool | 'delayed' - See `Epochs` docstring. - on_missing : str - See `Epochs` docstring. + %(flat)s + %(decim)s + %(epochs_reject_tmin_tmax)s + %(epochs_detrend)s + %(proj_epochs)s + %(epochs_on_missing)s preload_at_end : bool - Load all epochs from disk when creating the object - or wait before accessing each epoch (more memory - efficient but can be slower). + %(epochs_preload)s selection : iterable | None Iterable of indices of selected epochs. If ``None``, will be automatically generated, corresponding to all non-zero events. @@ -369,14 +357,8 @@ class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin, be ignored. filename : str | None The filename (if the epochs are read from disk). - metadata : instance of pandas.DataFrame | None - See :class:`mne.Epochs` docstring. - - .. versionadded:: 0.16 - event_repeated : str - See :class:`mne.Epochs` docstring. - - .. 
versionadded:: 0.19 + %(epochs_metadata)s + %(epochs_event_repeated)s %(verbose)s Notes @@ -397,16 +379,7 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, self.verbose = verbose if events is not None: # RtEpochs can have events=None - events_type = type(events) - with warnings.catch_warnings(record=True): - warnings.simplefilter('ignore') # deprecation for object array - events = np.asarray(events) - if not np.issubdtype(events.dtype, np.integer): - raise TypeError('events should be a NumPy array of integers, ' - f'got {events_type}') - if events.ndim != 2 or events.shape[1] != 3: - raise ValueError( - f'events must be of shape (N, 3), got {events.shape}') + events = _ensure_events(events) events_max = events.max() if events_max > INT32_MAX: raise ValueError( @@ -474,25 +447,15 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, else: raise ValueError('No desired events found.') else: - self.drop_log = list() + self.drop_log = tuple() self.selection = np.array([], int) self.metadata = metadata # do not set self.events here, let subclass do it - # check reject_tmin and reject_tmax - if (reject_tmin is not None) and (reject_tmin < tmin): - raise ValueError("reject_tmin needs to be None or >= tmin") - if (reject_tmax is not None) and (reject_tmax > tmax): - raise ValueError("reject_tmax needs to be None or <= tmax") - if (reject_tmin is not None) and (reject_tmax is not None): - if reject_tmin >= reject_tmax: - raise ValueError('reject_tmin needs to be < reject_tmax') if (detrend not in [None, 0, 1]) or isinstance(detrend, bool): raise ValueError('detrend must be None, 0, or 1') - - self.reject_tmin = reject_tmin - self.reject_tmax = reject_tmax self.detrend = detrend + self._raw = raw info._check_consistency() self.picks = _picks_to_idx(info, picks, none='all', exclude=(), @@ -527,6 +490,33 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, self._raw_times = np.arange(start_idx, int(round(tmax * sfreq)) + 1) / sfreq self._set_times(self._raw_times) + + # check reject_tmin and reject_tmax + if reject_tmin is not None: + if (np.isclose(reject_tmin, tmin)): + # adjust for potential small deviations due to sampling freq + reject_tmin = self.tmin + elif reject_tmin < tmin: + raise ValueError(f'reject_tmin needs to be None or >= tmin ' + f'(got {reject_tmin})') + + if reject_tmax is not None: + if (np.isclose(reject_tmax, tmax)): + # adjust for potential small deviations due to sampling freq + reject_tmax = self.tmax + elif reject_tmax > tmax: + raise ValueError(f'reject_tmax needs to be None or <= tmax ' + f'(got {reject_tmax})') + + if (reject_tmin is not None) and (reject_tmax is not None): + if reject_tmin >= reject_tmax: + raise ValueError(f'reject_tmin ({reject_tmin}) needs to be ' + f' < reject_tmax ({reject_tmax})') + + self.reject_tmin = reject_tmin + self.reject_tmax = reject_tmax + + # decimation self._decim = 1 self.decimate(decim) @@ -573,10 +563,11 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, def _check_consistency(self): """Check invariants of epochs object.""" - assert len(self.selection) == len(self.events) + if hasattr(self, 'events'): + assert len(self.selection) == len(self.events) + assert len(self.drop_log) >= len(self.events) assert len(self.selection) == sum( (len(dl) == 0 for dl in self.drop_log)) - assert len(self.drop_log) >= len(self.events) assert hasattr(self, '_times_readonly') assert not self.times.flags['WRITEABLE'] assert isinstance(self.drop_log, 
tuple) @@ -706,11 +697,10 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): raise RuntimeError('You cannot remove baseline correction ' 'from preloaded data once it has been ' 'applied.') - picks = _pick_data_channels(self.info, exclude=[], - with_ref_meg=True) - picks_aux = _pick_aux_channels(self.info, exclude=[]) - picks = np.sort(np.concatenate((picks, picks_aux))) + self._do_baseline = True + picks = self._detrend_picks rescale(self._data, self.times, baseline, copy=False, picks=picks) + self._do_baseline = False else: # logging happens in "rescale" in "if" branch logger.info(_log_rescale(baseline)) assert self._do_baseline is True @@ -789,7 +779,7 @@ def _reject_setup(self, reject, flat): reject_imax = idxs[-1] self._reject_time = slice(reject_imin, reject_imax) - @verbose + @verbose # verbose is used by mne-realtime def _is_good_epoch(self, data, verbose=None): """Determine if epoch is good.""" if isinstance(data, str): @@ -811,7 +801,7 @@ def _is_good_epoch(self, data, verbose=None): ignore_chs=self.info['bads']) @verbose - def _detrend_offset_decim(self, epoch, verbose=None): + def _detrend_offset_decim(self, epoch, picks, verbose=None): """Aux Function: detrend, baseline correct, offset, decim. Note: operates inplace @@ -821,17 +811,16 @@ def _detrend_offset_decim(self, epoch, verbose=None): # Detrend if self.detrend is not None: - picks = _pick_data_channels(self.info, exclude=[]) - epoch[picks] = detrend(epoch[picks], self.detrend, axis=1) + # We explicitly detrend just data channels (not EMG, ECG, EOG which + # are processed by baseline correction) + use_picks = _pick_data_channels(self.info, exclude=()) + epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1) # Baseline correct if self._do_baseline: - picks = pick_types(self.info, meg=True, eeg=True, stim=False, - ref_meg=True, eog=True, ecg=True, seeg=True, - emg=True, bio=True, ecog=True, fnirs=True, - exclude=[]) - epoch[picks] = rescale(epoch[picks], self._raw_times, - self.baseline, copy=False, verbose=False) + rescale( + epoch, self._raw_times, self.baseline, picks=picks, copy=False, + verbose=False) # Decimate if necessary (i.e., epoch not preloaded) epoch = epoch[:, self._decim_slice] @@ -856,7 +845,7 @@ def iter_evoked(self, copy=False): If False copies of data and measurement info will be omitted to save time. 
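For orientation, a minimal sketch of how the generator documented above is typically consumed; the ``epochs`` object is an assumed, pre-existing instance and is not part of this changeset:

# `epochs` is assumed to be an existing mne.Epochs instance (illustrative only)
for ev in epochs.iter_evoked(copy=False):
    print(ev.nave, ev.data.shape)  # each item is a single-trial Evoked (nave == 1)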
""" - self._current = 0 + self.__iter__() while True: try: @@ -1003,7 +992,7 @@ def standard_error(self, picks=None): return self._compute_aggregate(picks, "std") def _compute_aggregate(self, picks, mode='mean'): - """Compute the mean or std over epochs and return Evoked.""" + """Compute the mean, median, or std over epochs and return Evoked.""" # if instance contains ICA channels they won't be included unless picks # is specified if picks is None: @@ -1072,7 +1061,7 @@ def _name(self): count = Counter(self.events[:, 2]) comments = list() for key, value in self.event_id.items(): - comments.append('%.2f * %s' % ( + comments.append('%.2f × %s' % ( float(count[value]) / len(self.events), key)) comment = ' + '.join(comments) return comment @@ -1081,8 +1070,12 @@ def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, comment): """Create an evoked object from epoch data.""" info = deepcopy(info) + # don't apply baseline correction; we'll set evoked.baseline manually evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment, - nave=n_events, kind=kind, verbose=self.verbose) + nave=n_events, kind=kind, baseline=None, + verbose=self.verbose) + evoked.baseline = self.baseline + # XXX: above constructor doesn't recreate the times object precisely evoked.times = self.times.copy() @@ -1106,24 +1099,14 @@ def ch_names(self): @copy_function_doc_to_method_doc(plot_epochs) def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20, - title=None, events=None, event_colors=None, event_color=None, + title=None, events=None, event_color=None, order=None, show=True, block=False, decim='auto', noise_cov=None, butterfly=False, show_scrollbars=True, epoch_colors=None, event_id=None, group_by='type'): - if event_colors is not None: - depr_msg = ('event_colors is deprecated and will be replaced by ' - 'event_color in 0.23.') - if event_color is None: - event_color = event_colors - else: - depr_msg += (' Since you passed values for both event_colors ' - 'and event_color, event_colors will be ignored.') - warn(depr_msg, DeprecationWarning) return plot_epochs(self, picks=picks, scalings=scalings, n_epochs=n_epochs, n_channels=n_channels, title=title, events=events, event_color=event_color, - event_colors=event_colors, order=order, - show=show, block=block, decim=decim, + order=order, show=show, block=block, decim=decim, noise_cov=noise_cov, butterfly=butterfly, show_scrollbars=show_scrollbars, epoch_colors=epoch_colors, event_id=event_id, @@ -1190,12 +1173,7 @@ def drop_bad(self, reject='existing', flat='existing', verbose=None): Parameters ---------- %(reject_drop_bad)s - flat : dict | str | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. If 'existing', - then the flat parameters set at instantiation are used. 
+ %(flat_drop_bad)s %(verbose_meth)s Returns @@ -1221,7 +1199,7 @@ def drop_bad(self, reject='existing', flat='existing', verbose=None): rej in (reject, flat)): raise ValueError('reject and flat, if strings, must be "existing"') self._reject_setup(reject, flat) - self._get_data(out=False) + self._get_data(out=False, verbose=verbose) return self def drop_log_stats(self, ignore=('IGNORED',)): @@ -1244,7 +1222,7 @@ def drop_log_stats(self, ignore=('IGNORED',)): return _drop_log_stats(self.drop_log, ignore) @copy_function_doc_to_method_doc(plot_drop_log) - def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown', + def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown subj', color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',), show=True): if not self._bad_dropped: @@ -1377,10 +1355,12 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): return data[:, picks] # we need to load from disk, drop, and return data + detrend_picks = self._detrend_picks for ii, idx in enumerate(use_idx): # faster to pre-allocate memory here epoch_noproj = self._get_epoch_from_raw(idx) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) if self._do_delayed_proj: epoch_out = epoch_noproj else: @@ -1396,6 +1376,8 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): n_out = 0 drop_log = list(self.drop_log) assert n_events == len(self.selection) + if not self.preload: + detrend_picks = self._detrend_picks for idx, sel in enumerate(self.selection): if self.preload: # from memory if self._do_delayed_proj: @@ -1406,11 +1388,13 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): epoch = self._data[idx] else: # from disk epoch_noproj = self._get_epoch_from_raw(idx) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, detrend_picks) epoch = self._project_epoch(epoch_noproj) epoch_out = epoch_noproj if self._do_delayed_proj else epoch - is_good, bad_tuple = self._is_good_epoch(epoch) + is_good, bad_tuple = self._is_good_epoch( + epoch, verbose=verbose) if not is_good: assert isinstance(bad_tuple, tuple) assert all(isinstance(x, str) for x in bad_tuple) @@ -1455,6 +1439,14 @@ def _get_data(self, out=True, picks=None, item=None, verbose=None): else: return None + @property + def _detrend_picks(self): + if self._do_baseline: + return _pick_data_channels( + self.info, with_ref_meg=True, with_aux=True, exclude=()) + else: + return [] + @fill_doc def get_data(self, picks=None, item=None): """Get all epochs as a 3D array. @@ -1478,6 +1470,57 @@ def get_data(self, picks=None, item=None): """ return self._get_data(picks=picks, item=item) + @verbose + def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, + channel_wise=True, verbose=None, **kwargs): + """Apply a function to a subset of channels. + + %(applyfun_summary_epochs)s + + Parameters + ---------- + %(applyfun_fun)s + %(picks_all_data_noref)s + %(applyfun_dtype)s + %(n_jobs)s + %(applyfun_chwise_epo)s + %(verbose_meth)s + %(kwarg_fun)s + + Returns + ------- + self : instance of Epochs + The epochs object with transformed data. 
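A hedged usage sketch for the new ``apply_function`` method on epochs; the ``epochs`` object and the ``picks='eeg'`` choice are assumptions made for illustration:

import numpy as np

def rectify(x):
    # with channel_wise=True (the default), x is the 1-D time course of one
    # channel in one epoch and must be returned with the same shape
    return np.abs(x)

# `epochs` is assumed to be an existing mne.Epochs instance (illustrative only)
epochs.load_data()  # the method requires preloaded data
epochs.apply_function(rectify, picks='eeg')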
+ """ + _check_preload(self, 'epochs.apply_function') + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError('fun needs to be a function') + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + if channel_wise: + if n_jobs == 1: + _fun = partial(_check_fun, fun, **kwargs) + # modify data inplace to save memory + for idx in picks: + self._data[:, idx, :] = np.apply_along_axis( + _fun, -1, data_in[:, idx, :]) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel(p_fun( + fun, data_in[:, p, :], **kwargs) for p in picks) + for pp, p in enumerate(picks): + self._data[:, p, :] = data_picks_new[pp] + else: + self._data = _check_fun(fun, data_in, **kwargs) + + return self + @property def times(self): """Time vector in seconds.""" @@ -1514,8 +1557,7 @@ def __repr__(self): if self.baseline is None: s += 'off' else: - s += '[%s, %s] sec' % tuple(['None' if b is None else ('%g' % b) - for b in self.baseline]) + s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec' if self.baseline != _check_baseline( self.baseline, times=self.times, sfreq=self.info['sfreq'], on_baseline_outside_data='adjust'): @@ -1532,6 +1574,31 @@ def __repr__(self): class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name return '<%s | %s>' % (class_name, s) + def _repr_html_(self): + if self.baseline is None: + baseline = 'off' + else: + baseline = tuple([f'{b:.3f}' for b in self.baseline]) + baseline = f'{baseline[0]} – {baseline[1]} sec' + + if isinstance(self.event_id, dict): + events = '' + for k, v in sorted(self.event_id.items()): + n_events = sum(self.events[:, 2] == v) + events += f'{k}: {n_events}
<br>' + elif isinstance(self.event_id, list): + events = '' + for k in self.event_id: + n_events = sum(self.events[:, 2] == k) + events += f'{k}: {n_events}<br>
' + elif isinstance(self.event_id, int): + n_events = len(self.events[:, 2]) + events = f'{self.event_id}: {n_events}<br>
' + else: + events = None + return epochs_template.substitute(epochs=self, baseline=baseline, + events=events) + @verbose def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): """Crop a time interval from the epochs. @@ -1576,6 +1643,18 @@ def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): self._set_times(self.times[tmask]) self._raw_times = self._raw_times[tmask] self._data = self._data[:, :, tmask] + + # Adjust rejection period + if self.reject_tmin is not None and self.reject_tmin < self.tmin: + logger.info( + f'reject_tmin is not in epochs time interval. ' + f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)') + self.reject_tmin = self.tmin + if self.reject_tmax is not None and self.reject_tmax > self.tmax: + logger.info( + f'reject_tmax is not in epochs time interval. ' + f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)') + self.reject_tmax = self.tmax return self def copy(self): @@ -1628,9 +1707,7 @@ def save(self, fname, split_size='2GB', fmt='single', overwrite=False, will slightly differ due to the reduction in precision. .. versionadded:: 0.17 - overwrite : bool - If True, the destination file (if it exists) will be overwritten. - If False (default), an error will be raised if the file exists. + %(overwrite)s To overwrite original file (the same one that was loaded), data must be preloaded upon reading. This defaults to True in 0.18 but will change to False in 0.19. @@ -1736,7 +1813,56 @@ def save(self, fname, split_size='2GB', fmt='single', overwrite=False, this_epochs.event_id = self.event_id _save_split(this_epochs, fname, part_idx, n_parts, fmt) - def equalize_event_counts(self, event_ids, method='mintime'): + @verbose + def export(self, fname, fmt='auto', verbose=None): + """Export Epochs to external formats. + + Supported formats: EEGLAB (set, uses :mod:`eeglabio`) + %(export_warning)s + + Parameters + ---------- + %(export_params_fname)s + %(export_params_fmt)s + %(verbose)s + + Notes + ----- + %(export_eeglab_note)s + """ + supported_export_formats = { + 'eeglab': ('set',), + 'edf': ('edf',), + 'brainvision': ('eeg', 'vmrk', 'vhdr',) + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + if fmt == 'eeglab': + _check_eeglabio_installed() + import eeglabio.epochs + # load data first + self.load_data() + + # remove extra epoc and STI channels + drop_chs = ['epoc', 'STI 014'] + ch_names = [ch for ch in self.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(self.info['chs'], + drop_chs) + + eeglabio.epochs.export_set(fname, + data=self.get_data(picks=ch_names), + sfreq=self.info['sfreq'], + events=self.events, + tmin=self.tmin, tmax=self.tmax, + ch_names=ch_names, + event_id=self.event_id, + ch_locs=cart_coords) + elif fmt == 'edf': + raise NotImplementedError('Export to EDF format not implemented.') + elif fmt == 'brainvision': + raise NotImplementedError('Export to BrainVision not implemented.') + + def equalize_event_counts(self, event_ids=None, method='mintime'): """Equalize the number of trials in each condition. It tries to make the remaining epochs occurring as close as possible in @@ -1745,52 +1871,81 @@ def equalize_event_counts(self, event_ids, method='mintime'): during a recording, they could be compensated for (to some extent) in the equalization process. This method thus seeks to reduce any of those effects by minimizing the differences in the times of the events - in the two sets of epochs. 
For example, if one had event times - [1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5], - it would remove events at times [1, 2] in the first epochs and not - [120, 121]. + within a `~mne.Epochs` instance. For example, if one event type + occurred at time points ``[1, 2, 3, 4, 120, 121]`` and the another one + at ``[3.5, 4.5, 120.5, 121.5]``, this method would remove the events at + times ``[1, 2]`` for the first event type – and not the events at times + ``[120, 121]``. Parameters ---------- - event_ids : list - The event types to equalize. Each entry in the list can either be - a str (single event) or a list of str. In the case where one of - the entries is a list of str, event_ids in that list will be - grouped together before equalizing trial counts across conditions. - In the case where partial matching is used (using '/' in - ``event_ids``), ``event_ids`` will be matched according to the - provided tags, that is, processing works as if the event_ids + event_ids : None | list | dict + The event types to equalize. + + If ``None`` (default), equalize the counts of **all** event types + present in the `~mne.Epochs` instance. + + If a list, each element can either be a string (event name) or a + list of strings. In the case where one of the entries is a list of + strings, event types in that list will be grouped together before + equalizing trial counts across conditions. + + If a dictionary, the keys are considered as the event names whose + counts to equalize, i.e., passing ``dict(A=1, B=2)`` will have the + same effect as passing ``['A', 'B']``. This is useful if you intend + to pass an ``event_id`` dictionary that was used when creating + `~mne.Epochs`. + + In the case where partial matching is used (using ``/`` in + the event names), the event types will be matched according to the + provided tags, that is, processing works as if the ``event_ids`` matched by the provided tags had been supplied instead. - The event_ids must identify nonoverlapping subsets of the epochs. + The ``event_ids`` must identify non-overlapping subsets of the + epochs. method : str - If 'truncate', events will be truncated from the end of each event - list. If 'mintime', timing differences between each event list - will be minimized. + If ``'truncate'``, events will be truncated from the end of each + type of events. If ``'mintime'``, timing differences between each + event type will be minimized. Returns ------- epochs : instance of Epochs - The modified Epochs instance. + The modified instance. It is modified in-place. indices : array of int Indices from the original events list that were dropped. Notes ----- - For example (if epochs.event_id was {'Left': 1, 'Right': 2, - 'Nonspatial':3}: + For example (if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2, + 'Nonspatial':3}``: epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial']) - would equalize the number of trials in the 'Nonspatial' condition with - the total number of trials in the 'Left' and 'Right' conditions. + would equalize the number of trials in the ``'Nonspatial'`` condition + with the total number of trials in the ``'Left'`` and ``'Right'`` + conditions combined. - If multiple indices are provided (e.g. 'Left' and 'Right' in the - example above), it is not guaranteed that after equalization, the - conditions will contribute evenly. E.g., it is possible to end up - with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'. + If multiple indices are provided (e.g. 
``'Left'`` and ``'Right'`` in + the example above), it is not guaranteed that after equalization the + conditions will contribute equally. E.g., it is possible to end up + with 70 ``'Nonspatial'`` epochs, 69 ``'Left'`` and 1 ``'Right'``. + + .. versionchanged:: 0.23 + Default to equalizing all events in the passed instance if no + event names were specified explicitly. """ - if len(event_ids) == 0: + from collections.abc import Iterable + _validate_type(event_ids, types=(Iterable, None), + item_name='event_ids', type_name='list-like or None') + if isinstance(event_ids, str): + raise TypeError(f'event_ids must be list-like or None, but ' + f'received a string: {event_ids}') + + if event_ids is None: + event_ids = list(self.event_id) + elif not event_ids: raise ValueError('event_ids must have at least one element') + if not self._bad_dropped: self.drop_bad() # figure out how to equalize @@ -1939,82 +2094,6 @@ def as_type(self, ch_type='grad', mode='fast'): return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) -def _check_baseline(baseline, times, sfreq, on_baseline_outside_data='raise'): - """Check if the baseline is valid, and adjust it if requested. - - ``None`` values inside the baseline parameter will be replaced with - ``times[0]`` and ``times[-1]``. - - Parameters - ---------- - baseline : tuple - Beginning and end of the baseline period, in seconds. - times : array - The time points. - sfreq : float - The sampling rate. - on_baseline_outside_data : 'raise' | 'info' | 'adjust' - What do do if the baseline period exceeds the data. - If ``'raise'``, raise an exception (default). - If ``'info'``, log an info message. - If ``'adjust'``, adjust the baseline such that it's within the data - range again. - - Returns - ------- - (baseline_tmin, baseline_tmax) | None - The baseline with ``None`` values replaced with times, and with - adjusted times if ``on_baseline_outside_data='adjust'``; or ``None`` - if the ``baseline`` parameter is ``None``. - - """ - if baseline is None: - return None - - if not isinstance(baseline, tuple) or len(baseline) != 2: - raise ValueError('`baseline=%s` is an invalid argument, must be ' - 'a tuple of length 2 or None' % str(baseline)) - - tmin, tmax = times[0], times[-1] - tstep = 1. / float(sfreq) - - # check default value of baseline and `tmin=0` - if baseline == (None, 0) and tmin == 0: - raise ValueError('Baseline interval is only one sample. Use ' - '`baseline=(0, 0)` if this is desired.') - - baseline_tmin, baseline_tmax = baseline - - if baseline_tmin is None: - baseline_tmin = tmin - baseline_tmin = float(baseline_tmin) - - if baseline_tmax is None: - baseline_tmax = tmax - baseline_tmax = float(baseline_tmax) - - if baseline_tmin > baseline_tmax: - raise ValueError( - "Baseline min (%s) must be less than baseline max (%s)" - % (baseline_tmin, baseline_tmax)) - - if (baseline_tmin < tmin - tstep) or (baseline_tmax > tmax + tstep): - msg = (f"Baseline interval [{baseline_tmin}, {baseline_tmax}] sec " - f"is outside of epochs data [{tmin}, {tmax}] sec. Epochs were " - f"probably cropped.") - if on_baseline_outside_data == 'raise': - raise ValueError(msg) - elif on_baseline_outside_data == 'info': - logger.info(msg) - elif on_baseline_outside_data == 'adjust': - if baseline_tmin < tmin - tstep: - baseline_tmin = tmin - if baseline_tmax > tmax + tstep: - baseline_tmax = tmax - - return baseline_tmin, baseline_tmax - - def _drop_log_stats(drop_log, ignore=('IGNORED',)): """Compute drop log stats. 
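Returning to ``equalize_event_counts`` above, a brief sketch under assumed condition names (``auditory/left``, ``auditory/right``, and ``visual`` are hypothetical):

# `epochs` is an assumed existing instance whose event_id contains the names below
epochs.equalize_event_counts([['auditory/left', 'auditory/right'], 'visual'])
# with the new default (event_ids=None), all event types are equalized:
epochs_eq, dropped_idx = epochs.equalize_event_counts()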
@@ -2039,86 +2118,333 @@ def _drop_log_stats(drop_log, ignore=('IGNORED',)): return perc +def make_metadata(events, event_id, tmin, tmax, sfreq, + row_events=None, keep_first=None, keep_last=None): + """Generate metadata from events for use with `mne.Epochs`. + + This function mimics the epoching process (it constructs time windows + around time-locked "events of interest") and collates information about + any other events that occurred within those time windows. The information + is returned as a :class:`pandas.DataFrame` suitable for use as + `~mne.Epochs` metadata: one row per time-locked event, and columns + indicating presence/absence and latency of each ancillary event type. + + The function will also return a new ``events`` array and ``event_id`` + dictionary that correspond to the generated metadata. + + Parameters + ---------- + events : array, shape (m, 3) + The :term:`events array `. By default, the returned metadata + :class:`~pandas.DataFrame` will have as many rows as the events array. + To create rows for only a subset of events, pass the ``row_events`` + parameter. + event_id : dict + A mapping from event names (keys) to event IDs (values). The event + names will be incorporated as columns of the returned metadata + :class:`~pandas.DataFrame`. + tmin, tmax : float + Start and end of the time interval for metadata generation in seconds, + relative to the time-locked event of the respective time window. + + .. note:: + If you are planning to attach the generated metadata to + `~mne.Epochs` and intend to include only events that fall inside + your epochs time interval, pass the same ``tmin`` and ``tmax`` + values here as you use for your epochs. + + sfreq : float + The sampling frequency of the data from which the events array was + extracted. + row_events : list of str | str | None + Event types around which to create the time windows / for which to + create **rows** in the returned metadata :class:`pandas.DataFrame`. If + provided, the string(s) must be keys of ``event_id``. If ``None`` + (default), rows are created for **all** event types present in + ``event_id``. + keep_first : str | list of str | None + Specify subsets of :term:`hierarchical event descriptors` (HEDs, + inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which + the **first occurrence** within each time window shall be stored in + addition to the original events. + + .. note:: + There is currently no way to retain **all** occurrences of a + repeated event. The ``keep_first`` parameter can be used to specify + subsets of HEDs, effectively creating a new event type that is the + union of all events types described by the matching HED pattern. + Only the very first event of this set will be kept. + + For example, you might have two response events types, + ``response/left`` and ``response/right``; and in trials with both + responses occurring, you want to keep only the first response. In this + case, you can pass ``keep_first='response'``. This will add two new + columns to the metadata: ``response``, indicating at what **time** the + event occurred, relative to the time-locked event; and + ``first_response``, stating which **type** (``'left'`` or ``'right'``) + of event occurred. + To match specific subsets of HEDs describing different sets of events, + pass a list of these subsets, e.g. + ``keep_first=['response', 'stimulus']``. If ``None`` (default), no + event aggregation will take place and no new columns will be created. + + .. 
note:: + By default, this function will always retain the first instance + of any event in each time window. For example, if a time window + contains two ``'response'`` events, the generated ``response`` + column will automatically refer to the first of the two events. In + this specific case, it is therefore **not** necessary to make use of + the ``keep_first`` parameter – unless you need to differentiate + between two types of responses, like in the example above. + + keep_last : list of str | None + Same as ``keep_first``, but for keeping only the **last** occurrence + of matching events. The column indicating the **type** of an event + ``myevent`` will be named ``last_myevent``. + + Returns + ------- + metadata : pandas.DataFrame + Metadata for each row event, with the following columns: + + - ``event_name``, with strings indicating the name of the time-locked + event ("row event") for that specific time window + + - one column per event type in ``event_id``, with the same name; floats + indicating the latency of the event in seconds, relative to the + time-locked event + + - if applicable, additional columns named after the ``keep_first`` and + ``keep_last`` event types; floats indicating the latency of the + event in seconds, relative to the time-locked event + + - if applicable, additional columns ``first_{event_type}`` and + ``last_{event_type}`` for ``keep_first`` and ``keep_last`` event + types, respectively; the values will be strings indicating which event + types were matched by the provided HED patterns + + events : array, shape (n, 3) + The events corresponding to the generated metadata, i.e. one + time-locked event per row. + event_id : dict + The event dictionary corresponding to the new events array. This will + be identical to the input dictionary unless ``row_events`` is supplied, + in which case it will only contain the events provided there. + + Notes + ----- + The time window used for metadata generation need not correspond to the + time window used to create the `~mne.Epochs`, to which the metadata will + be attached; it may well be much shorter or longer, or not overlap at all, + if desired. This can be useful, for example, to include events that occurred + before or after an epoch, e.g. during the inter-trial interval. + + .. versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + from .utils.mixin import _hid_match + pd = _check_pandas_installed() + + _validate_type(event_id, types=(dict,), item_name='event_id') + _validate_type(row_events, types=(None, str, list, tuple), + item_name='row_events') + _validate_type(keep_first, types=(None, str, list, tuple), + item_name='keep_first') + _validate_type(keep_last, types=(None, str, list, tuple), + item_name='keep_last') + + if not event_id: + raise ValueError('event_id dictionary must contain at least one entry') + + def _ensure_list(x): + if x is None: + return [] + elif isinstance(x, str): + return [x] + else: + return list(x) + + row_events = _ensure_list(row_events) + keep_first = _ensure_list(keep_first) + keep_last = _ensure_list(keep_last) + + keep_first_and_last = set(keep_first) & set(keep_last) + if keep_first_and_last: + raise ValueError(f'The event names in keep_first and keep_last must ' + f'be mutually exclusive. 
Specified in both: ' + f'{", ".join(sorted(keep_first_and_last))}') + del keep_first_and_last + + for param_name, values in dict(keep_first=keep_first, + keep_last=keep_last).items(): + for first_last_event_name in values: + try: + _hid_match(event_id, [first_last_event_name]) + except KeyError: + raise ValueError( + f'Event "{first_last_event_name}", specified in ' + f'{param_name}, cannot be found in event_id dictionary') + + event_name_diff = sorted(set(row_events) - set(event_id.keys())) + if event_name_diff: + raise ValueError( + f'Present in row_events, but missing from event_id: ' + f'{", ".join(event_name_diff)}') + del event_name_diff + + # First and last sample of each epoch, relative to the time-locked event + # This follows the approach taken in mne.Epochs + start_sample = int(round(tmin * sfreq)) + stop_sample = int(round(tmax * sfreq)) + 1 + + # Make indexing easier + # We create the DataFrame before subsetting the events so we end up with + # indices corresponding to the original event indices. Not used for now, + # but might come in handy sometime later + events_df = pd.DataFrame(events, columns=('sample', 'prev_id', 'id')) + id_to_name_map = {v: k for k, v in event_id.items()} + + # Only keep events that are of interest + events = events[np.in1d(events[:, 2], list(event_id.values()))] + events_df = events_df.loc[events_df['id'].isin(event_id.values()), :] + + # Prepare & condition the metadata DataFrame + + # Avoid column name duplications if the exact same event name appears in + # event_id.keys() and keep_first / keep_last simultaneously + keep_first_cols = [col for col in keep_first if col not in event_id] + keep_last_cols = [col for col in keep_last if col not in event_id] + first_cols = [f'first_{col}' for col in keep_first_cols] + last_cols = [f'last_{col}' for col in keep_last_cols] + + columns = ['event_name', + *event_id.keys(), + *keep_first_cols, + *keep_last_cols, + *first_cols, + *last_cols] + + data = np.empty((len(events_df), len(columns))) + metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index) + + # Event names + metadata.iloc[:, 0] = '' + + # Event times + start_idx = 1 + stop_idx = (start_idx + len(event_id.keys()) + + len(keep_first_cols + keep_last_cols)) + metadata.iloc[:, start_idx:stop_idx] = np.nan + + # keep_first and keep_last names + start_idx = stop_idx + metadata.iloc[:, start_idx:] = None + + # We're all set, let's iterate over all eventns and fill in in the + # respective cells in the metadata. We will subset this to include only + # `row_events` later + for row_event in events_df.itertuples(name='RowEvent'): + row_idx = row_event.Index + metadata.loc[row_idx, 'event_name'] = \ + id_to_name_map[row_event.id] + + # Determine which events fall into the current epoch + window_start_sample = row_event.sample + start_sample + window_stop_sample = row_event.sample + stop_sample + events_in_window = events_df.loc[ + (events_df['sample'] >= window_start_sample) & + (events_df['sample'] <= window_stop_sample), :] + + assert not events_in_window.empty + + # Store the metadata + for event in events_in_window.itertuples(name='Event'): + event_sample = event.sample - row_event.sample + event_time = event_sample / sfreq + event_time = 0 if np.isclose(event_time, 0) else event_time + event_name = id_to_name_map[event.id] + + if not np.isnan(metadata.loc[row_idx, event_name]): + # Event already exists in current time window! 
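As an aside, a minimal sketch of calling ``make_metadata`` as documented above; ``raw``, the event names, and the epoching window are illustrative assumptions rather than part of this changeset:

import mne

# `raw` is an assumed existing Raw object; the event names below are hypothetical
events = mne.find_events(raw)
event_id = {'stimulus/left': 1, 'stimulus/right': 2,
            'response/left': 3, 'response/right': 4}
metadata, new_events, new_event_id = mne.epochs.make_metadata(
    events=events, event_id=event_id, tmin=-0.2, tmax=1.0,
    sfreq=raw.info['sfreq'], row_events=['stimulus/left', 'stimulus/right'],
    keep_first='response')
epochs = mne.Epochs(raw, new_events, new_event_id, tmin=-0.2, tmax=1.0,
                    metadata=metadata)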
+ assert metadata.loc[row_idx, event_name] <= event_time + + if event_name not in keep_last: + continue + + metadata.loc[row_idx, event_name] = event_time + + # Handle keep_first and keep_last event aggregation + for event_group_name in keep_first + keep_last: + if event_name not in _hid_match(event_id, [event_group_name]): + continue + + if event_group_name in keep_first: + first_last_col = f'first_{event_group_name}' + else: + first_last_col = f'last_{event_group_name}' + + old_time = metadata.loc[row_idx, event_group_name] + if not np.isnan(old_time): + if ((event_group_name in keep_first and + old_time <= event_time) or + (event_group_name in keep_last and + old_time >= event_time)): + continue + + if event_group_name not in event_id: + # This is an HED. Strip redundant information from the + # event name + name = (event_name + .replace(event_group_name, '') + .replace('//', '/') + .strip('/')) + metadata.loc[row_idx, first_last_col] = name + del name + + metadata.loc[row_idx, event_group_name] = event_time + + # Only keep rows of interest + if row_events: + event_id_timelocked = {name: val for name, val in event_id.items() + if name in row_events} + events = events[np.in1d(events[:, 2], + list(event_id_timelocked.values()))] + metadata = metadata.loc[ + metadata['event_name'].isin(event_id_timelocked)] + assert len(events) == len(metadata) + event_id = event_id_timelocked + + return metadata, events, event_id + + @fill_doc class Epochs(BaseEpochs): """Epochs extracted from a Raw instance. Parameters ---------- - raw : Raw object - An instance of Raw. - events : array of int, shape (n_events, 3) - The events typically returned by the read_events function. - If some events don't match the events of interest as specified - by event_id, they will be marked as 'IGNORED' in the drop log. - event_id : int | list of int | dict | None - The id of the event to consider. If dict, - the keys can later be used to access associated events. Example: - dict(auditory=1, visual=3). If int, a dict will be created with - the id as string. If a list, all events with the IDs specified - in the list are used. If None, all events will be used with - and a dict is created with string integer names corresponding - to the event id integers. - tmin : float - Start time before event. If nothing is provided, defaults to -0.2. - tmax : float - End time after event. If nothing is provided, defaults to 0.5. + %(epochs_raw)s + %(epochs_events_event_id)s + %(epochs_tmin_tmax)s %(baseline_epochs)s Defaults to ``(None, 0)``, i.e. beginning of the the data until time point zero. %(picks_all)s preload : bool - Load all epochs from disk when creating the object - or wait before accessing each epoch (more memory - efficient but can be slower). + %(epochs_preload)s %(reject_epochs)s - flat : dict | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. + %(flat)s %(proj_epochs)s %(decim)s - reject_tmin : scalar | None - Start of the time window used to reject epochs (with the default None, - the window will start with tmin). - reject_tmax : scalar | None - End of the time window used to reject epochs (with the default None, - the window will end with tmax). - detrend : int | None - If 0 or 1, the data channels (MEG and EEG) will be detrended when - loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None - is no detrending. 
Note that detrending is performed before baseline - correction. If no DC offset is preferred (zeroth order detrending), - either turn off baseline correction, as this may introduce a DC - shift, or set baseline correction to use the entire time interval - (will yield equivalent results but be slower). - on_missing : str - What to do if one or several event ids are not found in the recording. - Valid keys are 'raise' | 'warn' | 'ignore' - Default is 'raise'. If on_missing is 'warn' it will proceed but - warn, if 'ignore' it will proceed silently. Note. - If none of the event ids are found in the data, an error will be - automatically generated irrespective of this parameter. + %(epochs_reject_tmin_tmax)s + %(epochs_detrend)s + %(epochs_on_missing)s %(reject_by_annotation_epochs)s - metadata : instance of pandas.DataFrame | None - A :class:`pandas.DataFrame` specifying metadata about each epoch. - If given, ``len(metadata)`` must equal ``len(events)``. The DataFrame - may only contain values of type (str | int | float | bool). - If metadata is given, then pandas-style queries may be used to select - subsets of data, see :meth:`mne.Epochs.__getitem__`. - When a subset of the epochs is created in this (or any other - supported) manner, the metadata object is subsetted accordingly, and - the row indices will be modified to match ``epochs.selection``. - - .. versionadded:: 0.16 - event_repeated : str - How to handle duplicates in ``events[:, 0]``. Can be ``'error'`` - (default), to raise an error, 'drop' to only retain the row occurring - first in the ``events``, or ``'merge'`` to combine the coinciding - events (=duplicates) into a new event (see Notes for details). - - .. versionadded:: 0.19 + %(epochs_metadata)s + %(epochs_event_repeated)s %(verbose)s Attributes @@ -2285,11 +2611,7 @@ class EpochsArray(BaseEpochs): and a dict is created with string integer names corresponding to the event id integers. %(reject_epochs)s - flat : dict | None - Rejection parameters based on flatness of signal. - Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values - are floats that set the minimum acceptable peak-to-peak amplitude. - If flat is None then no rejection is done. + %(flat)s reject_tmin : scalar | None Start of the time window used to reject epochs (with the default None, the window will start with tmin). @@ -2323,7 +2645,7 @@ class EpochsArray(BaseEpochs): ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr @@ -2362,9 +2684,10 @@ def __init__(self, data, info, events=None, tmin=0, event_id=None, list(self.event_id.values())).sum(): raise ValueError('The events must only contain event numbers from ' 'event_id') - for ii, e in enumerate(self._data): + detrend_picks = self._detrend_picks + for e in self._data: # This is safe without assignment b/c there is no decim - self._detrend_offset_decim(e) + self._detrend_offset_decim(e, detrend_picks) self.drop_bad() @@ -2384,9 +2707,14 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True): copy : bool Whether to return a new instance or modify in place. + Returns + ------- + epochs : instance of Epochs + The modified epochs. 
+ Notes ----- - This For example (if epochs.event_id was {'Left': 1, 'Right': 2}: + This For example (if epochs.event_id was ``{'Left': 1, 'Right': 2}``:: combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12}) @@ -3171,7 +3499,7 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, origin='auto', weight_all=True, int_order=8, ext_order=3, destination=None, ignore_ref=False, return_mapping=False, mag_scale=100., verbose=None): - u"""Average data using Maxwell filtering, transforming using head positions. + """Average data using Maxwell filtering, transforming using head positions. Parameters ---------- @@ -3190,7 +3518,6 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, receive uniform weight per epoch. %(maxwell_int)s %(maxwell_ext)s - %(maxwell_reg)s %(maxwell_dest)s %(maxwell_ref)s return_mapping : bool @@ -3228,7 +3555,6 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic multichannel data: The signal space separation method," Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005. - .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements of children in MEG: Quantification, effects on source estimation, and compensation. NeuroImage 40:541–550, 2008. @@ -3352,7 +3678,7 @@ def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None, @verbose def make_fixed_length_epochs(raw, duration=1., preload=False, - reject_by_annotation=True, proj=True, + reject_by_annotation=True, proj=True, overlap=0., verbose=None): """Divide continuous raw data into equal-sized consecutive epochs. @@ -3369,6 +3695,11 @@ def make_fixed_length_epochs(raw, duration=1., preload=False, %(proj_epochs)s .. versionadded:: 0.22.0 + overlap : float + The overlap between epochs, in seconds. Must be + ``0 <= overlap < duration``. Default is 0, i.e., no overlap. + + .. versionadded:: 0.23.0 %(verbose)s Returns @@ -3380,7 +3711,8 @@ def make_fixed_length_epochs(raw, duration=1., preload=False, ----- .. versionadded:: 0.20 """ - events = make_fixed_length_events(raw, 1, duration=duration) + events = make_fixed_length_events(raw, 1, duration=duration, + overlap=overlap) delta = 1. / raw.info['sfreq'] return Epochs(raw, events, event_id=[1], tmin=0, tmax=duration - delta, baseline=None, preload=preload, diff --git a/mne/event.py b/mne/event.py index 8a76d92e704..3d94809a924 100644 --- a/mne/event.py +++ b/mne/event.py @@ -98,7 +98,7 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, tmax : float The upper limit border in seconds from the target event. new_id : int - new_id for the new event + New ID for the new event. fill_na : int | None Fill event to be inserted if target is not available within the time window specified. If None, the 'null' events will be dropped. @@ -106,9 +106,9 @@ def define_target_events(events, reference_id, target_id, sfreq, tmin, tmax, Returns ------- new_events : ndarray - The new defined events + The new defined events. lag : ndarray - time lag between reference and target in milliseconds. + Time lag between reference and target in milliseconds. """ if new_id is None: new_id = reference_id @@ -823,7 +823,7 @@ def shift_time_events(events, ids, tshift, sfreq): Parameters ---------- events : array, shape=(n_events, 3) - The events + The events. ids : ndarray of int | None The ids of events to shift. 
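To illustrate the new ``overlap`` parameter of ``make_fixed_length_epochs`` shown above (``raw`` is an assumed, already-loaded Raw instance):

import mne

# 2-second epochs advancing in 1-second steps, i.e. 50% overlap (illustrative)
epochs = mne.make_fixed_length_epochs(raw, duration=2., overlap=1., preload=True)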
tshift : float @@ -858,12 +858,12 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., id : int The id to use (default 1). start : float - Time of first event. + Time of first event (in seconds). stop : float | None - Maximum time of last event. If None, events extend to the end - of the recording. + Maximum time of last event (in seconds). If None, events extend to the + end of the recording. duration : float - The duration to separate events by. + The duration to separate events by (in seconds). first_samp : bool If True (default), times will have raw.first_samp added to them, as in :func:`mne.find_events`. This behavior is not desirable if the @@ -871,7 +871,8 @@ def make_fixed_length_events(raw, id=1, start=0, stop=None, duration=1., have ``raw.first_samp`` added to them, e.g. event times that come from :func:`mne.find_events`. overlap : float - The overlap between events. Must be ``0 <= overlap < duration``. + The overlap between events (in seconds). + Must be ``0 <= overlap < duration``. .. versionadded:: 0.18 @@ -1142,7 +1143,13 @@ def __getitem__(self, item): return cats[0] if len(cats) == 1 else cats def __len__(self): - """Return number of averaging categories marked active in DACQ.""" + """Return number of averaging categories marked active in DACQ. + + Returns + ------- + n_cat : int + The number of categories. + """ return len(self.categories) def _events_from_acq_pars(self): diff --git a/mne/evoked.py b/mne/evoked.py index c3527aba76e..340d0808385 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -11,17 +11,18 @@ from copy import deepcopy import numpy as np -from .baseline import rescale +from .baseline import rescale, _log_rescale, _check_baseline from .channels.channels import (ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, InterpolationMixin) from .channels.layout import _merge_ch_data, _pair_grad_sensors from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT -from .filter import detrend, FilterMixin +from .filter import detrend, FilterMixin, _check_fun from .utils import (check_fname, logger, verbose, _time_mask, warn, sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc, _validate_type, fill_doc, _check_option, ShiftTimeMixin, _build_data_frame, _check_pandas_installed, _check_pandas_index_arguments, - _convert_times, _scale_dataframe_data, _check_time_format) + _convert_times, _scale_dataframe_data, _check_time_format, + _check_preload) from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field, plot_evoked_image, plot_evoked_topo) from .viz.evoked import plot_evoked_white, plot_evoked_joint @@ -32,12 +33,14 @@ from .io.tag import read_tag from .io.tree import dir_tree_find from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT -from .io.meas_info import read_meas_info, write_meas_info +from .io.meas_info import (read_meas_info, write_meas_info, + _read_extended_ch_info, _rename_list) from .io.proj import ProjMixin from .io.write import (start_file, start_block, end_file, end_block, write_int, write_string, write_float_matrix, write_id, write_float, write_complex_float_matrix) from .io.base import TimeMixin, _check_maxshield +from .parallel import parallel_func _aspect_dict = { 'average': FIFF.FIFFV_ASPECT_AVERAGE, @@ -108,11 +111,14 @@ class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, SetChannelsMixin, Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval between consecutive time samples is equal to the inverse of the sampling frequency. 
+ baseline : None | tuple of length 2 + This attribute reflects whether the data has been baseline-corrected + (it will be a ``tuple`` then) or not (it will be ``None``). %(verbose)s Notes ----- - Evoked objects contain a single condition only. + Evoked objects can only contain the average of a single set of conditions. """ @verbose @@ -122,7 +128,8 @@ def __init__(self, fname, condition=None, proj=True, _validate_type(proj, bool, "'proj'") # Read the requested data self.info, self.nave, self._aspect_kind, self.comment, self.times, \ - self.data = _read_evoked(fname, condition, kind, allow_maxshield) + self.data, self.baseline = _read_evoked(fname, condition, kind, + allow_maxshield) self._update_first_last() self.verbose = verbose self.preload = True @@ -150,6 +157,54 @@ def data(self, data): """Set the data matrix.""" self._data = data + @verbose + def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, + verbose=None, **kwargs): + """Apply a function to a subset of channels. + + %(applyfun_summary_evoked)s + + Parameters + ---------- + %(applyfun_fun_evoked)s + %(picks_all_data_noref)s + %(applyfun_dtype)s + %(n_jobs)s + %(verbose_meth)s + %(kwarg_fun)s + + Returns + ------- + self : instance of Evoked + The evoked object with transformed data. + """ + _check_preload(self, 'evoked.apply_function') + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError('fun needs to be a function') + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + # check the dimension of the incoming evoked data + _check_option('evoked.ndim', self._data.ndim, [2]) + + if n_jobs == 1: + # modify data inplace to save memory + for idx in picks: + self._data[idx, :] = _check_fun(fun, data_in[idx, :], **kwargs) + else: + # use parallel function + parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) + data_picks_new = parallel(p_fun( + fun, data_in[p, :], **kwargs) for p in picks) + for pp, p in enumerate(picks): + self._data[p, :] = data_picks_new[pp] + + return self + @verbose def apply_baseline(self, baseline=(None, 0), *, verbose=None): """Baseline correct evoked data. @@ -172,7 +227,19 @@ def apply_baseline(self, baseline=(None, 0), *, verbose=None): .. versionadded:: 0.13.0 """ - self.data = rescale(self.data, self.times, baseline, copy=False) + baseline = _check_baseline(baseline, times=self.times, + sfreq=self.info['sfreq']) + if self.baseline is not None and baseline is None: + raise ValueError('The data has already been baseline-corrected. ' + 'Cannot remove existing basline correction.') + elif baseline is None: + # Do not rescale + logger.info(_log_rescale(None)) + else: + # Actually baseline correct the data. Logging happens in rescale(). + self.data = rescale(self.data, self.times, baseline, copy=False) + self.baseline = baseline + return self def save(self, fname): @@ -181,19 +248,32 @@ def save(self, fname): Parameters ---------- fname : str - The name of the file, which should end with -ave.fif or - -ave.fif.gz. + The name of the file, which should end with ``-ave.fif(.gz)`` or + ``_ave.fif(.gz)``. Notes ----- To write multiple conditions into a single file, use - :func:`mne.write_evokeds`. + `mne.write_evokeds`. + + .. versionchanged:: 0.23 + Information on baseline correction will be stored with the data, + and will be restored when reading again via `mne.read_evokeds`. 
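A minimal usage sketch of the new ``Evoked.apply_function`` method added above; the file name is an assumed placeholder, and any averaged ``-ave.fif`` file would do:

import numpy as np
import mne

# assumed: 'sample-ave.fif' is an existing evoked file on disk
evoked = mne.read_evokeds('sample-ave.fif', condition=0)
# fun receives one channel at a time as a 1D array and must return an array
# of the same shape; here we simply rectify the EEG channels in place
evoked.apply_function(np.abs, picks='eeg')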
""" write_evokeds(fname, self) def __repr__(self): # noqa: D105 s = "'%s' (%s, N=%s)" % (self.comment, self.kind, self.nave) - s += ", [%0.5g, %0.5g] sec" % (self.times[0], self.times[-1]) + s += ", %0.5g – %0.5g sec" % (self.times[0], self.times[-1]) + s += ', baseline ' + if self.baseline is None: + s += 'off' + else: + s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec' + if self.baseline != _check_baseline( + self.baseline, times=self.times, sfreq=self.info['sfreq'], + on_baseline_outside_data='adjust'): + s += ' (baseline period was cropped after baseline correction)' s += ", %s ch" % self.data.shape[0] s += ", ~%s" % (sizeof_fmt(self._size),) return "" % s @@ -241,11 +321,26 @@ def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): ----- %(notes_tmax_included_by_default)s """ + if tmin is None: + tmin = self.tmin + elif tmin < self.tmin: + warn(f'tmin is not in Evoked time interval. tmin is set to ' + f'evoked.tmin ({self.tmin:g} sec)') + tmin = self.tmin + + if tmax is None: + tmax = self.tmax + elif tmax > self.tmax: + warn(f'tmax is not in Evoked time interval. tmax is set to ' + f'evoked.tmax ({self.tmax:g} sec)') + tmax = self.tmax + mask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'], include_tmax=include_tmax) self.times = self.times[mask] self._update_first_last() self.data = self.data[:, mask] + return self @verbose @@ -319,18 +414,20 @@ def plot_topo(self, layout=None, layout_scale=0.945, color=None, border='none', ylim=None, scalings=None, title=None, proj=False, vline=[0.0], fig_background=None, merge_grads=False, legend=True, axes=None, - background_color='w', noise_cov=None, show=True): + background_color='w', noise_cov=None, exclude='bads', + show=True): """ Notes ----- .. versionadded:: 0.10.0 """ return plot_evoked_topo( - self, layout=layout, layout_scale=layout_scale, color=color, - border=border, ylim=ylim, scalings=scalings, title=title, - proj=proj, vline=vline, fig_background=fig_background, + self, layout=layout, layout_scale=layout_scale, + color=color, border=border, ylim=ylim, scalings=scalings, + title=title, proj=proj, vline=vline, fig_background=fig_background, merge_grads=merge_grads, legend=legend, axes=axes, - background_color=background_color, noise_cov=noise_cov, show=show) + background_color=background_color, noise_cov=noise_cov, + exclude=exclude, show=show) @copy_function_doc_to_method_doc(plot_evoked_topomap) def plot_topomap(self, times="auto", ch_type=None, vmin=None, @@ -381,7 +478,8 @@ def plot_joint(self, times="peaks", title='', picks=None, @fill_doc def animate_topomap(self, ch_type=None, times=None, frame_rate=None, butterfly=False, blit=True, show=True, time_unit='s', - sphere=None): + sphere=None, *, extrapolate=_EXTRAPOLATE_DEFAULT, + verbose=None): """Make animation of evoked data as topomap timeseries. The animation can be paused/resumed with left mouse button. @@ -418,6 +516,10 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, .. versionadded:: 0.16 %(topomap_sphere_auto)s + %(topomap_extrapolate)s + + .. versionadded:: 0.22 + %(verbose_meth)s Returns ------- @@ -433,7 +535,7 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, return _topomap_animation( self, ch_type=ch_type, times=times, frame_rate=frame_rate, butterfly=butterfly, blit=blit, show=show, time_unit=time_unit, - sphere=sphere) + sphere=sphere, extrapolate=extrapolate, verbose=verbose) def as_type(self, ch_type='grad', mode='fast'): """Compute virtual evoked using interpolated fields. 
@@ -511,7 +613,10 @@ def __neg__(self): """ out = self.copy() out.data *= -1 - out.comment = '-' + (out.comment or 'unknown') + + if out.comment is not None and ' + ' in out.comment: + out.comment = f'({out.comment})' # multiple conditions in evoked + out.comment = f'- {out.comment or "unknown"}' return out def get_peak(self, ch_type=None, tmin=None, tmax=None, @@ -558,7 +663,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. versionadded:: 0.16 """ # noqa: E501 - supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', + supported = ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'misc', 'None') + _FNIRS_CH_TYPES_SPLIT types_used = self.get_channel_types(unique=True, only_data_chs=True) @@ -580,7 +685,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, raise ValueError('Negative mode (mode=neg) does not make ' 'sense with merge_grads=True') - meg = eeg = misc = seeg = ecog = fnirs = False + meg = eeg = misc = seeg = dbs = ecog = fnirs = False picks = None if ch_type in ('mag', 'grad'): meg = ch_type @@ -590,6 +695,8 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, misc = True elif ch_type == 'seeg': seeg = True + elif ch_type == 'dbs': + dbs = True elif ch_type == 'ecog': ecog = True elif ch_type in _FNIRS_CH_TYPES_SPLIT: @@ -601,7 +708,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, else: picks = pick_types(self.info, meg=meg, eeg=eeg, misc=misc, seeg=seeg, ecog=ecog, ref_meg=False, - fnirs=fnirs) + fnirs=fnirs, dbs=dbs) data = self.data ch_names = self.ch_names @@ -718,6 +825,10 @@ class EvokedArray(Evoked): Number of averaged epochs. Defaults to 1. kind : str Type of data, either average or standard_error. Defaults to 'average'. + %(baseline_evoked)s + Defaults to ``None``, i.e. no baseline correction. + + .. versionadded:: 0.23 %(verbose)s See Also @@ -727,7 +838,7 @@ class EvokedArray(Evoked): Notes ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr @@ -737,7 +848,7 @@ class EvokedArray(Evoked): @verbose def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', - verbose=None): # noqa: D102 + baseline=None, verbose=None): # noqa: D102 dtype = np.complex128 if np.iscomplexobj(data) else np.float64 data = np.asanyarray(data, dtype=dtype) @@ -770,6 +881,10 @@ def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', '"standard_error"' % (self.kind,)) self._aspect_kind = _aspect_dict[self.kind] + self.baseline = baseline + if self.baseline is not None: # omit log msg if not baselining + self.apply_baseline(self.baseline, verbose=self.verbose) + def _get_entries(fid, evoked_node, allow_maxshield=False): """Get all evoked entries.""" @@ -913,8 +1028,27 @@ def combine_evoked(all_evoked, weights): evoked.info['bads'] = bads evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked)) evoked.nave = new_nave - evoked.comment = ' + '.join(f'{w:0.3f} × {e.comment or "unknown"}' - for w, e in zip(weights, all_evoked)) + + comment = '' + for idx, (w, e) in enumerate(zip(weights, all_evoked)): + # pick sign + sign = '' if w >= 0 else '-' + # format weight + weight = '' if np.isclose(abs(w), 1.) 
else f'{abs(w):0.3f}' + # format multiplier + multiplier = ' × ' if weight else '' + # format comment + if e.comment is not None and ' + ' in e.comment: # multiple conditions + this_comment = f'({e.comment})' + else: + this_comment = f'{e.comment or "unknown"}' + # assemble everything + if idx == 0: + comment += f'{sign}{weight}{multiplier}{this_comment}' + else: + comment += f' {sign or "+"} {weight}{multiplier}{this_comment}' + # special-case: combine_evoked([e1, -e2], [1, -1]) + evoked.comment = comment.replace(' - - ', ' + ') return evoked @@ -932,7 +1066,18 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', can contain multiple datasets. If None, all datasets are returned as a list. %(baseline_evoked)s - Defaults to ``None``, i.e. no baseline correction. + If ``None`` (default), do not apply baseline correction. + + .. note:: Note that if the read `~mne.Evoked` objects have already + been baseline-corrected, the data retrieved from disk will + **always** be baseline-corrected (in fact, only the + baseline-corrected version of the data will be saved, so + there is no way to undo this procedure). Only **after** the + data has been loaded, a custom (additional) baseline + correction **may** be optionally applied by passing a tuple + here. Passing ``None`` will **not** remove an existing + baseline correction, but merely omit the optional, additional + baseline correction. kind : str Either 'average' or 'standard_error', the type of data to read. proj : bool @@ -948,12 +1093,20 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', Returns ------- evoked : Evoked or list of Evoked - The evoked dataset(s); one Evoked if condition is int or str, - or list of Evoked if condition is None or list. + The evoked dataset(s); one `~mne.Evoked` if ``condition`` is an + integer or string; or a list of `~mne.Evoked` if ``condition`` is + ``None`` or a list. See Also -------- write_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + If the read `~mne.Evoked` objects had been baseline-corrected before + saving, this will be reflected in their ``baseline`` attribute after + reading. 
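To make the comment-building loop in ``combine_evoked`` above concrete, a small sketch with invented condition names; the printed string is a best guess from the logic above, not a verified result:

import numpy as np
import mne

info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=1000., ch_types='eeg')
evk_a = mne.EvokedArray(np.zeros((2, 100)), info, comment='aud')
evk_b = mne.EvokedArray(np.zeros((2, 100)), info, comment='vis')
diff_wave = mne.combine_evoked([evk_a, evk_b], weights=[1, -1])
print(diff_wave.comment)  # expected: 'aud - vis'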
""" check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', '_ave.fif', '_ave.fif.gz')) @@ -966,10 +1119,21 @@ def read_evokeds(fname, condition=None, baseline=None, kind='average', condition = [condition] return_list = False - out = [Evoked(fname, c, kind=kind, proj=proj, - allow_maxshield=allow_maxshield, - verbose=verbose).apply_baseline(baseline) - for c in condition] + out = [] + for c in condition: + evoked = Evoked(fname, c, kind=kind, proj=proj, + allow_maxshield=allow_maxshield, + verbose=verbose) + if baseline is None and evoked.baseline is None: + logger.info(_log_rescale(None)) + elif baseline is None and evoked.baseline is not None: + # Don't touch an existing baseline + bmin, bmax = evoked.baseline + logger.info(f'Loaded Evoked data is baseline-corrected ' + f'(baseline: [{bmin:g}, {bmax:g}] sec)') + else: + evoked.apply_baseline(baseline) + out.append(evoked) return out if return_list else out[0] @@ -1031,6 +1195,7 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): nchan = 0 sfreq = -1 chs = [] + baseline = bmin = bmax = None comment = last = first = first_time = nsamp = None for k in range(my_evoked['nent']): my_kind = my_evoked['directory'][k].kind @@ -1059,10 +1224,21 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): elif my_kind == FIFF.FIFF_NO_SAMPLES: tag = read_tag(fid, pos) nsamp = int(tag.data) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MIN: + tag = read_tag(fid, pos) + bmin = float(tag.data) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MAX: + tag = read_tag(fid, pos) + bmax = float(tag.data) if comment is None: comment = 'No comment' + if bmin is not None or bmax is not None: + # None's should've been replaced with floats + assert bmin is not None and bmax is not None + baseline = (bmin, bmax) + # Local channel information? if nchan > 0: if chs is None: @@ -1073,7 +1249,9 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): raise ValueError('Number of channels and number of ' 'channel definitions are different') + ch_names_mapping = _read_extended_ch_info(chs, my_evoked, fid) info['chs'] = chs + info['bads'][:] = _rename_list(info['bads'], ch_names_mapping) logger.info(' Found channel information in evoked data. ' 'nchan = %d' % nchan) if sfreq > 0: @@ -1144,7 +1322,7 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): for k in range(info['nchan'])]) data *= cals[:, np.newaxis] - return info, nave, aspect_kind, comment, times, data + return info, nave, aspect_kind, comment, times, data, baseline def write_evokeds(fname, evoked): @@ -1162,6 +1340,13 @@ def write_evokeds(fname, evoked): See Also -------- read_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + Information on baseline correction will be stored with each individual + `~mne.Evoked` object, and will be restored when reading the data again + via `mne.read_evokeds`. 
""" _write_evokeds(fname, evoked) @@ -1169,6 +1354,8 @@ def write_evokeds(fname, evoked): def _write_evokeds(fname, evoked, check=True): """Write evoked data.""" from .epochs import _compare_epochs_infos + from .dipole import DipoleFixed # avoid circular import + if check: check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', '_ave.fif', '_ave.fif.gz')) @@ -1205,7 +1392,13 @@ def _write_evokeds(fname, evoked, check=True): write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first) write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last) - # The epoch itself + # Baseline + if not isinstance(e, DipoleFixed) and e.baseline is not None: + bmin, bmax = e.baseline + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) + + # The evoked data itself if e.info.get('maxshield'): aspect = FIFF.FIFFB_IAS_ASPECT else: diff --git a/mne/externals/h5io/_h5io.py b/mne/externals/h5io/_h5io.py index ce9724b5ae6..8c7afbdbf22 100644 --- a/mne/externals/h5io/_h5io.py +++ b/mne/externals/h5io/_h5io.py @@ -11,10 +11,6 @@ from os import path as op import numpy as np -try: - from scipy import sparse -except ImportError: - sparse = None # Adapted from six PY3 = sys.version_info[0] == 3 @@ -25,6 +21,14 @@ tab_str = '----' +def _import_sparse(): + try: + from scipy import sparse + except ImportError: + sparse = None + return sparse + + ############################################################################## # WRITING @@ -126,6 +130,7 @@ def write_hdf5(fname, data, overwrite=False, compression=4, def _triage_write(key, value, root, comp_kw, where, cleanup_data, slash='error', title=None, use_json=False): + sparse = _import_sparse() if key != title and '/' in key: if slash == 'error': raise ValueError('Found a key with "/", ' @@ -274,6 +279,7 @@ def _triage_read(node, slash='ignore'): if slash not in ['ignore', 'replace']: raise ValueError("slash must be one of 'replace', 'ignore'") h5py = _check_h5py() + sparse = _import_sparse() type_str = node.attrs['TITLE'] if isinstance(type_str, bytes): type_str = type_str.decode() @@ -382,7 +388,7 @@ def object_diff(a, b, pre=''): diffs : str A string representation of the differences. """ - + sparse = _import_sparse() try: from pandas import DataFrame, Series except ImportError: diff --git a/mne/filter.py b/mne/filter.py index 5477604ba67..9bb411ef9e3 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -7,10 +7,10 @@ import numpy as np from .annotations import _annotations_starts_stops +from .fixes import _import_fft from .io.pick import _picks_to_idx from .cuda import (_setup_cuda_fft_multiply_repeated, _fft_multiply_repeated, _setup_cuda_fft_resample, _fft_resample, _smart_pad) -from .fixes import irfft, ifftshift, fftfreq from .parallel import parallel_func, check_n_jobs from .time_frequency.multitaper import _mt_spectra, _compute_mt_params from .utils import (logger, verbose, sum_squared, warn, _pl, @@ -1428,6 +1428,7 @@ def resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', n_jobs=1, up=up/down and down=1. """ from scipy.signal import get_window + ifftshift, fftfreq = _import_fft(('ifftshift', 'fftfreq')) # check explicitly for backwards compatibility if not isinstance(axis, int): err = ("The axis parameter needs to be an integer (got %s). " @@ -2255,6 +2256,7 @@ def design_mne_c_filter(sfreq, l_freq=None, h_freq=40., 4197 frequencies are directly constructed, with zeroes in the stop-band and ones in the passband, with squared cosine ramps in between. 
""" + irfft = _import_fft('irfft') n_freqs = (4096 + 2 * 2048) // 2 + 1 freq_resp = np.ones(n_freqs) l_freq = 0 if l_freq is None else float(l_freq) diff --git a/mne/fixes.py b/mne/fixes.py index d7abc0d69a8..df8a80571a9 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -12,22 +12,35 @@ # Lars Buitinck # License: BSD -import inspect from distutils.version import LooseVersion +import functools +import inspect from math import log import os from pathlib import Path import warnings import numpy as np -import scipy -from scipy import linalg -from scipy.linalg import LinAlgError ############################################################################### # Misc +def _median_complex(data, axis): + """Compute marginal median on complex data safely. + + XXX: Can be removed when numpy introduces a fix. + See: https://github.com/scipy/scipy/pull/12676/. + """ + # np.median must be passed real arrays for the desired result + if np.iscomplexobj(data): + data = (np.median(np.real(data), axis=axis) + + 1j * np.median(np.imag(data), axis=axis)) + else: + data = np.median(data, axis=axis) + return data + + # helpers to get function arguments def _get_args(function, varargs=False): params = inspect.signature(function).parameters @@ -49,6 +62,7 @@ def _safe_svd(A, **kwargs): # https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501 # For SciPy 0.18 and up, we can work around it by using # lapack_driver='gesvd' instead. + from scipy import linalg if kwargs.get('overwrite_a', False): raise ValueError('Cannot set overwrite_a=True with this function') try: @@ -63,6 +77,11 @@ def _safe_svd(A, **kwargs): raise +def _csc_matrix_cast(x): + from scipy.sparse import csc_matrix + return csc_matrix(x) + + ############################################################################### # Backporting nibabel's read_geometry @@ -144,10 +163,22 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): ############################################################################### # Triaging FFT functions to get fast pocketfft (SciPy 1.4) -try: - from scipy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift -except ImportError: - from numpy.fft import fft, ifft, fftfreq, rfft, irfft, rfftfreq, ifftshift +@functools.lru_cache(None) +def _import_fft(name): + single = False + if not isinstance(name, tuple): + name = (name,) + single = True + try: + from scipy.fft import rfft # noqa analysis:ignore + except ImportError: + from numpy import fft # noqa + else: + from scipy import fft # noqa + out = [getattr(fft, n) for n in name] + if single: + out = out[0] + return out ############################################################################### @@ -301,7 +332,7 @@ def is_regressor(estimator): class BaseEstimator(object): - """Base class for all estimators in scikit-learn + """Base class for all estimators in scikit-learn. Notes ----- @@ -342,13 +373,13 @@ def get_params(self, deep=True): Parameters ---------- - deep : boolean, optional + deep : bool, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- - params : mapping of string to any + params : dict Parameter names mapped to their values. """ out = dict() @@ -376,13 +407,21 @@ def get_params(self, deep=True): def set_params(self, **params): """Set the parameters of this estimator. + The method works on simple estimators as well as on nested objects (such as pipelines). 
The latter have parameters of the form ``__`` so that it's possible to update each component of a nested object. + + Parameters + ---------- + **params : dict + Parameters. + Returns ------- - self + inst : instance + The object. """ if not params: # Simple optimisation to gain speed (inspect is slow) @@ -564,6 +603,7 @@ def _set_covariance(self, covariance): is computed. """ + from scipy import linalg # covariance = check_array(covariance) # set covariance self.covariance_ = covariance @@ -582,6 +622,7 @@ def get_precision(self): The precision matrix associated to the current covariance object. """ + from scipy import linalg if self.store_precision: precision = self.precision_ else: @@ -589,23 +630,21 @@ def get_precision(self): return precision def fit(self, X, y=None): - """Fits the Maximum Likelihood Estimator covariance model - according to the given training data and parameters. + """Fit the Maximum Likelihood Estimator covariance model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. - - y : not used, present for API consistence purpose. + y : ndarray | None + Not used, present for API consistency. Returns ------- self : object Returns self. - - """ + """ # noqa: E501 # X = check_array(X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) @@ -618,8 +657,9 @@ def fit(self, X, y=None): return self def score(self, X_test, y=None): - """Computes the log-likelihood of a Gaussian data set with - `self.covariance_` as an estimator of its covariance matrix. + """Compute the log-likelihood of a Gaussian dataset. + + Uses ``self.covariance_`` as an estimator of its covariance matrix. Parameters ---------- @@ -628,15 +668,14 @@ def score(self, X_test, y=None): the number of samples and n_features is the number of features. X_test is assumed to be drawn from the same distribution than the data used in fit (including centering). - - y : not used, present for API consistence purpose. + y : ndarray | None + Not used, present for API consistency. Returns ------- res : float The likelihood of the data set with `self.covariance_` as an estimator of its covariance matrix. - """ # compute empirical covariance of the test set test_cov = empirical_covariance( @@ -649,23 +688,19 @@ def score(self, X_test, y=None): def error_norm(self, comp_cov, norm='frobenius', scaling=True, squared=True): """Computes the Mean Squared Error between two covariance estimators. - (In the sense of the Frobenius norm). Parameters ---------- comp_cov : array-like, shape = [n_features, n_features] The covariance to compare with. - norm : str The type of norm used to compute the error. Available error types: - 'frobenius' (default): sqrt(tr(A^t.A)) - 'spectral': sqrt(max(eigenvalues(A^t.A)) where A is the error ``(comp_cov - self.covariance_)``. - scaling : bool If True (default), the squared error norm is divided by n_features. If False, the squared error norm is not rescaled. - squared : bool Whether to compute the squared error norm or the error norm. If True (default), the squared error norm is returned. @@ -675,8 +710,8 @@ def error_norm(self, comp_cov, norm='frobenius', scaling=True, ------- The Mean Squared Error (in the sense of the Frobenius norm) between `self` and `comp_cov` covariance estimators. 
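A toy sketch of the Frobenius error norm described above, with ``scaling=True`` and ``squared=False``:

import numpy as np

cov_a = np.eye(3)
cov_b = 1.1 * np.eye(3)
error = cov_b - cov_a
squared_norm = np.sum(error ** 2)                # tr(A.T @ A), Frobenius case
result = np.sqrt(squared_norm / error.shape[0])  # divide by n_features, then sqrt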
- """ + from scipy import linalg # compute the error error = comp_cov - self.covariance_ # compute the error norm @@ -753,6 +788,7 @@ def log_likelihood(emp_cov, precision): def _logdet(A): """Compute the log det of a positive semidefinite matrix.""" + from scipy import linalg vals = linalg.eigvalsh(A) # avoid negative (numerical errors) or zero (semi-definite matrix) values tol = vals.max() * vals.size * np.finfo(np.float64).eps diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index e8c09c5e489..ca60b8c8f4f 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -475,10 +475,11 @@ def _do_prim_curr(rr, coils): del coils pc = np.empty((len(rr) * 3, n_coils)) for start, stop in _rr_bounds(rr, chunk=1): - p = _bem_inf_fields(rr[start:stop], rmags, cosmags) - p *= ws - p.shape = (3 * (stop - start), -1) - pc[3 * start:3 * stop] = [bincount(bins, pp, bins[-1] + 1) for pp in p] + pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) + pp *= ws + pp.shape = (3 * (stop - start), -1) + pc[3 * start:3 * stop] = [bincount(bins, this_pp, bins[-1] + 1) + for this_pp in pp] return pc diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py index d4b3b938fb2..3f01ace235f 100644 --- a/mne/forward/_field_interpolation.py +++ b/mne/forward/_field_interpolation.py @@ -9,7 +9,6 @@ from copy import deepcopy import numpy as np -from scipy import linalg from ..io.constants import FWD, FIFF from ..bem import _check_origin @@ -106,6 +105,7 @@ def _compute_mapping_matrix(fmd, info): def _pinv_trunc(x, miss): """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" + from scipy import linalg u, s, v = linalg.svd(x, full_matrices=False) # Eigenvalue truncation diff --git a/mne/forward/forward.py b/mne/forward/forward.py index aa793f6ebbb..52596e3aaa8 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -12,7 +12,6 @@ import re import numpy as np -from scipy import sparse import shutil import os @@ -26,11 +25,13 @@ from ..io.tag import find_tag, read_tag from ..io.matrix import (_read_named_matrix, _transpose_named_matrix, write_named_matrix) -from ..io.meas_info import read_bad_channels, write_info +from ..io.meas_info import (_read_bad_channels, write_info, _write_ch_infos, + _read_extended_ch_info, _make_ch_names_mapping, + _rename_list) from ..io.pick import (pick_channels_forward, pick_info, pick_channels, pick_types) from ..io.write import (write_int, start_block, end_block, - write_coord_trans, write_ch_info, write_name_list, + write_coord_trans, write_name_list, write_string, start_file, end_file, write_id) from ..io.base import BaseRaw from ..evoked import Evoked, EvokedArray @@ -168,6 +169,7 @@ def _block_diag(A, n): bd : sparse matrix The block diagonal matrix """ + from scipy import sparse if sparse.issparse(A): # then make block sparse raise NotImplementedError('sparse reversal not implemented yet') ma, na = A.shape @@ -286,14 +288,14 @@ def _read_forward_meas_info(tree, fid): info['meas_id'] = tag.data if tag is not None else None # Add channel information - chs = list() + info['chs'] = chs = list() for k in range(parent_meg['nent']): kind = parent_meg['directory'][k].kind pos = parent_meg['directory'][k].pos if kind == FIFF.FIFF_CH_INFO: tag = read_tag(fid, pos) chs.append(tag.data) - info['chs'] = chs + ch_names_mapping = _read_extended_ch_info(chs, parent_meg, fid) info._update_redundant() # Get the MRI <-> head coordinate transformation @@ -322,7 +324,8 @@ def 
_read_forward_meas_info(tree, fid): else: raise ValueError('MEG/head coordinate transformation not found') - info['bads'] = read_bad_channels(fid, parent_meg) + info['bads'] = _read_bad_channels( + fid, parent_meg, ch_names_mapping=ch_names_mapping) # clean up our bad list, old versions could have non-existent bads info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']] @@ -583,6 +586,7 @@ def convert_forward_solution(fwd, surf_ori=False, force_fixed=False, fwd : Forward The modified forward solution. """ + from scipy import sparse fwd = fwd.copy() if copy else fwd if force_fixed is True: @@ -703,8 +707,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None): or -fwd.fif.gz. fwd : Forward Forward solution. - overwrite : bool - If True, overwrite destination file (if it exists). + %(overwrite)s %(verbose)s See Also @@ -917,18 +920,17 @@ def write_forward_meas_info(fid, info): raise ValueError('Head<-->sensor transform not found') write_coord_trans(fid, meg_head_t) + ch_names_mapping = dict() if 'chs' in info: # Channel information + ch_names_mapping = _make_ch_names_mapping(info['chs']) write_int(fid, FIFF.FIFF_NCHAN, len(info['chs'])) - for k, c in enumerate(info['chs']): - # Scan numbers may have been messed up - c = deepcopy(c) - c['scanno'] = k + 1 - write_ch_info(fid, c) + _write_ch_infos(fid, info['chs'], False, ch_names_mapping) if 'bads' in info and len(info['bads']) > 0: # Bad channels + bads = _rename_list(info['bads'], ch_names_mapping) start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) - write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads']) + write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads) end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) @@ -1491,7 +1493,6 @@ def apply_forward_raw(fwd, stc, info, start=None, stop=None, raw._first_samps = np.array([int(np.round(times[0] * sfreq))]) raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1]) raw._projector = None - raw._update_times() return raw @@ -1736,12 +1737,9 @@ def _do_forward_solution(subject, meas, fname=None, src=None, spacing=None, If True, compute the gradient of the field with respect to the dipole coordinates as well (Default: False). mricoord : bool - If True, calculate in MRI coordinates (Default: False). - overwrite : bool - If True, the destination file (if it exists) will be overwritten. - If False (default), an error will be raised if the file exists. - subjects_dir : None | str - Override the SUBJECTS_DIR environment variable. + If True, calculate in MRI coordinates (Default: False) + %(overwrite)s + %(subjects_dir)s %(verbose)s See Also diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py index 9b69247d96f..a52d9a69de4 100644 --- a/mne/gui/__init__.py +++ b/mne/gui/__init__.py @@ -26,7 +26,7 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, trans=None, scrollable=True, project_eeg=None, orient_to_surface=None, scale_by_distance=None, mark_inside=None, interaction=None, scale=None, - advanced_rendering=None, verbose=None): + advanced_rendering=None, head_inside=True, verbose=None): """Coregister an MRI with a subject's head shape. The recommended way to use the GUI is through bash with: @@ -108,6 +108,11 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, bugs. .. versionadded:: 0.18 + head_inside : bool + If True (default), add opaque inner scalp head surface to help occlude + points behind the head. + + .. 
versionadded:: 0.23 %(verbose)s Returns @@ -138,6 +143,9 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, config.get('MNE_COREG_ADVANCED_RENDERING', 'true') == 'true' if head_opacity is None: head_opacity = config.get('MNE_COREG_HEAD_OPACITY', 1.) + if head_inside is None: + head_inside = \ + config.get('MNE_COREG_HEAD_INSIDE', 'true').lower() == 'true' if width is None: width = config.get('MNE_COREG_WINDOW_WIDTH', 800) if height is None: @@ -162,6 +170,7 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, if scale is None: scale = config.get('MNE_COREG_SCENE_SCALE', 0.16) head_opacity = float(head_opacity) + head_inside = bool(head_inside) width = int(width) height = int(height) scale = float(scale) @@ -176,7 +185,8 @@ def coregistration(tabbed=False, split=True, width=None, inst=None, orient_to_surface=orient_to_surface, scale_by_distance=scale_by_distance, mark_inside=mark_inside, interaction=interaction, - scale=scale, advanced_rendering=advanced_rendering) + scale=scale, advanced_rendering=advanced_rendering, + head_inside=head_inside) return _initialize_gui(frame, view) diff --git a/mne/gui/_coreg_gui.py b/mne/gui/_coreg_gui.py index dd36d3013f9..cb5ce0f29c3 100644 --- a/mne/gui/_coreg_gui.py +++ b/mne/gui/_coreg_gui.py @@ -1656,6 +1656,7 @@ class ViewOptionsPanel(HasTraits): bgcolor = RGBColor() coord_frame = Enum('mri', 'head', label='Display coordinate frame') head_high_res = Bool(True, label='Show high-resolution head') + head_inside = Bool(True, label='Add opaque inner head surface') advanced_rendering = Bool(True, label='Use advanced OpenGL', desc='Enable advanced OpenGL methods that do ' 'not work with all renderers (e.g., depth ' @@ -1673,7 +1674,8 @@ class ViewOptionsPanel(HasTraits): format_func=_pass)), Item('head_high_res'), Spring(), Item('advanced_rendering'), - Spring(), Spring(), columns=3, show_labels=True), + Item('head_inside'), Spring(), Spring(), + columns=3, show_labels=True), Item('hsp_cf_obj', style='custom', label='Head axes'), Item('mri_cf_obj', style='custom', label='MRI axes'), HGroup(Item('bgcolor', label='Background'), Spring()), @@ -1756,6 +1758,7 @@ class CoregFrame(HasTraits): scene = Instance(MlabSceneModel, ()) head_high_res = Bool(True) advanced_rendering = Bool(True) + head_inside = Bool(True) data_panel = Instance(DataPanel) coreg_panel = Instance(CoregPanel) # right panel @@ -1818,19 +1821,21 @@ def __init__(self, raw=None, subject=None, subjects_dir=None, project_eeg=False, orient_to_surface=False, scale_by_distance=False, mark_inside=False, interaction='trackball', scale=0.16, - advanced_rendering=True): # noqa: D102 + advanced_rendering=True, head_inside=True): # noqa: D102 self._config = config or {} super(CoregFrame, self).__init__(guess_mri_subject=guess_mri_subject, head_high_res=head_high_res, - advanced_rendering=advanced_rendering) + advanced_rendering=advanced_rendering, + head_inside=head_inside) self._initial_kwargs = dict(project_eeg=project_eeg, orient_to_surface=orient_to_surface, scale_by_distance=scale_by_distance, mark_inside=mark_inside, head_opacity=head_opacity, interaction=interaction, - scale=scale) + scale=scale, head_inside=head_inside) self._locked_opacity = self._initial_kwargs['head_opacity'] + self._locked_head_inside = self._initial_kwargs['head_inside'] if not 0 <= head_opacity <= 1: raise ValueError( "head_opacity needs to be a floating point number between 0 " @@ -1891,6 +1896,7 @@ def _init_plot(self): # [[0, 0, 0]] -- why?? 
) self.mri_obj.opacity = self._initial_kwargs['head_opacity'] + self.mri_obj.rear_opacity = float(self.head_inside) self.data_panel.fid_panel.hsp_obj = self.mri_obj self._update_mri_obj() self.mri_obj.plot() @@ -1900,18 +1906,18 @@ def _init_plot(self): point_scale = defaults['mri_fid_scale'] self.mri_lpa_obj = PointObject(scene=self.scene, color=lpa_color, has_norm=True, point_scale=point_scale, - name='LPA') + name='LPA', view='oct') self.model.sync_trait('transformed_mri_lpa', self.mri_lpa_obj, 'points', mutual=False) self.mri_nasion_obj = PointObject(scene=self.scene, color=nasion_color, has_norm=True, point_scale=point_scale, - name='Nasion') + name='Nasion', view='oct') self.model.sync_trait('transformed_mri_nasion', self.mri_nasion_obj, 'points', mutual=False) self.mri_rpa_obj = PointObject(scene=self.scene, color=rpa_color, has_norm=True, point_scale=point_scale, - name='RPA') + name='RPA', view='oct') self.model.sync_trait('transformed_mri_rpa', self.mri_rpa_obj, 'points', mutual=False) @@ -2000,7 +2006,7 @@ def _init_plot(self): mri_obj=self.mri_obj, hsp_obj=self.hsp_obj, eeg_obj=self.eeg_obj, hpi_obj=self.hpi_obj, hsp_cf_obj=self.hsp_cf_obj, mri_cf_obj=self.mri_cf_obj, - head_high_res=self.head_high_res, + head_high_res=self.head_high_res, head_inside=self.head_inside, bgcolor=self.bgcolor, advanced_rendering=self.advanced_rendering) self.data_panel.headview.scale = self._initial_kwargs['scale'] self.data_panel.headview.interaction = \ @@ -2008,10 +2014,9 @@ def _init_plot(self): self.data_panel.headview.left = True self.data_panel.view_options_panel.sync_trait( 'coord_frame', self.model) - self.data_panel.view_options_panel.sync_trait('head_high_res', self) - self.data_panel.view_options_panel.sync_trait('advanced_rendering', - self) - self.data_panel.view_options_panel.sync_trait('bgcolor', self) + for key in ('head_high_res', 'advanced_rendering', 'bgcolor', + 'head_inside'): + self.data_panel.view_options_panel.sync_trait(key, self) @on_trait_change('advanced_rendering') def _on_advanced_rendering_change(self): @@ -2040,9 +2045,17 @@ def _on_lock_change(self): else: self._locked_opacity = self.mri_obj.opacity self.mri_obj.opacity = 1. 
+ self._locked_head_inside = self.head_inside + self.head_inside = False else: if self.mri_obj is not None: self.mri_obj.opacity = self._locked_opacity + self.head_inside = self._locked_head_inside + + @on_trait_change('head_inside') + def _on_head_inside_change(self): + if self.mri_obj is not None: + self.mri_obj.rear_opacity = float(self.head_inside) # 0 or 1 @cached_property def _get_hsp_visible(self): @@ -2108,12 +2121,15 @@ def s_c(key, value, lower=True): set_env=False) s_c('MNE_COREG_GUESS_MRI_SUBJECT', self.model.guess_mri_subject) - s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res) s_c('MNE_COREG_ADVANCED_RENDERING', self.advanced_rendering) + s_c('MNE_COREG_HEAD_HIGH_RES', self.head_high_res) if self.lock_fiducials: opacity = self.mri_obj.opacity + head_inside = self.head_inside else: opacity = self._locked_opacity + head_inside = self._locked_head_inside + s_c('MNE_COREG_HEAD_INSIDE', head_inside) s_c('MNE_COREG_HEAD_OPACITY', opacity) if size is not None: s_c('MNE_COREG_WINDOW_WIDTH', size[0]) diff --git a/mne/gui/_fiducials_gui.py b/mne/gui/_fiducials_gui.py index 02245e89251..f6b495e34d6 100644 --- a/mne/gui/_fiducials_gui.py +++ b/mne/gui/_fiducials_gui.py @@ -312,20 +312,10 @@ def _update_pos(self): if not np.allclose(getattr(self, attr), self.current_pos_mm * 1e-3): setattr(self, attr, self.current_pos_mm * 1e-3) - @on_trait_change('model:lpa') - def _update_lpa(self, name): - if self.set == 'LPA': - self.current_pos_mm = self.lpa * 1000 - - @on_trait_change('model:nasion') - def _update_nasion(self, name): - if self.set.lower() == 'Nasion': - self.current_pos_mm = self.nasion * 1000 - - @on_trait_change('model:rpa') - def _update_rpa(self, name): - if self.set.lower() == 'RPA': - self.current_pos_mm = self.rpa * 1000 + @on_trait_change('model:lpa,model:nasion,model:rpa') + def _update_fiducial(self, value): + attr = self.set.lower() + self.current_pos_mm = getattr(self, attr) * 1000 def _reset_fid_fired(self): self.model.reset = True @@ -378,6 +368,7 @@ def _on_pick(self, picker): pt = [picker.picked_positions[idx]] else: logger.debug("GUI: picked object other than MRI") + return def round_(x): return round(x, 3) @@ -400,27 +391,18 @@ def round_(x): msg.append(line) logger.debug('\n'.join(msg)) - if self.set == 'Nasion': - self.nasion = pt - elif self.set == 'LPA': - self.lpa = pt - elif self.set == 'RPA': - self.rpa = pt - else: - raise ValueError("set = %r" % self.set) + set_ = self.set.lower() + assert set_ in _VIEW_DICT, set_ + setattr(self, set_, pt) @on_trait_change('set') def _on_set_change(self, obj, name, old, new): - if new == 'Nasion': - self.current_pos_mm = self.nasion * 1000 - self.headview.front = True - elif new == 'LPA': - self.current_pos_mm = self.lpa * 1000 - self.headview.left = True - elif new == 'RPA': - self.current_pos_mm = self.rpa * 1000 - self.headview.right = True + new = new.lower() + self._update_fiducial(None) + setattr(self.headview, _VIEW_DICT[new], True) + +_VIEW_DICT = dict(lpa='left', nasion='front', rpa='right') # FiducialsPanel view that allows manipulating all coordinates numerically view2 = View(VGroup(Item('fid_file', label='Fiducials File'), @@ -500,10 +482,6 @@ def __init__(self, subject=None, subjects_dir=None, def _init_plot(self): _toggle_mlab_render(self, False) - lpa_color = defaults['lpa_color'] - nasion_color = defaults['nasion_color'] - rpa_color = defaults['rpa_color'] - # bem color = defaults['mri_color'] self.mri_obj = SurfaceObject(points=self.model.points, color=color, @@ -512,24 +490,14 @@ def _init_plot(self): 
self.panel.hsp_obj = self.mri_obj # fiducials - self.lpa_obj = PointObject(scene=self.scene, color=lpa_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False) - self.sync_trait('point_scale', self.lpa_obj, mutual=False) - - self.nasion_obj = PointObject(scene=self.scene, color=nasion_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('nasion', self.nasion_obj, 'points', - mutual=False) - self.sync_trait('point_scale', self.nasion_obj, mutual=False) - - self.rpa_obj = PointObject(scene=self.scene, color=rpa_color, - has_norm=True, - point_scale=self.point_scale) - self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False) - self.sync_trait('point_scale', self.rpa_obj, mutual=False) + for key in ('lpa', 'nasion', 'rpa'): + attr = f'{key}_obj' + setattr(self, attr, PointObject( + scene=self.scene, color=defaults[f'{key}_color'], + has_norm=True, point_scale=self.point_scale)) + obj = getattr(self, attr) + self.panel.sync_trait(key, obj, 'points', mutual=False) + self.sync_trait('point_scale', obj, mutual=False) self.headview.left = True _toggle_mlab_render(self, True) diff --git a/mne/gui/_file_traits.py b/mne/gui/_file_traits.py index 7a50cea6b9b..5e84890d355 100644 --- a/mne/gui/_file_traits.py +++ b/mne/gui/_file_traits.py @@ -13,7 +13,7 @@ from traits.api import (Any, HasTraits, HasPrivateTraits, cached_property, on_trait_change, Array, Bool, Button, DelegatesTo, Directory, Enum, Event, File, Instance, Int, List, - Property, Str, ArrayOrNone) + Property, Str, ArrayOrNone, BaseFile) from traitsui.api import View, Item, VGroup from pyface.api import DirectoryDialog, OK, ProgressDialog, error, information @@ -21,7 +21,8 @@ from ..bem import read_bem_surfaces from ..io.constants import FIFF -from ..io import read_info, read_fiducials +from ..io import read_info, read_fiducials, read_raw +from ..io._read_raw import supported from ..io.meas_info import _empty_info from ..io.open import fiff_open, dir_tree_find from ..surface import read_surface, complete_surface_info @@ -29,7 +30,7 @@ create_default_subject) from ..utils import get_config, set_config from ..viz._3d import _fiducial_coords -from ..channels import read_dig_fif, DigMontage +from ..channels import read_dig_fif fid_wildcard = "*.fif" @@ -135,6 +136,21 @@ def _mne_root_problem(mne_root): "installation, consider reinstalling." % mne_root) +class FileOrDir(File): + """Subclass File because *.mff files are actually directories.""" + + def validate(self, object, name, value): + """Validate that a specified value is valid for this trait.""" + value = os.fspath(value) + validated_value = super(BaseFile, self).validate(object, name, value) + if not self.exists: + return validated_value + elif op.exists(value): + return validated_value + + self.error(object, name, value) + + class Surf(HasTraits): """Expose a surface similar to the ones used elsewhere in MNE.""" @@ -245,7 +261,8 @@ class DigSource(HasPrivateTraits): Nasion, RAP, LAP. If no file is set all values are 0. 
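The reworked ``DigSource`` above relies on ``mne.io.read_raw`` to pull digitization points out of any supported recording; a hedged sketch (the path is a placeholder, and ``.mff`` recordings are directories, which is why the ``FileOrDir`` trait exists):

import mne
from mne.io.constants import FIFF

raw = mne.io.read_raw('recording.mff')   # placeholder path; dispatches on extension
eeg_points = [d['r'] for d in raw.info['dig']
              if d['kind'] == FIFF.FIFFV_POINT_EEG]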
""" - file = File(exists=True, filter=['*.fif']) + file = FileOrDir(exists=True, + filter=[' '.join([f'*{ext}' for ext in supported])]) inst_fname = Property(Str, depends_on='file') inst_dir = Property(depends_on='file') @@ -288,53 +305,45 @@ def _get_n_omitted(self): @cached_property def _get__info(self): - if self.file: + if not self.file: + return + elif self.file.endswith(('.fif', '.fif.gz')): info = None fid, tree, _ = fiff_open(self.file) fid.close() if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0: info = read_info(self.file, verbose=False) elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0: - info = read_dig_fif(fname=self.file) - - if isinstance(info, DigMontage): - dig = info.dig info = _empty_info(1) - info['dig'] = dig - elif info is None or info['dig'] is None: - error(None, "The selected FIFF file does not contain " - "digitizer information. Please select a different " - "file.", "Error Reading FIFF File") + info['dig'] = read_dig_fif(fname=self.file).dig + else: + info = read_raw(self.file).info + + # check that digitizer info is present + if info is None or info['dig'] is None: + error(None, "The selected file does not contain digitization " + "information. Please select a different file.", + "Error Reading Digitization File") + self.reset_traits(['file']) + return + + # check that all fiducial points are present + point_kinds = {d['kind'] for d in info['dig']} + missing = [key for key in ('LPA', 'Nasion', 'RPA') if + getattr(FIFF, f'FIFFV_POINT_{key.upper()}') not in + point_kinds] + if missing: + points = _fiducial_coords(info['dig']) + if len(points == 3): + _append_fiducials(info['dig'], *points.T) + else: + error(None, "The selected digitization file does not contain " + f"all cardinal points (missing: {', '.join(missing)}). " + "Please select a different file.", + "Error Reading Digitization File") self.reset_traits(['file']) return - else: - # check that all fiducial points are present - has_point = {FIFF.FIFFV_POINT_LPA: False, - FIFF.FIFFV_POINT_NASION: False, - FIFF.FIFFV_POINT_RPA: False} - for d in info['dig']: - if d['kind'] == FIFF.FIFFV_POINT_CARDINAL: - has_point[d['ident']] = True - if not all(has_point.values()): - points = _fiducial_coords(info['dig']) - if len(points) == 3: - _append_fiducials(info['dig'], *points.T) - else: - missing = [] - if not has_point[FIFF.FIFFV_POINT_LPA]: - missing.append('LPA') - if not has_point[FIFF.FIFFV_POINT_NASION]: - missing.append('Nasion') - if not has_point[FIFF.FIFFV_POINT_RPA]: - missing.append('RPA') - error(None, "The selected FIFF file does not contain " - "all cardinal points (missing: %s). Please " - "select a different file." 
% ', '.join(missing), - "Error Reading FIFF File") - self.reset_traits(['file']) - return - - return info + return info @cached_property def _get_inst_dir(self): @@ -349,8 +358,8 @@ def _get_inst_fname(self): @cached_property def _get__hsp_points(self): - if not self._info: - return np.zeros((0, 3)) + if not self._info or not self._info['dig']: + return np.empty((0, 3)) points = np.array([d['r'] for d in self._info['dig'] if d['kind'] == FIFF.FIFFV_POINT_EXTRA]) @@ -366,11 +375,12 @@ def _get_points(self): def _cardinal_point(self, ident): """Coordinates for a cardinal point.""" - if self._info: - for d in self._info['dig']: - if (d['kind'] == FIFF.FIFFV_POINT_CARDINAL and - d['ident'] == ident): - return d['r'][None, :] + if not self._info or not self._info['dig']: + return np.zeros((1, 3)) + + for d in self._info['dig']: + if d['kind'] == FIFF.FIFFV_POINT_CARDINAL and d['ident'] == ident: + return d['r'][None, :] return np.zeros((1, 3)) @cached_property @@ -387,25 +397,25 @@ def _get_rpa(self): @cached_property def _get_eeg_points(self): - if self._info: - out = [d['r'] for d in self._info['dig'] if - d['kind'] == FIFF.FIFFV_POINT_EEG and - d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] - out = np.empty((0, 3)) if len(out) == 0 else np.array(out) - return out - else: + if not self._info or not self._info['dig']: return np.empty((0, 3)) + out = [d['r'] for d in self._info['dig'] if + d['kind'] == FIFF.FIFFV_POINT_EEG and + d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] + out = np.empty((0, 3)) if len(out) == 0 else np.array(out) + return out + @cached_property def _get_hpi_points(self): - if self._info: - out = [d['r'] for d in self._info['dig'] if - d['kind'] == FIFF.FIFFV_POINT_HPI and - d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] - out = np.empty((0, 3)) if len(out) == 0 else np.array(out) - return out - else: - return np.empty((0, 3)) + if not self._info or not self._info['dig']: + return np.zeros((0, 3)) + + out = [d['r'] for d in self._info['dig'] if + d['kind'] == FIFF.FIFFV_POINT_HPI and + d['coord_frame'] == FIFF.FIFFV_COORD_HEAD] + out = np.empty((0, 3)) if len(out) == 0 else np.array(out) + return out def _file_changed(self): self.reset_traits(('points_filter',)) diff --git a/mne/gui/_kit2fiff_gui.py b/mne/gui/_kit2fiff_gui.py index d04d53134ca..008aa2ebac1 100644 --- a/mne/gui/_kit2fiff_gui.py +++ b/mne/gui/_kit2fiff_gui.py @@ -8,10 +8,9 @@ import os import queue import sys +from threading import Thread import numpy as np -from scipy.linalg import inv -from threading import Thread from mayavi.core.ui.mayavi_scene import MayaviScene from mayavi.tools.mlab_scene_model import MlabSceneModel @@ -204,7 +203,7 @@ def _get_fid_fname(self): @cached_property def _get_head_dev_trans(self): - return inv(self.dev_head_trans) + return np.linalg.inv(self.dev_head_trans) @cached_property def _get_hsp(self): diff --git a/mne/gui/_viewer.py b/mne/gui/_viewer.py index e33f44a0680..8b1762795af 100644 --- a/mne/gui/_viewer.py +++ b/mne/gui/_viewer.py @@ -21,9 +21,9 @@ from ..defaults import DEFAULTS from ..surface import _CheckInside, _DistanceQuery -from ..transforms import apply_trans +from ..transforms import apply_trans, rotation from ..utils import SilenceStdout -from ..viz.backends._pysurfer_mayavi import (_create_mesh_surf, +from ..viz.backends._pysurfer_mayavi import (_create_mesh_surf, _oct_glyph, _toggle_mlab_render) try: @@ -235,14 +235,14 @@ def __init__(self, view='points', has_norm=False, *args, **kwargs): Parameters ---------- - view : 'points' | 'cloud' + view : 'points' | 'cloud' | 
'arrow' | 'oct' Whether the view options should be tailored to individual points or a point cloud. has_norm : bool Whether a norm can be defined; adds view options based on point norms (default False). """ - assert view in ('points', 'cloud', 'arrow') + assert view in ('points', 'cloud', 'arrow', 'oct') self._view = view self._has_norm = bool(has_norm) super(PointObject, self).__init__(*args, **kwargs) @@ -264,7 +264,7 @@ def default_traits_view(self): # noqa: D102 if self._view == 'arrow': visible = Item('visible', label='Show', show_label=False) return View(HGroup(visible, scale, 'opacity', 'label', Spring())) - elif self._view == 'points': + elif self._view in ('points', 'oct'): visible = Item('visible', label='Show', show_label=True) views = (visible, color, scale, 'label') else: @@ -327,11 +327,15 @@ def _plot_points(self): # this can occur sometimes during testing w/ui.dispose() return # fig.scene.engine.current_object is scatter - mode = 'arrow' if self._view == 'arrow' else 'sphere' + mode = {'cloud': 'sphere', 'points': 'sphere', 'oct': 'sphere'}.get( + self._view, self._view) + assert mode in ('sphere', 'arrow') glyph = pipeline.glyph(scatter, color=self.color, figure=fig, scale_factor=self.point_scale, opacity=1., resolution=self.resolution, mode=mode) + if self._view == 'oct': + _oct_glyph(glyph.glyph.glyph_source, rotation(0, 0, np.pi / 4)) glyph.actor.property.backface_culling = True glyph.glyph.glyph.vector_mode = 'use_normal' glyph.glyph.glyph.clamping = False @@ -430,6 +434,8 @@ def _update_marker_type(self): gs = self.glyph.glyph.glyph_source res = getattr(gs.glyph_source, 'theta_resolution', getattr(gs.glyph_source, 'resolution', None)) + if res is None: + return if self.project_to_surface or self.orient_to_surface: gs.glyph_source = tvtk.CylinderSource() gs.glyph_source.height = defaults['eegp_height'] @@ -482,6 +488,7 @@ class SurfaceObject(Object): surf = Instance(Surface) surf_rear = Instance(Surface) + rear_opacity = Float(1.) view = View(HGroup(Item('visible', show_label=False), Item('color', show_label=False), @@ -526,7 +533,9 @@ def plot(self): self.sync_trait('color', self.surf_rear.actor.property, mutual=False) self.sync_trait('visible', self.surf_rear, 'visible') - self.surf_rear.actor.property.opacity = 1. 
+ self.surf_rear.actor.property.opacity = self.rear_opacity + self.sync_trait( + 'rear_opacity', self.surf_rear.actor.property, 'opacity') surf = pipeline.surface( normals, figure=fig, color=self.color, representation=rep, line_width=1) diff --git a/mne/gui/tests/test_file_traits.py b/mne/gui/tests/test_file_traits.py index 0e8da21fdaa..8b094785a96 100644 --- a/mne/gui/tests/test_file_traits.py +++ b/mne/gui/tests/test_file_traits.py @@ -59,7 +59,7 @@ def test_fiducials_source(): @testing.requires_testing_data @requires_mayavi @traits_test -def test_inst_source(tmpdir): +def test_digitization_source(tmpdir): """Test DigSource.""" from mne.gui._file_traits import DigSource tempdir = str(tmpdir) @@ -70,6 +70,7 @@ def test_inst_source(tmpdir): inst.file = inst_path assert inst.inst_dir == op.dirname(inst_path) + # FIFF lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]]) nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]]) rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]]) @@ -77,7 +78,8 @@ def test_inst_source(tmpdir): assert_allclose(inst.nasion, nasion) assert_allclose(inst.rpa, rpa) - montage = read_dig_fif(inst_path) # test reading DigMontage + # DigMontage + montage = read_dig_fif(inst_path) montage_path = op.join(tempdir, 'temp_montage.fif') montage.save(montage_path) inst.file = montage_path @@ -85,6 +87,22 @@ def test_inst_source(tmpdir): assert_allclose(inst.nasion, nasion) assert_allclose(inst.rpa, rpa) + # EGI MFF + inst.file = op.join(data_path, 'EGI', 'test_egi.mff') + assert len(inst.points) == 0 + assert len(inst.eeg_points) == 130 + assert_allclose(inst.lpa * 1000, [[-67.1, 0, 0]], atol=0.1) + assert_allclose(inst.nasion * 1000, [[0.0, 103.6, 0]], atol=0.1) + assert_allclose(inst.rpa * 1000, [[67.1, 0, 0]], atol=0.1) + + # CTF + inst.file = op.join(data_path, 'CTF', 'testdata_ctf.ds') + assert len(inst.points) == 0 + assert len(inst.eeg_points) == 8 + assert_allclose(inst.lpa * 1000, [[-74.3, 0.0, 0.0]], atol=0.1) + assert_allclose(inst.nasion * 1000, [[0.0, 117.7, 0.0]], atol=0.1) + assert_allclose(inst.rpa * 1000, [[84.9, -0.0, 0.0]], atol=0.1) + @testing.requires_testing_data @requires_mayavi diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 326d1996ed0..1b7f257b134 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -3,10 +3,8 @@ # License: Simplified BSD import numpy as np -from scipy import linalg from ..forward import is_fixed_orient - from ..minimum_norm.inverse import _check_reference, _log_exp_var from ..utils import logger, verbose, warn from .mxne_inverse import (_check_ori, _make_sparse_stc, _prepare_gain, @@ -47,6 +45,7 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, active_set : array, shape=(n_active,) Indices of active sources. 
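# Hedged usage sketch for the gamma-MAP solver whose internals are edited
# above: _gamma_map_opt is private, so users would normally go through the
# public wrapper in the same module. `evoked`, `forward` and `noise_cov` are
# placeholders for objects loaded elsewhere; the parameter values are only
# illustrative.
from mne.inverse_sparse import gamma_map
stc = gamma_map(evoked, forward, noise_cov, alpha=0.5, loose=0.2, depth=0.8)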
""" + from scipy import linalg G = G.copy() M = M.copy() @@ -59,10 +58,10 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, n_sensors, n_times = M.shape # apply normalization so the numerical values are sane - M_normalize_constant = linalg.norm(np.dot(M, M.T), ord='fro') + M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord='fro') M /= np.sqrt(M_normalize_constant) alpha /= M_normalize_constant - G_normalize_constant = linalg.norm(G, ord=np.inf) + G_normalize_constant = np.linalg.norm(G, ord=np.inf) G /= G_normalize_constant if n_sources % group_size != 0: @@ -97,7 +96,7 @@ def denom_fun(x): CM = np.dot(G * gammas[np.newaxis, :], G.T) CM.flat[::n_sensors + 1] += alpha # Invert CM keeping symmetry - U, S, V = linalg.svd(CM, full_matrices=False) + U, S, _ = linalg.svd(CM, full_matrices=False) S = S[np.newaxis, :] del CM CMinv = np.dot(U / (S + eps), U.T) diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py index b84b18982c1..54da0e9937d 100755 --- a/mne/inverse_sparse/mxne_debiasing.py +++ b/mne/inverse_sparse/mxne_debiasing.py @@ -5,7 +5,6 @@ from math import sqrt import numpy as np -from scipy import linalg from ..utils import check_random_state, logger, verbose, fill_doc @@ -38,13 +37,13 @@ def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0): AS_size = C.shape[0] rng = check_random_state(random_state) B = rng.randn(AS_size, AS_size) - B /= linalg.norm(B, 'fro') + B /= np.linalg.norm(B, 'fro') ATA = np.dot(A.T, A) CCT = np.dot(C, C.T) L0 = np.inf for _ in range(max_iter): Y = np.dot(np.dot(ATA, B), CCT) - L = linalg.norm(Y, 'fro') + L = np.linalg.norm(Y, 'fro') if abs(L - L0) < tol: break @@ -121,14 +120,14 @@ def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): dt = (t0 - 1.0) / t Y = D + dt * (D - D0) - Ddiff = linalg.norm(D - D0, np.inf) + Ddiff = np.linalg.norm(D - D0, np.inf) if Ddiff < tol: logger.info("Debiasing converged after %d iterations " "max(|D - D0| = %e < %e)" % (i, Ddiff, tol)) break else: - Ddiff = linalg.norm(D - D0, np.inf) + Ddiff = np.linalg.norm(D - D0, np.inf) logger.info("Debiasing did not converge after %d iterations! " "max(|D - D0| = %e >= %e)" % (max_iter, Ddiff, tol)) return D diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index 57e1220bd34..289bdb0f5f8 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -4,7 +4,6 @@ # License: Simplified BSD import numpy as np -from scipy import linalg from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc from ..minimum_norm.inverse import (combine_xyz, _prepare_forward, @@ -293,8 +292,8 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, verbose=None): """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE). - Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm - solution on evoked data. + Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2 + :footcite:`StrohmeierEtAl2016` mixed-norm solution on evoked data. Parameters ---------- @@ -362,16 +361,9 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, References ---------- - .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen, - "Mixed-norm estimates for the M/EEG inverse problem using accelerated - gradient methods", Physics in Medicine and Biology, 2012. - https://doi.org/10.1088/0031-9155/57/7/1937 - - .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. 
Gramfort, - "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal - MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging, - Volume 35 (10), pp. 2218-2228, 2016. + .. footbibliography:: """ + from scipy import linalg if not (0. <= alpha < 100.): raise ValueError('alpha must be in [0, 100). ' 'Got alpha = %s' % alpha) @@ -520,7 +512,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, Compute L1/L2 + L1 mixed-norm solution on time-frequency dictionary. Works with evoked data - :footcite:`GramfortEtAl2013,GramfortEtAl2011`. + :footcite:`GramfortEtAl2013b,GramfortEtAl2011`. Parameters ---------- @@ -653,7 +645,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 n_coefs = n_steps * n_freqs - phi = _Phi(wsize, tstep, n_coefs) + phi = _Phi(wsize, tstep, n_coefs, evoked.data.shape[1]) # Scaling to make setting of tol and alpha easy tol *= sum_squared(M) diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index 64fffb1bdb5..adec7a49971 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -3,16 +3,21 @@ # Mathurin Massias # License: Simplified BSD +import functools from math import sqrt import numpy as np -from scipy import linalg from .mxne_debiasing import compute_bias -from ..utils import logger, verbose, sum_squared, warn, dgemm +from ..utils import logger, verbose, sum_squared, warn, _get_blas_funcs from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft +@functools.lru_cache(None) +def _get_dgemm(): + return _get_blas_funcs(np.float64, 'gemm') + + def groups_norm2(A, n_orient): """Compute squared L2 norms of groups inplace.""" n_positions = A.shape[0] // n_orient @@ -113,7 +118,8 @@ def prox_l1(Y, alpha, n_orient): Please note that this function computes a soft-thresholding if n_orient == 1 and a block soft-thresholding (L2 over orientation and - L1 over position (space + time)) if n_orient == 3. See also [1]_. + L1 over position (space + time)) if n_orient == 3. See also + :footcite:`GramfortEtAl2013b`. Parameters ---------- @@ -133,11 +139,7 @@ def prox_l1(Y, alpha, n_orient): References ---------- - .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. Kowalski - "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with - non-stationary source activations", - Neuroimage, Volume 70, pp. 410-422, 15 April 2013. - DOI: 10.1016/j.neuroimage.2012.12.051 + .. footbibliography:: Examples -------- @@ -174,6 +176,8 @@ def prox_l1(Y, alpha, n_orient): def dgap_l21(M, G, X, active_set, alpha, n_orient): """Duality gap for the mixed norm inverse problem. + See :footcite:`GramfortEtAl2012`. + Parameters ---------- M : array, shape (n_sensors, n_times) @@ -202,10 +206,7 @@ def dgap_l21(M, G, X, active_set, alpha, n_orient): References ---------- - .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen, - "Mixed-norm estimates for the M/EEG inverse problem using accelerated - gradient methods", Physics in Medicine and Biology, 2012. - https://doi.org/10.1088/0031-9155/57/7/1937 + .. footbibilography:: """ GX = np.dot(G[:, active_set], X) R = M - GX @@ -403,6 +404,7 @@ def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions, alpha * (Lipschitz constants). 
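# Hedged usage sketch for the MxNE/irMxNE API whose references were converted
# to footcite entries above. `evoked`, `forward` and `noise_cov` are
# placeholders for objects the caller already has; the parameter values are
# only illustrative (alpha must lie in [0, 100) per the check in mixed_norm).
from mne.inverse_sparse import mixed_norm
stc = mixed_norm(evoked, forward, noise_cov, alpha=40., loose=0.2, depth=0.8,
                 n_mxne_iter=10)  # n_mxne_iter > 1 selects the L0.5/L2 irMxNE variant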
""" X_j_new = np.zeros_like(X[0:n_orient, :], order='C') + dgemm = _get_dgemm() for j, G_j_c in enumerate(list_G_j_c): idx = slice(j * n_orient, (j + 1) * n_orient) @@ -439,6 +441,8 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, solver='auto', return_gap=False, dgap_freq=10): """Solve L1/L2 mixed-norm inverse problem with active set strategy. + See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016`. + Parameters ---------- M : array, shape (n_sensors, n_times) @@ -480,15 +484,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, References ---------- - .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen, - "Mixed-norm estimates for the M/EEG inverse problem using accelerated - gradient methods", Physics in Medicine and Biology, 2012. - https://doi.org/10.1088/0031-9155/57/7/1937 - - .. [2] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort, - "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal - MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging, - Volume 35 (10), pp. 2218-2228, 15 April 2013. + .. footbibliography:: """ n_dipoles = G.shape[1] n_positions = n_dipoles // n_orient @@ -534,11 +530,11 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) else: logger.info("Using proximal iterations") l21_solver = _mixed_norm_solver_prox - lc = 1.01 * linalg.norm(G, ord=2) ** 2 + lc = 1.01 * np.linalg.norm(G, ord=2) ** 2 if active_set_size is not None: E = list() @@ -558,7 +554,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, elif solver == 'cd': lc_tmp = None else: - lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2 + lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2 X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp, maxit=maxit, tol=tol, init=X_init, n_orient=n_orient, dgap_freq=dgap_freq) @@ -621,6 +617,8 @@ def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000, solver='auto'): """Solve L0.5/L2 mixed-norm inverse problem with active set strategy. + See reference :footcite:`StrohmeierEtAl2016`. + Parameters ---------- M : array, shape (n_sensors, n_times) @@ -660,10 +658,7 @@ def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000, References ---------- - .. [1] D. Strohmeier, Y. Bekhti, J. Haueisen, A. Gramfort, - "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal - MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging, - Volume 35 (10), pp. 2218-2228, 2016. + .. footbibliography:: """ def g(w): return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) @@ -707,8 +702,8 @@ def gprime(w): # Reapply weights to have correct unit X *= weights[_active_set][:, np.newaxis] weights = gprime(X) - p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X), - 'fro') ** 2. + alpha * np.sum(g(X)) + p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), + 'fro') ** 2. + alpha * np.sum(g(X)) E.append(p_obj) # Check convergence @@ -718,7 +713,7 @@ def gprime(w): break else: active_set = np.zeros_like(active_set) - p_obj = 0.5 * linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2. 
E.append(p_obj) break @@ -775,23 +770,27 @@ def safe_max_abs_diff(A, ia, B, ib): class _Phi(object): """Have phi stft as callable w/o using a lambda that does not pickle.""" - def __init__(self, wsize, tstep, n_coefs): # noqa: D102 + def __init__(self, wsize, tstep, n_coefs, n_times): # noqa: D102 self.wsize = np.atleast_1d(wsize) self.tstep = np.atleast_1d(tstep) self.n_coefs = np.atleast_1d(n_coefs) self.n_dicts = len(tstep) self.n_freqs = wsize // 2 + 1 self.n_steps = self.n_coefs // self.n_freqs + self.n_times = n_times + # ravel freq+time here + self.ops = list() + for ws, ts in zip(self.wsize, self.tstep): + self.ops.append( + stft(np.eye(n_times), ws, ts, + verbose=False).reshape(n_times, -1)) def __call__(self, x): # noqa: D105 if self.n_dicts == 1: - return stft(x, self.wsize[0], self.tstep[0], - verbose=False).reshape(-1, self.n_coefs[0]) + return x @ self.ops[0] else: return np.hstack( - [stft(x, self.wsize[i], self.tstep[i], verbose=False).reshape( - -1, self.n_coefs[i]) for i in range(self.n_dicts)]) / np.sqrt( - self.n_dicts) + [x @ op for op in self.ops]) / np.sqrt(self.n_dicts) def norm(self, z, ord=2): """Squared L2 norm if ord == 2 and L1 norm if order == 1.""" @@ -820,19 +819,26 @@ def __init__(self, tstep, n_freqs, n_steps, n_times): # noqa: D102 self.n_steps = n_steps self.n_times = n_times self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1 - self.n_coefs = self.n_freqs * self.n_steps + self.n_coefs = list() + self.op_re = list() + self.op_im = list() + for nf, ns, ts in zip(self.n_freqs, self.n_steps, self.tstep): + nc = nf * ns + self.n_coefs.append(nc) + eye = np.eye(nc).reshape(nf, ns, nf, ns) + self.op_re.append(istft( + eye, ts, n_times).reshape(nc, n_times)) + self.op_im.append(istft( + eye * 1j, ts, n_times).reshape(nc, n_times)) def __call__(self, z): # noqa: D105 if self.n_dicts == 1: - return istft(z.reshape(-1, self.n_freqs[0], self.n_steps[0]), - self.tstep[0], self.n_times) + return z.real @ self.op_re[0] + z.imag @ self.op_im[0] else: x_out = np.zeros((z.shape[0], self.n_times)) z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1) - for i in range(self.n_dicts): - x_out += istft(z_[i].reshape(-1, self.n_freqs[i], - self.n_steps[i]), - self.tstep[i], self.n_times) + for this_z, op_re, op_im in zip(z_, self.op_re, self.op_im): + x_out += this_z.real @ op_re + this_z.imag @ op_im return x_out / np.sqrt(self.n_dicts) @@ -876,7 +882,8 @@ def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): Warning: it takes into account the fact that Y only contains coefficients corresponding to the positive frequencies (see `stft_norm2()`): some entries will be counted twice. It is also assumed that all entries of both - Y and w_time are non-negative. + Y and w_time are non-negative. See + :footcite:`NdiayeEtAl2016,BurdakovMerkulov2001`. Parameters ---------- @@ -901,13 +908,7 @@ def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None): References ---------- - .. [1] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon, - "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural - Information Processing Systems (NIPS), 2016. - - .. [2] O. Burdakov, B. Merkulov, - "On a new norm for data fitting and optimization problems", - LiTH-MAT, 2001. + .. 
footbibliography:: """ # since the solution is invariant to flipped signs in Y, all entries # of Y are assumed positive @@ -1042,6 +1043,8 @@ def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT, n_orient, highest_d_obj, w_space=None, w_time=None): """Duality gap for the time-frequency mixed norm inverse problem. + See :footcite:`GramfortEtAl2012,NdiayeEtAl2016` + Parameters ---------- M : array, shape (n_sensors, n_times) @@ -1083,14 +1086,7 @@ def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT, References ---------- - .. [1] A. Gramfort, M. Kowalski, M. Hämäläinen, - "Mixed-norm estimates for the M/EEG inverse problem using accelerated - gradient methods", Physics in Medicine and Biology, 2012. - https://doi.org/10.1088/0031-9155/57/7/1937 - - .. [2] E. Ndiaye, O. Fercoq, A. Gramfort, J. Salmon, - "GAP Safe Screening Rules for Sparse-Group Lasso", Advances in Neural - Information Processing Systems (NIPS), 2016. + .. footbibliography:: """ X = phiT(Z) GX = np.dot(G[:, active_set], X) @@ -1128,15 +1124,13 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, w_space=None, w_time=None, n_orient=1, maxit=200, tol=1e-8, dgap_freq=10, perc=None, timeit=True, verbose=None): - - # First make G fortran for faster access to blocks of columns - G = np.asfortranarray(G) - n_sources = G.shape[1] n_positions = n_sources // n_orient - Gd = G.copy() - G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions))) + # First make G fortran for faster access to blocks of columns + Gd = np.asfortranarray(G) + G = np.ascontiguousarray( + Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1)) R = M.copy() # residual active = np.where(active_set[::n_orient])[0] @@ -1177,17 +1171,18 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, R += np.dot(G_j, X_j) X_j_new += X_j - rows_norm = linalg.norm(X_j_new, 'fro') + rows_norm = np.linalg.norm(X_j_new, 'fro') if rows_norm <= alpha_space_lc[jj]: if was_active: Z[jj] = 0.0 active_set_j[:] = False else: + GTR_phi = phi(GTR) if was_active: - Z_j_new = Z_j + phi(GTR) + Z_j_new = Z_j + GTR_phi else: - Z_j_new = phi(GTR) - col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0)) + Z_j_new = GTR_phi + col_norm = np.linalg.norm(Z_j_new, axis=0) if np.all(col_norm <= alpha_time_lc[jj]): Z[jj] = 0.0 @@ -1213,7 +1208,8 @@ def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space, Z_j_new *= shrink Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy() active_set_j[:] = True - R -= np.dot(G_j, phiT(Z[jj])) + Z_j_phi_T = phiT(Z[jj]) + R -= np.dot(G_j, Z_j_phi_T) if (i + 1) % dgap_freq == 0: Zd = np.vstack([Z[pos] for pos in range(n_positions) @@ -1335,6 +1331,8 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, dgap_freq=10, verbose=None): """Solve TF L21+L1 inverse solver with BCD and active set approach. + See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`. + Parameters ---------- M : array, shape (n_sensors, n_times) @@ -1386,24 +1384,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, References ---------- - .. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. Kowalski - "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with - non-stationary source activations", - Neuroimage, Volume 70, pp. 410-422, 15 April 2013. - DOI: 10.1016/j.neuroimage.2012.12.051 - - .. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hämäläinen, M. 
Kowalski - "Functional Brain Imaging with M/EEG Using Structured Sparsity in - Time-Frequency Dictionaries", - Proceedings Information Processing in Medical Imaging - Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011. - DOI: 10.1007/978-3-642-22092-0_49 - - .. [3] Y. Bekhti, D. Strohmeier, M. Jas, R. Badeau, A. Gramfort. - "M/EEG source localization with multiscale time-frequency dictionaries", - 6th International Workshop on Pattern Recognition in Neuroimaging - (PRNI), 2016. - DOI: 10.1109/PRNI.2016.7552337 + .. footbibliography:: """ n_sensors, n_times = M.shape n_sensors, n_sources = G.shape @@ -1419,7 +1400,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 n_coefs = n_steps * n_freqs - phi = _Phi(wsize, tstep, n_coefs) + phi = _Phi(wsize, tstep, n_coefs, n_times) phiT = _PhiT(tstep, n_freqs, n_steps, n_times) if n_orient == 1: @@ -1428,7 +1409,7 @@ def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) logger.info("Using block coordinate descent with active set approach") X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set( @@ -1518,7 +1499,7 @@ def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, n_steps = np.ceil(n_times / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 n_coefs = n_steps * n_freqs - phi = _Phi(wsize, tstep, n_coefs) + phi = _Phi(wsize, tstep, n_coefs, n_times) phiT = _PhiT(tstep, n_freqs, n_steps, n_times) if n_orient == 1: @@ -1527,7 +1508,7 @@ def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time, lc = np.empty(n_positions) for j in range(n_positions): G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)] - lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) # space and time penalties, and inverse of their derivatives: def g_space(Z): @@ -1576,7 +1557,7 @@ def g_time_prime_inv(Z): l21_penalty = np.sum(g_space(Z.copy())) l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum() - p_obj = (0.5 * linalg.norm(M - np.dot(G[:, active_set], X), + p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), 'fro') ** 2. + alpha_space * l21_penalty + alpha_time * l1_penalty) E.append(p_obj) @@ -1591,7 +1572,7 @@ def g_time_prime_inv(Z): print('Convergence reached after %d reweightings!' % k) break else: - p_obj = 0.5 * linalg.norm(M) ** 2. + p_obj = 0.5 * np.linalg.norm(M) ** 2. 
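# Minimal, runnable sketch of the idea behind the reworked _Phi/_PhiT classes
# above: the STFT is linear in the signal, so applying it once to an identity
# matrix yields a dense operator `op` with stft(x) == x @ op (after the same
# reshape), which lets the solvers replace repeated stft()/istft() calls with
# plain matrix products. Sizes below are illustrative; `stft` is the private
# helper this module already imports.
import numpy as np
from mne.time_frequency._stft import stft

n_times, wsize, tstep = 256, 64, 4
op = stft(np.eye(n_times), wsize, tstep, verbose=False).reshape(n_times, -1)
x = np.random.RandomState(0).randn(3, n_times)
expected = stft(x, wsize, tstep, verbose=False).reshape(3, -1)
np.testing.assert_allclose(x @ op, expected, atol=1e-10)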
E.append(p_obj) logger.info('Iteration %d: as_size=%d, E=%f' % ( k + 1, active_set.sum() / n_orient, p_obj)) diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py index 08b858a9a49..1d22022f539 100644 --- a/mne/inverse_sparse/tests/test_mxne_optim.py +++ b/mne/inverse_sparse/tests/test_mxne_optim.py @@ -139,7 +139,7 @@ def test_norm_epsilon(): n_steps = np.ceil(n_times / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 n_coefs = n_steps * n_freqs - phi = _Phi(wsize, tstep, n_coefs) + phi = _Phi(wsize, tstep, n_coefs, n_times) Y = np.zeros(n_steps * n_freqs) l1_ratio = 0.03 @@ -186,7 +186,7 @@ def test_dgapl21l1(): n_steps = np.ceil(n_times / tstep.astype(float)).astype(int) n_freqs = wsize // 2 + 1 n_coefs = n_steps * n_freqs - phi = _Phi(wsize, tstep, n_coefs) + phi = _Phi(wsize, tstep, n_coefs, n_times) phiT = _PhiT(tstep, n_freqs, n_steps, n_times) for l1_ratio in [0.05, 0.1]: diff --git a/mne/io/__init__.py b/mne/io/__init__.py index b4535295c0b..cc066822fef 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -43,6 +43,7 @@ from .egi import read_raw_egi, read_evokeds_mff from .kit import read_raw_kit, read_epochs_kit from .fiff import read_raw_fif +from .nedf import read_raw_nedf from .nicolet import read_raw_nicolet from .artemis123 import read_raw_artemis123 from .eeglab import read_raw_eeglab, read_epochs_eeglab diff --git a/mne/io/_digitization.py b/mne/io/_digitization.py index 19c07a7cb8c..f010e24e629 100644 --- a/mne/io/_digitization.py +++ b/mne/io/_digitization.py @@ -406,7 +406,7 @@ def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, 'coord_frame': coord_frame}) if extra_points is not None: extra_points = np.asarray(extra_points) - if extra_points.shape[1] != 3: + if len(extra_points) and extra_points.shape[1] != 3: raise ValueError('Points should have the shape (n_points, 3) ' 'instead of %s' % (extra_points.shape,)) for idx, point in enumerate(extra_points): diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py index 2bfef58f644..588e06c3613 100644 --- a/mne/io/_read_raw.py +++ b/mne/io/_read_raw.py @@ -12,7 +12,7 @@ read_raw_fif, read_raw_eeglab, read_raw_cnt, read_raw_egi, read_raw_eximia, read_raw_nirx, read_raw_fieldtrip, read_raw_artemis123, read_raw_nicolet, read_raw_kit, - read_raw_ctf) + read_raw_ctf, read_raw_boxy) from ..utils import fill_doc @@ -27,22 +27,26 @@ def _read_unsupported(fname, **kwargs): # supported read file formats -supported = {".edf": read_raw_edf, - ".bdf": read_raw_bdf, - ".gdf": read_raw_gdf, - ".vhdr": read_raw_brainvision, - ".fif": read_raw_fif, - ".fif.gz": read_raw_fif, - ".set": read_raw_eeglab, - ".cnt": read_raw_cnt, - ".mff": read_raw_egi, - ".nxe": read_raw_eximia, - ".hdr": read_raw_nirx, - ".mat": read_raw_fieldtrip, - ".bin": read_raw_artemis123, - ".data": read_raw_nicolet, - ".sqd": read_raw_kit, - ".ds": read_raw_ctf} +supported = { + ".edf": read_raw_edf, + ".bdf": read_raw_bdf, + ".gdf": read_raw_gdf, + ".vhdr": read_raw_brainvision, + ".fif": read_raw_fif, + ".fif.gz": read_raw_fif, + ".set": read_raw_eeglab, + ".cnt": read_raw_cnt, + ".mff": read_raw_egi, + ".nxe": read_raw_eximia, + ".hdr": read_raw_nirx, + ".mat": read_raw_fieldtrip, + ".bin": read_raw_artemis123, + ".data": read_raw_nicolet, + ".sqd": read_raw_kit, + ".con": read_raw_kit, + ".ds": read_raw_ctf, + ".txt": read_raw_boxy, +} # known but unsupported file formats suggested = {".vmrk": partial(_read_unsupported, suggest=".vhdr"), @@ -56,26 +60,35 @@ def _read_unsupported(fname, 
**kwargs): def read_raw(fname, *, preload=False, verbose=None, **kwargs): """Read raw file. + This function is a convenient wrapper for readers defined in `mne.io`. The + correct reader is automatically selected based on the detected file format. + All function arguments are passed to the respective reader. + + The following readers are currently supported: + + `~mne.io.read_raw_artemis123`, `~mne.io.read_raw_bdf`, + `~mne.io.read_raw_boxy`, `~mne.io.read_raw_brainvision`, + `~mne.io.read_raw_cnt`, `~mne.io.read_raw_ctf`, `~mne.io.read_raw_edf`, + `~mne.io.read_raw_eeglab`, `~mne.io.read_raw_egi`, + `~mne.io.read_raw_eximia`, `~mne.io.read_raw_fieldtrip`, + `~mne.io.read_raw_fif`, `~mne.io.read_raw_gdf`, `~mne.io.read_raw_kit`, + `~mne.io.read_raw_nicolet`, and `~mne.io.read_raw_nirx`. + Parameters ---------- - fname : str - File name to load. + fname : path-like + Name of the file to read. %(preload)s %(verbose)s **kwargs - Keyword arguments to pass to the underlying reader. For details, see - the arguments of the reader for the underlying file format. + Additional keyword arguments to pass to the underlying reader. For + details, see the arguments of the reader for the respective file + format. Returns ------- raw : mne.io.Raw Raw object. - - Notes - ----- - This function is a wrapper for specific read_raw_xxx readers defined in the - readers dict. If it does not work with a specific file, try using a - dedicated reader function (read_raw_xxx) instead. """ ext = "".join(Path(fname).suffixes) if ext in readers: diff --git a/mne/io/array/array.py b/mne/io/array/array.py index 47477c4fbf0..945938541fe 100644 --- a/mne/io/array/array.py +++ b/mne/io/array/array.py @@ -42,7 +42,7 @@ class RawArray(BaseRaw): Notes ----- Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index 4d6966ed67e..3c211b1a723 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -28,11 +28,15 @@ def test_long_names(): info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error') data = np.empty((2, 1000)) raw = RawArray(data, info) + assert raw.ch_names == ['a' * 15 + 'b', 'a' * 16] + # and a way to get the old behavior + raw.rename_channels({k: k[:13] for k in raw.ch_names}, + allow_duplicates=True, verbose='error') assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1'] info = create_info(['a' * 16] * 11, 1000., verbose='error') data = np.empty((11, 1000)) raw = RawArray(data, info) - assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)] + assert raw.ch_names == ['a' * 16 + '-%s' % ii for ii in range(11)] def test_array_copy(): @@ -83,8 +87,9 @@ def test_array_raw(): types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) - types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels + types.extend(['ecog', 'seeg', 'hbo']) # really 4 meg channels types.extend(['stim'] * 9) + types.extend(['dbs']) # really eeg channel types.extend(['eeg'] * 60) picks = np.concatenate([pick_types(raw.info, meg=True)[::20], pick_types(raw.info, meg=False, stim=True), @@ -139,7 +144,8 @@ def test_array_raw(): # plotting raw2.plot() - raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False) + raw2.plot_psd(tmax=2., average=True, n_fft=1024, + spatial_colors=False) plt.close('all') # epoching diff --git a/mne/io/artemis123/artemis123.py b/mne/io/artemis123/artemis123.py 
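# Hedged usage sketch for the extension-based dispatch documented above; the
# file name is a placeholder, and any of the listed readers would be selected
# the same way from the file suffix.
from mne.io import read_raw
raw = read_raw("sub-01_task-rest_raw.fif", preload=True)  # ".fif" -> read_raw_fif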
index 06a0b67f909..8b725d7393d 100644 --- a/mne/io/artemis123/artemis123.py +++ b/mne/io/artemis123/artemis123.py @@ -8,7 +8,7 @@ import calendar from .utils import _load_mne_locs, _read_pos -from ...utils import logger, warn, verbose +from ...utils import logger, warn, verbose, _check_fname from ..utils import _read_segments_file from ..base import BaseRaw from ..meas_info import _empty_info @@ -308,6 +308,7 @@ def __init__(self, input_fname, preload=False, verbose=None, from scipy.spatial.distance import cdist from ...chpi import (compute_chpi_amplitudes, compute_chpi_locs, _fit_coil_order_dev_head_trans) + input_fname = _check_fname(input_fname, 'read', True, 'input_fname') fname, ext = op.splitext(input_fname) if ext == '.txt': input_fname = fname + '.bin' diff --git a/mne/io/base.py b/mne/io/base.py index 448ee910988..787ea274a1a 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -6,6 +6,7 @@ # Teon Brooks # Marijn van Vliet # Stefan Appelhoff +# Clemens Brunner # # License: BSD (3-clause) @@ -14,16 +15,21 @@ import os import os.path as op import shutil +from collections import defaultdict import numpy as np from .constants import FIFF -from .utils import _construct_bids_filename, _check_orig_units -from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx) +from .utils import _construct_bids_filename, _check_orig_units, \ + _get_als_coords_from_chs +from ..utils.check import _infer_check_export_fmt +from .pick import (pick_types, pick_channels, pick_info, _picks_to_idx, + channel_type) from .meas_info import write_meas_info from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin from ..channels.channels import (ContainsMixin, UpdateChannelsMixin, - SetChannelsMixin, InterpolationMixin) + SetChannelsMixin, InterpolationMixin, + _unit2human) from .compensator import set_current_comp, make_compensator from .write import (start_file, end_file, start_block, end_block, write_dau_pack16, write_float, write_double, @@ -43,10 +49,13 @@ copy_function_doc_to_method_doc, _validate_type, _check_preload, _get_argvalues, _check_option, _build_data_frame, _convert_times, _scale_dataframe_data, - _check_time_format) + _check_time_format, _arange_div, + _check_eeglabio_installed) +from ..defaults import _handle_default from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo, _RAW_CLIP_DEF from ..event import find_events, concatenate_events from ..annotations import Annotations, _combine_annotations, _sync_onset +from ..data.html_templates import raw_template class TimeMixin(object): @@ -224,7 +233,7 @@ def __init__(self, info, preload=False, # this was artificially added by the IO procedure, so remove it ch_names = list(info['ch_names']) if ('STI 014' in ch_names) and not \ - (self.filenames[0].endswith('.fif')): + (self.filenames[0].endswith('.fif')): ch_names.remove('STI 014') # Each channel in the data must have a corresponding channel in @@ -244,7 +253,6 @@ def __init__(self, info, preload=False, self._dtype_ = dtype self.set_annotations(None) # If we have True or a string, actually do the preloading - self._update_times() if load_from_disk: self._preload_data(preload) self._init_kwargs = _get_argvalues() @@ -493,7 +501,7 @@ def _check_bad_segment(self, start, stop, picks, for descr in annot.description[overlaps]: if descr.lower().startswith('bad'): return descr - return self[picks, start:stop][0] + return self._getitem((picks, slice(start, stop)), return_times=False) @verbose def load_data(self, verbose=None): @@ -519,8 +527,7 @@ def load_data(self, verbose=None): 
self._preload_data(True) return self - @verbose - def _preload_data(self, preload, verbose=None): + def _preload_data(self, preload): """Actually preload the data.""" data_buffer = preload if isinstance(preload, (bool, np.bool_)) and not preload: @@ -534,12 +541,6 @@ def _preload_data(self, preload, verbose=None): self._comp = None # no longer needed self.close() - def _update_times(self): - """Update times.""" - self._times = np.arange(self.n_times) / float(self.info['sfreq']) - # make it immutable - self._times.flags.writeable = False - @property def _first_time(self): return self.first_samp / float(self.info['sfreq']) @@ -614,7 +615,9 @@ def filenames(self): """The filenames used.""" return tuple(self._filenames) - def set_annotations(self, annotations, emit_warning=True): + @verbose + def set_annotations(self, annotations, emit_warning=True, + on_missing='raise', *, verbose=None): """Setter for annotations. This setter checks if they are inside the data range. @@ -625,7 +628,9 @@ def set_annotations(self, annotations, emit_warning=True): Annotations to set. If None, the annotations is defined but empty. emit_warning : bool - Whether to emit warnings when limiting or omitting annotations. + Whether to emit warnings when cropping or omitting annotations. + %(on_missing_ch_names)s + %(verbose_meth)s Returns ------- @@ -651,6 +656,7 @@ def set_annotations(self, annotations, emit_warning=True): delta = 1. / self.info['sfreq'] new_annotations = annotations.copy() + new_annotations._prune_ch_names(self.info, on_missing) if annotations.orig_time is None: new_annotations.crop(0, self.times[-1] + delta, emit_warning=emit_warning) @@ -767,14 +773,25 @@ def __getitem__(self, item): >>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP """ # noqa: E501 + return self._getitem(item) + + def _getitem(self, item, return_times=True): sel, start, stop = self._parse_get_set_params(item) if self.preload: data = self._data[sel, start:stop] else: data = self._read_segment(start=start, stop=stop, sel=sel, projector=self._projector) - times = self.times[start:stop] - return data, times + + if return_times: + # Rather than compute the entire thing just compute the subset + # times = self.times[start:stop] + # stop can be None here so don't use it directly + times = np.arange(start, start + data.shape[1], dtype=float) + times /= self.info['sfreq'] + return data, times + else: + return data def __setitem__(self, item, value): """Set raw data content.""" @@ -785,7 +802,8 @@ def __setitem__(self, item, value): @verbose def get_data(self, picks=None, start=0, stop=None, - reject_by_annotation=None, return_times=False, verbose=None): + reject_by_annotation=None, return_times=False, units=None, + verbose=None): """Get data in the given range. Parameters @@ -802,6 +820,22 @@ def get_data(self, picks=None, start=0, stop=None, 'bad' are omitted. If 'NaN', the bad samples are filled with NaNs. return_times : bool Whether to return times as well. Defaults to False. + units : str | dict | None + Specify the unit(s) that the data should be returned in. If + ``None`` (default), the data is returned in the + channel-type-specific default units, which are SI units (see + :ref:`units` and :term:`data channels`). If a string, must be a + sub-multiple of SI units that will be used to scale the data from + all channels of the type associated with that unit. This only works + if the data contains one channel type that has a unit (unitless + channel types are left unchanged). 
For example if there are only + EEG and STIM channels, ``units='uV'`` will scale EEG channels to + micro-Volts while STIM channels will be unchanged. Finally, if a + dictionary is provided, keys must be channel types, and values must + be units to scale the data of that channel type to. For example + ``dict(grad='fT/cm', mag='fT')`` will scale the corresponding types + accordingly, but all other channel types will remain in their + channel-type-specific default unit. %(verbose_meth)s Returns @@ -817,13 +851,49 @@ def get_data(self, picks=None, start=0, stop=None, .. versionadded:: 0.14.0 """ picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + + # Convert into the specified unit + _validate_type(units, types=(None, str, dict), item_name="units") + needs_conversion = False + ch_factors = np.ones(len(picks)) + si_units = _handle_default('si_units') + # Convert to dict if str units + if isinstance(units, str): + # Check that there is only one channel type + ch_types = self.get_channel_types(picks=picks, unique=True) + unit_ch_type = list(set(ch_types) & set(si_units.keys())) + if len(unit_ch_type) > 1: + raise ValueError('"units" cannot be str if there is more than ' + 'one channel type with a unit ' + f'{unit_ch_type}.') + units = {unit_ch_type[0]: units} # make the str argument a dict + # Loop over the dict to get channel factors + if isinstance(units, dict): + for ch_type, ch_unit in units.items(): + # Get the scaling factors + scaling = _get_scaling(ch_type, ch_unit) + if scaling != 1: + needs_conversion = True + ch_types = self.get_channel_types(picks=picks) + indices = [i_ch for i_ch, ch in enumerate(ch_types) + if ch == ch_type] + ch_factors[indices] *= scaling + # convert to ints picks = np.atleast_1d(np.arange(self.info['nchan'])[picks]) start = 0 if start is None else start stop = min(self.n_times if stop is None else stop, self.n_times) if len(self.annotations) == 0 or reject_by_annotation is None: - data, times = self[picks, start:stop] - return (data, times) if return_times else data + getitem = self._getitem( + (picks, slice(start, stop)), return_times=return_times) + if return_times: + data, times = getitem + if needs_conversion: + data *= ch_factors[:, np.newaxis] + return data, times + if needs_conversion: + getitem *= ch_factors[:, np.newaxis] + return getitem _check_option('reject_by_annotation', reject_by_annotation.lower(), ['omit', 'nan']) onsets, ends = _annotations_starts_stops(self, ['BAD']) @@ -832,6 +902,8 @@ def get_data(self, picks=None, start=0, stop=None, ends = np.minimum(ends[keep], stop) if len(onsets) == 0: data, times = self[picks, start:stop] + if needs_conversion: + data *= ch_factors[:, np.newaxis] if return_times: return data, times return data @@ -873,58 +945,30 @@ def get_data(self, picks=None, start=0, stop=None, else: data, times = self[picks, start:stop] + if needs_conversion: + data *= ch_factors[:, np.newaxis] if return_times: return data, times return data @verbose def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, - channel_wise=True, verbose=None, *args, **kwargs): + channel_wise=True, verbose=None, **kwargs): """Apply a function to a subset of channels. - The function "fun" is applied to the channels defined in "picks". The - data of the Raw object is modified inplace. If the function returns - a different data type (e.g. numpy.complex) it must be specified using - the dtype parameter, which causes the data type used for representing - the raw data to change. - - The Raw object has to have the data loaded e.g. 
with ``preload=True`` - or ``self.load_data()``. - - .. note:: If n_jobs > 1, more memory is required as - ``len(picks) * n_times`` additional time points need to - be temporaily stored in memory. - - .. note:: If the data type changes (dtype != None), more memory is - required since the original and the converted data needs - to be stored in memory. + %(applyfun_summary_raw)s Parameters ---------- - fun : callable - A function to be applied to the channels. The first argument of - fun has to be a timeseries (numpy.ndarray). The function must - operate on an array of shape ``(n_times,)`` if - ``channel_wise=True`` and ``(len(picks), n_times)`` otherwise. - The function must return an ndarray shaped like its input. + %(applyfun_fun)s %(picks_all_data_noref)s - dtype : numpy.dtype (default: None) - Data type to use for raw data after applying the function. If None - the data type is not modified. - n_jobs : int (default: 1) - Number of jobs to run in parallel. Ignored if ``channel_wise`` is - False. - channel_wise : bool (default: True) - Whether to apply the function to each channel individually. If - False, the function will be applied to all channels at once. + %(applyfun_dtype)s + %(n_jobs)s + %(applyfun_chwise)s .. versionadded:: 0.18 %(verbose_meth)s - *args : list - Additional positional arguments to pass to fun (first pos. argument - of fun is the timeseries of a channel). - **kwargs : dict - Keyword arguments to pass to fun. + %(kwarg_fun)s Returns ------- @@ -946,17 +990,17 @@ def apply_function(self, fun, picks=None, dtype=None, n_jobs=1, # modify data inplace to save memory for idx in picks: self._data[idx, :] = _check_fun(fun, data_in[idx, :], - *args, **kwargs) + **kwargs) else: # use parallel function parallel, p_fun, _ = parallel_func(_check_fun, n_jobs) data_picks_new = parallel( - p_fun(fun, data_in[p], *args, **kwargs) for p in picks) + p_fun(fun, data_in[p], **kwargs) for p in picks) for pp, p in enumerate(picks): self._data[p, :] = data_picks_new[pp] else: self._data[picks, :] = _check_fun( - fun, data_in[picks, :], *args, **kwargs) + fun, data_in[picks, :], **kwargs) return self @@ -1143,7 +1187,9 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, if stim_picks is None: stim_picks = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) - stim_picks = np.asanyarray(stim_picks) + else: + stim_picks = _picks_to_idx(self.info, stim_picks, exclude=(), + with_ref_meg=False) kwargs = dict(up=sfreq, down=o_sfreq, npad=npad, window=window, n_jobs=n_jobs, pad=pad) @@ -1191,7 +1237,6 @@ def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None, lowpass = self.info.get('lowpass') lowpass = np.inf if lowpass is None else lowpass self.info['lowpass'] = min(lowpass, sfreq / 2.) - self._update_times() # See the comment above why we ignore all errors here. if events is None: @@ -1275,7 +1320,6 @@ def crop(self, tmin=0.0, tmax=None, include_tmax=True): if self.preload: # slice and copy to avoid the reference to large array self._data = self._data[:, smin:smax + 1].copy() - self._update_times() if self.annotations.orig_time is None: self.annotations.onset -= tmin @@ -1296,8 +1340,12 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, fname : str File name of the new dataset. This has to be a new filename unless data have been preloaded. Filenames should end with - raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif, - raw_tsss.fif.gz, or _meg.fif. 
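# Hedged usage sketch for the two Raw APIs documented above (the new `units`
# argument of get_data() and the now keyword-only apply_function()); `raw`
# stands for an already-loaded Raw instance containing MEG and EEG channels.
import numpy as np
eeg_uv = raw.get_data(picks='eeg', units='uV')  # EEG returned in microvolts
meg, times = raw.get_data(picks='meg', units=dict(grad='fT/cm', mag='fT'),
                          return_times=True)
raw.apply_function(np.tanh, picks='eeg')  # applied channel-wise, in place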
+ ``raw.fif`` (common raw data), ``raw_sss.fif`` + (Maxwell-filtered continuous data), + ``raw_tsss.fif`` (temporally signal-space-separated data), + ``_meg.fif`` (common MEG data), ``_eeg.fif`` (common EEG data), + or ``_ieeg.fif`` (common intracranial EEG data). You may also + append an additional ``.gz`` suffix to enable gzip compression. %(picks_all)s %(raw_tmin)s %(raw_tmax)s @@ -1322,9 +1370,7 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, and neither complex data types nor real data stored as 'double' can be loaded with the MNE command-line tools. See raw.orig_format to determine the format the original data were stored in. - overwrite : bool - If True, the destination file (if it exists) will be overwritten. - If False (default), an error will be raised if the file exists. + %(overwrite)s To overwrite original file (the same one that was loaded), data must be preloaded upon reading. split_size : str | int @@ -1350,10 +1396,12 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, or all forms of SSS). It is recommended not to concatenate and then save raw files for this reason. """ - fname = op.realpath(fname) - check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', - 'raw.fif.gz', 'raw_sss.fif.gz', - 'raw_tsss.fif.gz', '_meg.fif')) + fname = op.abspath(fname) + endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', + '_meg.fif', '_eeg.fif', '_ieeg.fif') + endings += tuple([f'{e}.gz' for e in endings]) + endings_err = ('.fif', '.fif.gz') + check_fname(fname, 'raw', endings, endings_err=endings_err) split_size = _get_split_size(split_size) if not self.preload and fname in self._filenames: @@ -1405,6 +1453,58 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, start, stop, buffer_size, projector, drop_small_buffer, split_size, split_naming, 0, None, overwrite) + @verbose + def export(self, fname, fmt='auto', verbose=None): + """Export Raw to external formats. 
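# Hedged usage sketch for the export() method introduced here: per the body
# below, only the EEGLAB .set format is currently wired up (via the optional
# eeglabio package), while EDF and BrainVision raise NotImplementedError. The
# output file name is a placeholder; with fmt='auto' the format is inferred
# from the suffix.
raw.export("sub-01_task-rest_eeg.set")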
+ + Supported formats: EEGLAB (set, uses :mod:`eeglabio`) + %(export_warning)s + + Parameters + ---------- + %(export_params_fname)s + %(export_params_fmt)s + %(verbose)s + + Notes + ----- + %(export_eeglab_note)s + """ + supported_export_formats = { # format : extensions + 'eeglab': ('set',), + 'edf': ('edf',), + 'brainvision': ('eeg', 'vmrk', 'vhdr',) + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + if fmt == 'eeglab': + _check_eeglabio_installed() + import eeglabio.raw + # load data first + self.load_data() + + # remove extra epoc and STI channels + drop_chs = ['epoc'] + if not (self.filenames[0].endswith('.fif')): + drop_chs.append('STI 014') + + ch_names = [ch for ch in self.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(self.info['chs'], + drop_chs) + + annotations = [self.annotations.description, + self.annotations.onset, + self.annotations.duration] + eeglabio.raw.export_set(fname, data=self.get_data(picks=ch_names), + sfreq=self.info['sfreq'], + ch_names=ch_names, + ch_locs=cart_coords, + annotations=annotations) + elif fmt == 'edf': + raise NotImplementedError('Export to EDF format not implemented.') + elif fmt == 'brainvision': + raise NotImplementedError('Export to BrainVision not implemented.') + def _tmin_tmax_to_start_stop(self, tmin, tmax): start = int(np.floor(tmin * self.info['sfreq'])) @@ -1443,7 +1543,8 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, picks=None, ax=None, color='black', xscale='linear', area_mode='std', area_alpha=0.33, dB=True, estimate='auto', show=True, n_jobs=1, average=False, line_alpha=None, - spatial_colors=True, sphere=None, verbose=None): + spatial_colors=True, sphere=None, window='hamming', + verbose=None): return plot_raw_psd(self, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, proj=proj, n_fft=n_fft, n_overlap=n_overlap, reject_by_annotation=reject_by_annotation, @@ -1452,7 +1553,7 @@ def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, dB=dB, estimate=estimate, show=show, n_jobs=n_jobs, average=average, line_alpha=line_alpha, spatial_colors=spatial_colors, sphere=sphere, - verbose=verbose) + window=window, verbose=verbose) @copy_function_doc_to_method_doc(plot_raw_psd_topo) def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False, @@ -1476,7 +1577,9 @@ def ch_names(self): @property def times(self): """Time points.""" - return self._times + out = _arange_div(self.n_times, float(self.info['sfreq'])) + out.flags['WRITEABLE'] = False + return out @property def n_times(self): @@ -1497,7 +1600,6 @@ def __len__(self): >>> len(raw) # doctest: +SKIP 1000 - """ return self.n_times @@ -1619,7 +1721,6 @@ def append(self, raws, preload=None): self._raw_extras += r._raw_extras self._filenames += r._filenames assert annotations.orig_time == self.info['meas_date'] - self._update_times() self.set_annotations(annotations) for edge_samp in edge_samps: onset = _sync_onset(self, (edge_samp) / self.info['sfreq'], True) @@ -1658,6 +1759,15 @@ def __repr__(self): # noqa: D105 size_str)) return "<%s | %s>" % (self.__class__.__name__, s) + def _repr_html_(self, caption=None): + basenames = [os.path.basename(f) for f in self._filenames] + m, s = divmod(self._last_time - self.first_time, 60) + h, m = divmod(m, 60) + duration = f'{int(h):02d}:{int(m):02d}:{int(s):02d}' + return raw_template.substitute( + info_repr=self.info._repr_html_(caption=caption), + filenames=basenames, duration=duration) + def add_events(self, events, stim_channel=None, 
replace=False): """Add events to stim channel. @@ -1764,6 +1874,86 @@ def to_data_frame(self, picks=None, index=None, default_index=['time']) return df + def describe(self, data_frame=False): + """Describe channels (name, type, descriptive statistics). + + Parameters + ---------- + data_frame : bool + If True, return results in a pandas.DataFrame. If False, only print + results. Columns 'ch', 'type', and 'unit' indicate channel index, + channel type, and unit of the remaining five columns. These columns + are 'min' (minimum), 'Q1' (first quartile or 25% percentile), + 'median', 'Q3' (third quartile or 75% percentile), and 'max' + (maximum). + + Returns + ------- + result : None | pandas.DataFrame + If data_frame=False, returns None. If data_frame=True, returns + results in a pandas.DataFrame (requires pandas). + """ + from scipy.stats import scoreatpercentile as q + nchan = self.info["nchan"] + + # describe each channel + cols = defaultdict(list) + cols["name"] = self.ch_names + for i in range(nchan): + ch = self.info["chs"][i] + data = self[i][0] + cols["type"].append(channel_type(self.info, i)) + cols["unit"].append(_unit2human[ch["unit"]]) + cols["min"].append(np.min(data)) + cols["Q1"].append(q(data, 25)) + cols["median"].append(np.median(data)) + cols["Q3"].append(q(data, 75)) + cols["max"].append(np.max(data)) + + if data_frame: # return data frame + import pandas as pd + df = pd.DataFrame(cols) + df.index.name = "ch" + return df + + # convert into commonly used units + scalings = _handle_default("scalings") + units = _handle_default("units") + for i in range(nchan): + unit = units.get(cols['type'][i]) + scaling = scalings.get(cols['type'][i], 1) + if scaling != 1: + cols['unit'][i] = unit + for col in ["min", "Q1", "median", "Q3", "max"]: + cols[col][i] *= scaling + + lens = {"ch": max(2, len(str(nchan))), + "name": max(4, max([len(n) for n in cols["name"]])), + "type": max(4, max([len(t) for t in cols["type"]])), + "unit": max(4, max([len(u) for u in cols["unit"]]))} + + # print description, start with header + print(self) + print(f"{'ch':>{lens['ch']}} " + f"{'name':<{lens['name']}} " + f"{'type':<{lens['type']}} " + f"{'unit':<{lens['unit']}} " + f"{'min':>8} " + f"{'Q1':>8} " + f"{'median':>8} " + f"{'Q3':>8} " + f"{'max':>8}") + # print description for each channel + for i in range(nchan): + msg = (f"{i:>{lens['ch']}} " + f"{cols['name'][i]:<{lens['name']}} " + f"{cols['type'][i].upper():<{lens['type']}} " + f"{cols['unit'][i]:<{lens['unit']}} ") + for col in ["min", "Q1", "median", "Q3"]: + msg += f"{cols[col][i]:>8.2f} " + msg += f"{cols['max'][i]:>8.2f}" + print(msg) + def _allocate_data(preload, shape, dtype): """Allocate data in memory or in memmap for preloading.""" @@ -1802,6 +1992,69 @@ def _convert_slice(sel): return sel +def _get_scaling(ch_type, target_unit): + """Return the scaling factor based on the channel type and a target unit. + + Parameters + ---------- + ch_type : str + The channel type. + target_unit : str + The target unit for the provided channel type. + + Returns + ------- + scaling : float + The scaling factor to convert from the si_unit (used by default for MNE + objects) to the target unit. + """ + scaling = 1. 
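# Hedged usage sketch for the describe() method added above; the printed
# columns are the ones listed in its docstring (name, type, unit, min, Q1,
# median, Q3, max), and `raw` is any Raw instance.
raw.describe()                          # prints the per-channel summary
stats = raw.describe(data_frame=True)   # pandas.DataFrame instead (needs pandas)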
+ si_units = _handle_default('si_units') + si_units_splitted = {key: si_units[key].split('/') for key in si_units} + prefixes = _handle_default('prefixes') + prefix_list = list(prefixes.keys()) + + # Check that the provided unit exists for the ch_type + unit_list = target_unit.split('/') + if ch_type not in si_units.keys(): + raise KeyError( + f'{ch_type} is not a channel type that can be scaled ' + 'from units.') + si_unit_list = si_units_splitted[ch_type] + if len(unit_list) != len(si_unit_list): + raise ValueError( + f'{target_unit} is not a valid unit for {ch_type}, use a ' + f'sub-multiple of {si_units[ch_type]} instead.') + for i, unit in enumerate(unit_list): + valid = [prefix + si_unit_list[i] + for prefix in prefix_list] + if unit not in valid: + raise ValueError( + f'{target_unit} is not a valid unit for {ch_type}, use a ' + f'sub-multiple of {si_units[ch_type]} instead.') + + # Get the scaling factors + for i, unit in enumerate(unit_list): + has_square = False + # XXX power normally not used as csd cannot get_data() + if unit[-1] == '²': + has_square = True + if unit == 'm' or unit == 'm²': + factor = 1. + elif unit[0] in prefixes.keys(): + factor = prefixes[unit[0]] + else: + factor = 1. + if factor != 1: + if has_square: + factor *= factor + if i == 0: + scaling = scaling * factor + elif i == 1: + scaling = scaling / factor + return scaling + + class _ReadSegmentFileProtector(object): """Ensure only _filenames, _raw_extras, and _read_segment_file are used.""" @@ -2155,13 +2408,13 @@ def _check_raw_compatibility(raw): """Ensure all instances of Raw have compatible parameters.""" for ri in range(1, len(raw)): if not isinstance(raw[ri], type(raw[0])): - raise ValueError('raw[%d] type must match' % ri) - if not raw[ri].info['nchan'] == raw[0].info['nchan']: - raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri) - if not raw[ri].info['bads'] == raw[0].info['bads']: - raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri) - if not raw[ri].info['sfreq'] == raw[0].info['sfreq']: - raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri) + raise ValueError(f'raw[{ri}] type must match') + for key in ('nchan', 'bads', 'sfreq'): + a, b = raw[ri].info[key], raw[0].info[key] + if a != b: + raise ValueError( + f'raw[{ri}].info[{key}] must match:\n' + f'{repr(a)} != {repr(b)}') if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']): raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri) if not all(raw[ri]._cals == raw[0]._cals): diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index a48efd67bf9..2ff5d3eaa4b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,7 +9,7 @@ from ..base import BaseRaw from ..meas_info import create_info from ..utils import _mult_cal_one -from ...utils import logger, verbose, fill_doc +from ...utils import logger, verbose, fill_doc, _check_fname from ...annotations import Annotations @@ -65,6 +65,7 @@ def __init__(self, fname, preload=False, verbose=None): raw_extras = dict() raw_extras['offsets'] = list() # keep track of our offsets sfreq = None + fname = _check_fname(fname, 'read', True, 'fname') with open(fname, 'r') as fid: line_num = 0 i_line = fid.readline() diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py index 5202f6933fc..d37e9c2126e 100644 --- a/mne/io/brainvision/brainvision.py +++ b/mne/io/brainvision/brainvision.py @@ -593,12 +593,14 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # But we still want to be able to double check the channel 
names # for alignment purposes, we keep track of the hardware setting idx idx_amp = idx + filter_list_has_ch_name = True if 'S o f t w a r e F i l t e r s' in settings: idx = settings.index('S o f t w a r e F i l t e r s') for idx, setting in enumerate(settings[idx + 1:], idx + 1): if re.match(r'#\s+Low Cutoff', setting): hp_col, lp_col = 1, 2 + filter_list_has_ch_name = False warn('Online software filter detected. Using software ' 'filter settings and ignoring hardware values') break @@ -641,8 +643,11 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # Correct shift for channel names with spaces # Header already gives 1 therefore has to be subtracted - ch_name_parts = re.split(divider, ch) - real_shift = shift + len(ch_name_parts) - 1 + if filter_list_has_ch_name: + ch_name_parts = re.split(divider, ch) + real_shift = shift + len(ch_name_parts) - 1 + else: + real_shift = shift line = re.split(divider, settings[idx + i]) highpass.append(line[hp_col + real_shift]) diff --git a/mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter_longname.vhdr b/mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter_longname.vhdr new file mode 100644 index 00000000000..003bebf2953 --- /dev/null +++ b/mne/io/brainvision/tests/data/test_old_layout_latin1_software_filter_longname.vhdr @@ -0,0 +1,156 @@ +Brain Vision Data Exchange Header File Version 1.0 +; copy of test_old_layout_latin1_software_filter.vhdr, sensor F3 manually renamed for name with spaces + +[Common Infos] +DataFile=test_old_layout_latin1_software_filter.eeg +MarkerFile=test_old_layout_latin1_software_filter.vmrk +DataFormat=BINARY +; Data orientation: VECTORIZED=ch1,pt1, ch1,pt2...,MULTIPLEXED=ch1,pt1, ch2,pt1 ... +DataOrientation=VECTORIZED +NumberOfChannels=29 +; Sampling interval in microseconds +SamplingInterval=4000 + +[Binary Infos] +BinaryFormat=IEEE_FLOAT_32 + +[Channel Infos] +; Each entry: Ch=,, +; , 15 assert_allclose(raw.info["lowpass"], raw.info["sfreq"] / 2) @@ -460,7 +464,7 @@ def test_invalid_date(tmpdir): def test_empty_chars(): """Test blank char support.""" - assert _edf_str_int(b'1819\x00 ') == 1819 + assert int(_edf_str(b'1819\x00 ')) == 1819 def _hp_lp_rev(*args, **kwargs): diff --git a/mne/io/edf/tests/test_gdf.py b/mne/io/edf/tests/test_gdf.py index 170c03c870e..0b58f00a7a1 100644 --- a/mne/io/edf/tests/test_gdf.py +++ b/mne/io/edf/tests/test_gdf.py @@ -3,6 +3,7 @@ # # License: BSD (3-clause) +from datetime import datetime, timezone, timedelta import os.path as op import shutil @@ -62,7 +63,14 @@ def test_gdf2_birthday(tmpdir): """Test reading raw GDF 2.x files.""" new_fname = str(tmpdir.join('temp.gdf')) shutil.copyfile(gdf2_path + '.gdf', new_fname) - d = int(3.1e15) # chosen by trial and error to give a reasonable age + # go back 44.5 years so the subject should show up as 44 + offset_edf = ( # to their ref + datetime.now(tz=timezone.utc) - + datetime(1, 1, 1, tzinfo=timezone.utc) + ) + offset_44_yr = offset_edf - timedelta(days=int(365 * 44.5)) # 44.5 yr ago + offset_44_yr_days = offset_44_yr.total_seconds() / (24 * 60 * 60) # days + d = (int(offset_44_yr_days) + 367) * 2 ** 32 # with their conversion with open(new_fname, 'r+b') as fid: fid.seek(176, 0) assert np.fromfile(fid, np.uint64, 1)[0] == 0 diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 555c271fbb8..53c0be36d2e 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -12,7 +12,7 @@ from ..constants import FIFF from ..meas_info import create_info from ..base import BaseRaw -from ...utils 
import logger, verbose, warn, fill_doc, Bunch +from ...utils import logger, verbose, warn, fill_doc, Bunch, _check_fname from ...channels import make_dig_montage from ...epochs import BaseEpochs from ...event import read_events @@ -22,7 +22,7 @@ CAL = 1e-6 -def _check_fname(fname, dataname): +def _check_eeglab_fname(fname, dataname): """Check whether the filename is valid. Check if the file extension is ``.fdt`` (older ``.dat`` being invalid) or @@ -61,9 +61,7 @@ def _check_load_mat(fname, uint16_codec): raise NotImplementedError( 'Loading an ALLEEG array is not supported. Please contact' 'mne-python developers for more information.') - if 'EEG' not in eeg: - raise ValueError('Could not find EEG array in the .set file.') - else: + if 'EEG' in eeg: # fields are contained in EEG structure eeg = eeg['EEG'] eeg = eeg.get('EEG', eeg) # handle nested EEG structure eeg = Bunch(**eeg) @@ -316,6 +314,7 @@ class RawEEGLAB(BaseRaw): @verbose def __init__(self, input_fname, eog=(), preload=False, uint16_codec=None, verbose=None): # noqa: D102 + input_fname = _check_fname(input_fname, 'read', True, 'input_fname') eeg = _check_load_mat(input_fname, uint16_codec) if eeg.trials != 1: raise TypeError('The number of trials is %d. It must be 1 for raw' @@ -323,11 +322,11 @@ def __init__(self, input_fname, eog=(), ' the .set file contains epochs.' % eeg.trials) last_samps = [eeg.pnts - 1] - info, eeg_montage, update_ch_names = _get_info(eeg, eog=eog) + info, eeg_montage, _ = _get_info(eeg, eog=eog) # read the data if isinstance(eeg.data, str): - data_fname = _check_fname(input_fname, eeg.data) + data_fname = _check_eeglab_fname(input_fname, eeg.data) logger.info('Reading %s' % data_fname) super(RawEEGLAB, self).__init__( @@ -454,6 +453,11 @@ def __init__(self, input_fname, events=None, event_id=None, tmin=0, raise ValueError('Both `events` and `event_id` must be ' 'None or not None') + if eeg.trials <= 1: + raise ValueError("The file does not seem to contain epochs " + "(trials less than 2). 
" + "You should try using read_raw_eeglab function.") + if events is None and eeg.trials > 1: # first extract the events and construct an event_id dict event_name, event_latencies, unique_ev = list(), list(), list() @@ -512,7 +516,7 @@ def __init__(self, input_fname, events=None, event_id=None, tmin=0, '(event id %i)' % (key, val)) if isinstance(eeg.data, str): - data_fname = _check_fname(input_fname, eeg.data) + data_fname = _check_eeglab_fname(input_fname, eeg.data) with open(data_fname, 'rb') as data_fid: data = np.fromfile(data_fid, dtype=np.float32) data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index d79e9aaa7f3..c4382ac897e 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -20,7 +20,7 @@ from mne.io import read_raw_eeglab from mne.io.tests.test_raw import _test_raw_reader from mne.datasets import testing -from mne.utils import requires_h5py, run_tests_if_main +from mne.utils import check_version from mne.annotations import events_from_annotations, read_annotations from mne.io.eeglab.tests._utils import _read_eeglab_montage @@ -33,7 +33,8 @@ epochs_fname_onefile_mat = op.join(base_dir, 'test_epochs_onefile.set') raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat] epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat] - +raw_fname_chanloc = op.join(base_dir, 'test_raw_chanloc.set') +raw_fname_2021 = op.join(base_dir, 'test_raw_2021.set') raw_fname_h5 = op.join(base_dir, 'test_raw_h5.set') raw_fname_onefile_h5 = op.join(base_dir, 'test_raw_onefile_h5.set') epochs_fname_h5 = op.join(base_dir, 'test_epochs_h5.set') @@ -41,25 +42,18 @@ raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5] epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5] -raw_fnames = [raw_fname_mat, raw_fname_onefile_mat, - raw_fname_h5, raw_fname_onefile_h5] montage_path = op.join(base_dir, 'test_chans.locs') -def _check_h5(fname): - if fname.endswith('_h5.set'): - try: - import h5py # noqa, analysis:ignore - except Exception: - raise SkipTest('h5py module required') +needs_h5 = pytest.mark.skipif(not check_version('h5py'), reason='Needs h5py') -@requires_h5py @testing.requires_testing_data -@pytest.mark.slowtest -@pytest.mark.parametrize( - 'fname', [raw_fname_mat, raw_fname_h5], ids=op.basename -) +@pytest.mark.parametrize('fname', [ + raw_fname_mat, + pytest.param(raw_fname_h5, marks=needs_h5), + raw_fname_chanloc, +], ids=op.basename) def test_io_set_raw(fname): """Test importing EEGLAB .set files.""" montage = _read_eeglab_montage(montage_path) @@ -67,17 +61,43 @@ def test_io_set_raw(fname): 'EEG {0:03d}'.format(ii) for ii in range(len(montage.ch_names)) ] - _test_raw_reader(read_raw_eeglab, input_fname=fname) + kws = dict(reader=read_raw_eeglab, input_fname=fname) + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = _test_raw_reader(**kws) + elif '_h5' in fname: # should be safe enough, and much faster + raw0 = read_raw_eeglab(fname, preload=True) + else: + raw0 = _test_raw_reader(**kws) + # test that preloading works - raw0 = read_raw_eeglab(input_fname=fname, preload=True) - raw0.set_montage(montage) - raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', - phase='zero') + if fname.endswith('test_raw_chanloc.set'): + raw0.set_montage(montage, on_missing='ignore') + # crop to check if the data has been properly preloaded; we cannot + # filter as the snippet 
of raw data is very short + raw0.crop(0, 1) + else: + raw0.set_montage(montage) + raw0.filter(1, None, l_trans_bandwidth='auto', filter_length='auto', + phase='zero') # test that using uint16_codec does not break stuff - raw0 = read_raw_eeglab(input_fname=fname, - preload=False, uint16_codec='ascii') - raw0.set_montage(montage) + read_raw_kws = dict(input_fname=fname, preload=False, uint16_codec='ascii') + if fname.endswith('test_raw_chanloc.set'): + with pytest.warns(RuntimeWarning, + match="The data contains 'boundary' events"): + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage, on_missing='ignore') + else: + raw0 = read_raw_eeglab(**read_raw_kws) + raw0.set_montage(montage) + + # Annotations + if fname != raw_fname_chanloc: + assert len(raw0.annotations) == 154 + assert set(raw0.annotations.description) == {'rt', 'square'} + assert_array_equal(raw0.annotations.duration, 0.) @testing.requires_testing_data @@ -223,11 +243,12 @@ def test_io_set_raw_more(tmpdir): np.array([np.nan, np.nan, np.nan])) -@pytest.mark.slowtest # slow-ish on Travis OSX @pytest.mark.timeout(60) # ~60 sec on Travis OSX -@requires_h5py @testing.requires_testing_data -@pytest.mark.parametrize('fnames', [epochs_mat_fnames, epochs_h5_fnames]) +@pytest.mark.parametrize('fnames', [ + epochs_mat_fnames, + pytest.param(epochs_h5_fnames, marks=[needs_h5, pytest.mark.slowtest]), +]) def test_io_set_epochs(fnames): """Test importing EEGLAB .set epochs files.""" epochs_fname, epochs_fname_onefile = fnames @@ -282,12 +303,16 @@ def test_degenerate(tmpdir): bad_epochs_fname) -@pytest.mark.parametrize("fname", raw_fnames) +@pytest.mark.parametrize("fname", [ + raw_fname_mat, + raw_fname_onefile_mat, + # We don't test the h5 varaints here because they are implicitly tested + # in test_io_set_raw +]) @pytest.mark.filterwarnings('ignore: Complex objects') @testing.requires_testing_data def test_eeglab_annotations(fname): """Test reading annotations in EEGLAB files.""" - _check_h5(fname) annotations = read_annotations(fname) assert len(annotations) == 154 assert set(annotations.description) == {'rt', 'square'} @@ -399,4 +424,16 @@ def test_position_information(one_chanpos_fname): EXPECTED_LOCATIONS_FROM_MONTAGE) -run_tests_if_main() +@testing.requires_testing_data +def test_io_set_raw_2021(): + """Test reading new default file format (no EEG struct).""" + assert "EEG" not in io.loadmat(raw_fname_2021) + _test_raw_reader(reader=read_raw_eeglab, input_fname=raw_fname_2021, + test_preloading=False, preload=True) + + +@testing.requires_testing_data +def test_read_single_epoch(): + """Test reading raw set file as an Epochs instance.""" + with pytest.raises(ValueError, match='trials less than 2'): + read_epochs_eeglab(raw_fname_mat) diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index 98432a9bb46..6e3d5652fcc 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -14,7 +14,7 @@ from ..utils import _read_segments_file, _create_chs from ..meas_info import _empty_info from ..constants import FIFF -from ...utils import verbose, logger, warn +from ...utils import verbose, logger, warn, _validate_type, _check_fname def _read_header(fid): @@ -92,9 +92,12 @@ def read_raw_egi(input_fname, eog=None, misc=None, channel_naming='E%d', verbose=None): """Read EGI simple binary as raw object. + .. note:: This function attempts to create a synthetic trigger channel. + See the Notes section below. + Parameters ---------- - input_fname : str + input_fname : path-like Path to the raw file. 
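# Editor's note: a minimal usage sketch (not part of the patch) of the new
# ``path-like`` support for ``input_fname`` documented above -- after this change
# ``read_raw_egi`` accepts both ``str`` and ``pathlib.Path``.  The file path below
# is hypothetical.
from pathlib import Path
import mne

fname = Path('~/mne_data/subject01.mff').expanduser()  # hypothetical recording
raw = mne.io.read_raw_egi(fname, preload=False)  # Path is converted to str internally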
Files with an extension .mff are automatically considered to be EGI's native MFF format files. eog : list or tuple @@ -135,8 +138,8 @@ def read_raw_egi(input_fname, eog=None, misc=None, Notes ----- The trigger channel names are based on the arbitrary user dependent event - codes used. However this function will attempt to generate a synthetic - trigger channel named ``STI 014`` in accordance with the general + codes used. However this function will attempt to generate a **synthetic + trigger channel** named ``STI 014`` in accordance with the general Neuromag / MNE naming pattern. The event_id assignment equals ``np.arange(n_events) + 1``. The resulting @@ -147,6 +150,8 @@ def read_raw_egi(input_fname, eog=None, misc=None, This step will fail if events are not mutually exclusive. """ + _validate_type(input_fname, 'path-like', 'input_fname') + input_fname = str(input_fname) if input_fname.endswith('.mff'): return _read_raw_egi_mff(input_fname, eog, misc, include, exclude, preload, channel_naming, verbose) @@ -161,6 +166,7 @@ class RawEGI(BaseRaw): def __init__(self, input_fname, eog=None, misc=None, include=None, exclude=None, preload=False, channel_naming='E%d', verbose=None): # noqa: D102 + input_fname = _check_fname(input_fname, 'read', True, 'input_fname') if eog is None: eog = [] if misc is None: diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index dff9a894069..3d024e83153 100644 --- a/mne/io/egi/egimff.py +++ b/mne/io/egi/egimff.py @@ -1,5 +1,6 @@ """EGI NetStation Load Function.""" +from collections import OrderedDict import datetime import math import os.path as op @@ -14,11 +15,11 @@ _get_gains, _block_r) from ..base import BaseRaw from ..constants import FIFF -from ..meas_info import _empty_info, create_info +from ..meas_info import _empty_info, create_info, _ensure_meas_date_none_or_dt from ..proj import setup_proj from ..utils import _create_chs, _mult_cal_one from ...annotations import Annotations -from ...utils import verbose, logger, warn, _check_option +from ...utils import verbose, logger, warn, _check_option, _check_fname from ...evoked import EvokedArray @@ -251,23 +252,43 @@ def _get_eeg_calibration_info(filepath, egi_info): def _read_locs(filepath, chs, egi_info): """Read channel locations.""" + from ...channels.montage import make_dig_montage fname = op.join(filepath, 'coordinates.xml') if not op.exists(fname): - return chs + return chs, None + reference_names = ('VREF', 'Vertex Reference') + dig_ident_map = { + 'Left periauricular point': 'lpa', + 'Right periauricular point': 'rpa', + 'Nasion': 'nasion', + } numbers = np.array(egi_info['numbers']) coordinates = parse(fname) sensors = coordinates.getElementsByTagName('sensor') + ch_pos = OrderedDict() + hsp = list() + nlr = dict() for sensor in sensors: + name_element = sensor.getElementsByTagName('name')[0].firstChild + name = '' if name_element is None else name_element.data nr = sensor.getElementsByTagName('number')[0].firstChild.data.encode() - id = np.where(numbers == nr)[0] - if len(id) == 0: - continue - loc = chs[id[0]]['loc'] - loc[0] = sensor.getElementsByTagName('x')[0].firstChild.data - loc[1] = sensor.getElementsByTagName('y')[0].firstChild.data - loc[2] = sensor.getElementsByTagName('z')[0].firstChild.data - loc /= 100. 
# cm -> m - return chs + coords = [float(sensor.getElementsByTagName(coord)[0].firstChild.data) + for coord in 'xyz'] + loc = np.array(coords) / 100 # cm -> m + # create dig entry + if name in dig_ident_map: + nlr[dig_ident_map[name]] = loc + else: + if name in reference_names: + ch_pos['EEG000'] = loc + # add location to channel entry + id_ = np.flatnonzero(numbers == nr) + if len(id_) == 0: + hsp.append(loc) + else: + ch_pos[chs[id_[0]]['ch_name']] = loc + mon = make_dig_montage(ch_pos=ch_pos, hsp=hsp, **nlr) + return chs, mon def _add_pns_channel_info(chs, egi_info, ch_names): @@ -363,6 +384,8 @@ def __init__(self, input_fname, eog=None, misc=None, include=None, exclude=None, preload=False, channel_naming='E%d', verbose=None): """Init the RawMff class.""" + input_fname = _check_fname(input_fname, 'read', True, 'input_fname', + need_dir=True) logger.info('Reading EGI MFF Header from %s...' % input_fname) egi_info = _read_header(input_fname) if eog is None: @@ -406,7 +429,8 @@ def __init__(self, input_fname, eog=None, misc=None, if isinstance(v, list): for k in v: if k not in event_codes: - raise ValueError('Could find event named "%s"' % k) + raise ValueError( + f'Could not find event named {repr(k)}') elif v is not None: raise ValueError('`%s` must be None or of type list' % kk) logger.info(' Synthesizing trigger channel "STI 014" ...') @@ -431,7 +455,7 @@ def __init__(self, input_fname, eog=None, misc=None, egi_info['year'], egi_info['month'], egi_info['day'], egi_info['hour'], egi_info['minute'], egi_info['second']) my_timestamp = time.mktime(my_time.timetuple()) - info['meas_date'] = (my_timestamp, 0) + info['meas_date'] = _ensure_meas_date_none_or_dt((my_timestamp, 0)) # First: EEG ch_names = [channel_naming % (i + 1) for i in @@ -453,7 +477,7 @@ def __init__(self, input_fname, eog=None, misc=None, ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) - chs = _read_locs(input_fname, chs, egi_info) + chs, mon = _read_locs(input_fname, chs, egi_info) sti_ch_idx = [i for i, name in enumerate(ch_names) if name.startswith('STI') or name in event_codes] for idx in sti_ch_idx: @@ -463,9 +487,10 @@ def __init__(self, input_fname, eog=None, misc=None, 'coil_type': FIFF.FIFFV_COIL_NONE, 'unit': FIFF.FIFF_UNIT_NONE}) chs = _add_pns_channel_info(chs, egi_info, ch_names) - info['chs'] = chs info._update_redundant() + if mon is not None: + info.set_montage(mon, on_missing='ignore') file_bin = op.join(input_fname, egi_info['eeg_fname']) egi_info['egi_events'] = egi_events @@ -826,10 +851,12 @@ def _read_evoked_mff(fname, condition, channel_naming='E%d', verbose=None): ch_coil = FIFF.FIFFV_COIL_EEG ch_kind = FIFF.FIFFV_EEG_CH chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ()) - chs = _read_locs(fname, chs, egi_info) + chs, mon = _read_locs(fname, chs, egi_info) # Update PNS channel info chs = _add_pns_channel_info(chs, egi_info, ch_names) info['chs'] = chs + if mon is not None: + info.set_montage(mon, on_missing='ignore') # Add bad channels to info info['description'] = category diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index 40e667cf006..2b476cb47ad 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -3,21 +3,22 @@ # simplified BSD-3 license +from pathlib import Path import os.path as op import os import shutil import numpy as np -from numpy.testing import assert_array_equal, assert_allclose, assert_equal +from numpy.testing import 
assert_array_equal, assert_allclose import pytest from scipy import io as sio - from mne import find_events, pick_types from mne.io import read_raw_egi, read_evokeds_mff -from mne.io.tests.test_raw import _test_raw_reader +from mne.io.constants import FIFF from mne.io.egi.egi import _combine_triggers -from mne.utils import run_tests_if_main, requires_version, object_diff +from mne.io.tests.test_raw import _test_raw_reader +from mne.utils import requires_version, object_diff from mne.datasets.testing import data_path, requires_testing_data base_dir = op.join(op.dirname(op.abspath(__file__)), 'data') @@ -59,11 +60,15 @@ ]) def test_egi_mff_pause(fname, skip_times, event_times): """Test EGI MFF with pauses.""" - with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): - raw = _test_raw_reader(read_raw_egi, input_fname=fname, - test_scaling=False, # XXX probably some bug - test_rank='less', - ) + if fname == egi_pause_w1337_fname: + # too slow to _test_raw_reader + raw = read_raw_egi(fname).load_data() + else: + with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + raw = _test_raw_reader(read_raw_egi, input_fname=fname, + test_scaling=False, # XXX probably some bug + test_rank='less', + ) assert raw.info['sfreq'] == 250. # true for all of these files assert len(raw.annotations) == len(skip_times) @@ -110,27 +115,38 @@ def test_io_egi_mff(): test_scaling=False, # XXX probably some bug ) assert raw.info['sfreq'] == 1000. - - assert_equal('eeg' in raw, True) + # The ref here is redundant, but we don't currently have a way in + # DigMontage to mark that a given channel is actually the ref so... + assert len(raw.info['dig']) == 133 # 129 eeg + 1 ref + 3 cardinal points + assert raw.info['dig'][0]['ident'] == 1 # EEG channel E1 + assert raw.info['dig'][3]['ident'] == 0 # Reference channel + assert raw.info['dig'][-1]['ident'] == 129 # Reference channel + ref_loc = raw.info['dig'][3]['r'] + eeg_picks = pick_types(raw.info, eeg=True) + assert len(eeg_picks) == 129 + for i in eeg_picks: + loc = raw.info['chs'][i]['loc'] + assert loc[:3].any(), loc[:3] + assert_array_equal(loc[3:6], ref_loc, err_msg=f'{i}') + + assert 'eeg' in raw eeg_chan = [c for c in raw.ch_names if 'EEG' in c] - assert_equal(len(eeg_chan), 129) - picks = pick_types(raw.info, eeg=True) - assert_equal(len(picks), 129) - assert_equal('STI 014' in raw.ch_names, True) + assert len(eeg_chan) == 129 + assert 'STI 014' in raw.ch_names events = find_events(raw, stim_channel='STI 014') - assert_equal(len(events), 8) - assert_equal(np.unique(events[:, 1])[0], 0) - assert (np.unique(events[:, 0])[0] != 0) - assert (np.unique(events[:, 2])[0] != 0) - - pytest.raises(ValueError, read_raw_egi, egi_mff_fname, include=['Foo'], - preload=False) - pytest.raises(ValueError, read_raw_egi, egi_mff_fname, exclude=['Bar'], - preload=False) + assert len(events) == 8 + assert np.unique(events[:, 1])[0] == 0 + assert np.unique(events[:, 0])[0] != 0 + assert np.unique(events[:, 2])[0] != 0 + + with pytest.raises(ValueError, match='Could not find event'): + read_raw_egi(egi_mff_fname, include=['Foo']) + with pytest.raises(ValueError, match='Could not find event'): + read_raw_egi(egi_mff_fname, exclude=['Bar']) for ii, k in enumerate(include, 1): - assert (k in raw.event_id) - assert (raw.event_id[k] == ii) + assert k in raw.event_id + assert raw.event_id[k] == ii def test_io_egi(): @@ -144,6 +160,11 @@ def test_io_egi(): with pytest.warns(RuntimeWarning, match='Did not find any event code'): raw = read_raw_egi(egi_fname, 
include=None) + + # The reader should accept a Path, too. + with pytest.warns(RuntimeWarning, match='Did not find any event code'): + raw = read_raw_egi(Path(egi_fname), include=None) + assert 'RawEGI' in repr(raw) data_read, t_read = raw[:256] assert_allclose(t_read, t) @@ -155,19 +176,19 @@ def test_io_egi(): test_scaling=False, # XXX probably some bug ) - assert_equal('eeg' in raw, True) + assert 'eeg' in raw eeg_chan = [c for c in raw.ch_names if c.startswith('E')] - assert_equal(len(eeg_chan), 256) + assert len(eeg_chan) == 256 picks = pick_types(raw.info, eeg=True) - assert_equal(len(picks), 256) - assert_equal('STI 014' in raw.ch_names, True) + assert len(picks) == 256 + assert 'STI 014' in raw.ch_names events = find_events(raw, stim_channel='STI 014') - assert_equal(len(events), 2) # ground truth - assert_equal(np.unique(events[:, 1])[0], 0) - assert (np.unique(events[:, 0])[0] != 0) - assert (np.unique(events[:, 2])[0] != 0) + assert len(events) == 2 # ground truth + assert np.unique(events[:, 1])[0] == 0 + assert np.unique(events[:, 0])[0] != 0 + assert np.unique(events[:, 2])[0] != 0 triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]]) # test trigger functionality @@ -192,30 +213,29 @@ def test_io_egi_pns_mff(tmpdir): verbose='error') assert ('RawMff' in repr(raw)) pns_chans = pick_types(raw.info, ecg=True, bio=True, emg=True) - assert_equal(len(pns_chans), 7) + assert len(pns_chans) == 7 names = [raw.ch_names[x] for x in pns_chans] - pns_names = ['Resp. Temperature'[:15], + pns_names = ['Resp. Temperature', 'Resp. Pressure', 'ECG', 'Body Position', - 'Resp. Effort Chest'[:15], - 'Resp. Effort Abdomen'[:15], + 'Resp. Effort Chest', + 'Resp. Effort Abdomen', 'EMG-Leg'] _test_raw_reader(read_raw_egi, input_fname=egi_mff_pns_fname, channel_naming='EEG %03d', verbose='error', test_rank='less', test_scaling=False, # XXX probably some bug ) - assert_equal(names, pns_names) + assert names == pns_names mat_names = [ - 'Resp_Temperature'[:15], + 'Resp_Temperature', 'Resp_Pressure', 'ECG', 'Body_Position', - 'Resp_Effort_Chest'[:15], - 'Resp_Effort_Abdomen'[:15], + 'Resp_Effort_Chest', + 'Resp_Effort_Abdomen', 'EMGLeg' - ] egi_fname_mat = op.join(data_path(), 'EGI', 'test_egi_pns.mat') mc = sio.loadmat(egi_fname_mat) @@ -349,6 +369,7 @@ def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads): assert evoked_cond.info['nchan'] == 259 assert evoked_cond.info['sfreq'] == 250.0 assert not evoked_cond.info['custom_ref_applied'] + assert evoked_cond.info['dig'] is None @requires_version('mffpy', '0.5.7') @@ -368,4 +389,29 @@ def test_read_evokeds_mff_bad_input(): assert str(exc_info.value) == message -run_tests_if_main() +@requires_testing_data +def test_egi_coord_frame(): + """Test that EGI coordinate frame is changed to head.""" + info = read_raw_egi(egi_mff_fname).info + want_idents = ( + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + ) + for ii, want in enumerate(want_idents): + d = info['dig'][ii] + assert d['kind'] == FIFF.FIFFV_POINT_CARDINAL + assert d['ident'] == want + loc = d['r'] + if ii == 0: + assert 0.05 < -loc[0] < 0.1, 'LPA' + assert_allclose(loc[1:], 0, atol=1e-7, err_msg='LPA') + elif ii == 1: + assert 0.05 < loc[1] < 0.11, 'Nasion' + assert_allclose(loc[::2], 0, atol=1e-7, err_msg='Nasion') + else: + assert ii == 2 + assert 0.05 < loc[0] < 0.1, 'RPA' + assert_allclose(loc[1:], 0, atol=1e-7, err_msg='RPA') + for d in info['dig'][3:]: + assert d['kind'] == FIFF.FIFFV_POINT_EEG diff --git a/mne/io/eximia/eximia.py b/mne/io/eximia/eximia.py index 
38b57341132..92dbc39531e 100644 --- a/mne/io/eximia/eximia.py +++ b/mne/io/eximia/eximia.py @@ -8,7 +8,7 @@ from ..base import BaseRaw from ..utils import _read_segments_file, _file_size from ..meas_info import create_info -from ...utils import logger, verbose, warn, fill_doc +from ...utils import logger, verbose, warn, fill_doc, _check_fname @fill_doc @@ -52,6 +52,7 @@ class RawEximia(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') data_name = op.basename(fname) logger.info('Loading %s' % data_name) # Create vhdr and vmrk files so that we can use mne_brain_vision2fiff diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py index 5cbb2b31f86..e985c825fed 100644 --- a/mne/io/fieldtrip/fieldtrip.py +++ b/mne/io/fieldtrip/fieldtrip.py @@ -8,7 +8,8 @@ from .utils import _create_info, _set_tmin, _create_events, \ _create_event_metadata, _validate_ft_struct -from .. import RawArray +from ...utils import _check_fname +from ..array.array import RawArray from ...epochs import EpochsArray from ...evoked import EvokedArray @@ -44,6 +45,7 @@ def read_raw_fieldtrip(fname, info, data_name='data'): A Raw Object containing the loaded data. """ from ...externals.pymatreader import read_mat + fname = _check_fname(fname, overwrite='read', must_exist=True) ft_struct = read_mat(fname, ignore_fields=['previous'], diff --git a/mne/io/fieldtrip/tests/test_fieldtrip.py b/mne/io/fieldtrip/tests/test_fieldtrip.py index b4890d794a3..8fa4cb7039f 100644 --- a/mne/io/fieldtrip/tests/test_fieldtrip.py +++ b/mne/io/fieldtrip/tests/test_fieldtrip.py @@ -33,6 +33,14 @@ all_test_params_epochs = list(itertools.product(all_systems_epochs, all_versions, use_info)) +# just for speed we skip some slowest ones -- the coverage should still +# be sufficient +for obj in (all_test_params_epochs, all_test_params_raw): + for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]: + obj.pop(obj.index(key)) + for ki, key in enumerate(obj): + if key[1] == 'v73': + obj[ki] = pytest.param(*obj[ki], marks=pytest.mark.slowtest) no_info_warning = {'expected_warning': RuntimeWarning, 'match': NOINFO_WARNING} diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index 816b5e9d1f0..8e628a37d18 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -25,7 +25,7 @@ from ...event import AcqParserFIF from ...utils import (check_fname, logger, verbose, warn, fill_doc, _file_like, - _on_missing) + _on_missing, _check_fname) @fill_doc @@ -36,10 +36,11 @@ class Raw(BaseRaw): ---------- fname : str | file-like The raw filename to load. For files that have automatically been split, - the split part will be automatically loaded. Filenames should end - with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif, - raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided, - preloading must be used. + the split part will be automatically loaded. Filenames not ending with + ``raw.fif``, ``raw_sss.fif``, ``raw_tsss.fif``, ``_meg.fif``, + ``_eeg.fif``, or ``_ieeg.fif`` (with or without an optional additional + ``.gz`` extension) will generate a warning. If a file-like object is + provided, preloading must be used. .. versionchanged:: 0.18 Support for file-like objects. 
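# Editor's note: a standalone sketch (assumption: illustrative only, the helper name
# is made up) of the accepted-ending logic described in the docstring above; the
# actual reader builds the same tuple and passes it to ``check_fname`` in the hunk
# below.
def _acceptable_raw_ending(fname):
    """Return True if fname has one of the accepted raw FIF endings."""
    endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
               '_meg.fif', '_eeg.fif', '_ieeg.fif')
    endings += tuple(f'{e}.gz' for e in endings)  # each ending may also be gzipped
    return fname.endswith(endings)

assert _acceptable_raw_ending('sub-01_task-rest_eeg.fif')
assert not _acceptable_raw_ending('sub-01_task-rest_ecog.fif')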
@@ -72,15 +73,15 @@ class Raw(BaseRaw): @verbose def __init__(self, fname, allow_maxshield=False, preload=False, - on_split_missing=None, verbose=None): # noqa: D102 + on_split_missing='raise', verbose=None): # noqa: D102 raws = [] - do_check_fname = not _file_like(fname) + do_check_ext = not _file_like(fname) next_fname = fname while next_fname is not None: raw, next_fname, buffer_size_sec = \ self._read_raw_file(next_fname, allow_maxshield, - preload, do_check_fname) - do_check_fname = False + preload, do_check_ext) + do_check_ext = False raws.append(raw) if next_fname is not None: if not op.exists(next_fname): @@ -91,12 +92,6 @@ def __init__(self, fname, allow_maxshield=False, preload=False, 'manually renamed on disk (split files should be ' 'renamed by loading and re-saving with MNE-Python to ' 'preserve proper filename linkage).') - if on_split_missing is None: - warn('The default for on_split_missing is "warn" in ' - '0.22 but will change to "raise" in 0.23, set it ' - 'explicitly to avoid this message', - DeprecationWarning) - on_split_missing = 'warn' _on_missing(on_split_missing, msg, name='on_split_missing') break if _file_like(fname): @@ -137,18 +132,19 @@ def __init__(self, fname, allow_maxshield=False, preload=False, @verbose def _read_raw_file(self, fname, allow_maxshield, preload, - do_check_fname=True, verbose=None): + do_check_ext=True, verbose=None): """Read in header information from a raw file.""" logger.info('Opening raw data file %s...' % fname) # Read in the whole file if preload is on and .fif.gz (saves time) if not _file_like(fname): - if do_check_fname: - check_fname(fname, 'raw', ( - 'raw.fif', 'raw_sss.fif', 'raw_tsss.fif', 'raw.fif.gz', - 'raw_sss.fif.gz', 'raw_tsss.fif.gz', '_meg.fif')) + if do_check_ext: + endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif', + '_meg.fif', '_eeg.fif', '_ieeg.fif') + endings += tuple([f'{e}.gz' for e in endings]) + check_fname(fname, 'raw', endings) # filename - fname = op.realpath(fname) + fname = _check_fname(fname, 'read', True, 'fname') ext = os.path.splitext(fname)[1].lower() whole_file = preload if '.gz' in ext else False del ext @@ -441,7 +437,7 @@ def _check_entry(first, nent): @fill_doc def read_raw_fif(fname, allow_maxshield=False, preload=False, - on_split_missing=None, verbose=None): + on_split_missing='raise', verbose=None): """Reader function for Raw FIF data. 
Parameters diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 51c1137464b..7b7d95f6b84 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -11,6 +11,7 @@ import os.path as op import pathlib import pickle +import shutil import sys import numpy as np @@ -22,13 +23,14 @@ from mne.filter import filter_data from mne.io.constants import FIFF from mne.io import RawArray, concatenate_raws, read_raw_fif, base +from mne.io.open import read_tag, read_tag_info from mne.io.tag import _read_tag_header from mne.io.tests.test_raw import _test_concat, _test_raw_reader from mne import (concatenate_events, find_events, equalize_channels, compute_proj_raw, pick_types, pick_channels, create_info, pick_info) from mne.utils import (requires_pandas, assert_object_equal, _dt_to_stamp, - requires_mne, run_subprocess, run_tests_if_main, + requires_mne, run_subprocess, assert_and_remove_boundary_annot) from mne.annotations import Annotations @@ -433,8 +435,8 @@ def test_split_files(tmpdir, mod, monkeypatch): os.remove(split_fname_bids_part2) with pytest.raises(ValueError, match='manually renamed'): read_raw_fif(split_fname_bids_part1, on_split_missing='raise') - with pytest.deprecated_call(): - read_raw_fif(split_fname_bids_part1) + with pytest.warns(RuntimeWarning, match='Split raw file detected'): + read_raw_fif(split_fname_bids_part1, on_split_missing='warn') read_raw_fif(split_fname_bids_part1, on_split_missing='ignore') # test the case where we only end up with one buffer to write @@ -606,7 +608,7 @@ def test_io_raw(tmpdir): def test_io_raw_additional(fname_in, fname_out, tmpdir): """Test IO for raw data (Neuromag + CTF + gz).""" fname_out = tmpdir.join(fname_out) - raw = read_raw_fif(fname_in) + raw = read_raw_fif(fname_in).crop(0, 2) nchan = raw.info['nchan'] ch_names = raw.info['ch_names'] @@ -973,16 +975,16 @@ def test_filter(): def test_filter_picks(): """Test filtering default channel picks.""" - ch_types = ['mag', 'grad', 'eeg', 'seeg', 'misc', 'stim', 'ecog', 'hbo', - 'hbr'] + ch_types = ['mag', 'grad', 'eeg', 'seeg', 'dbs', 'misc', 'stim', 'ecog', + 'hbo', 'hbr'] info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256) raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info) # -- Deal with meg mag grad and fnirs exceptions - ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'ecog') + ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'dbs', 'ecog') # -- Filter data channels - for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr'): + for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'hbo', 'hbr'): picks = {ch: ch == ch_type for ch in ch_types} picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False @@ -1067,16 +1069,21 @@ def test_resample_equiv(): @testing.requires_testing_data -@pytest.mark.parametrize('preload', (True, False)) -def test_resample(tmpdir, preload): +@pytest.mark.parametrize('preload, n, npad', [ + (True, 512, 'auto'), + (False, 512, 0), +]) +def test_resample(tmpdir, preload, n, npad): """Test resample (with I/O and multiple files).""" - raw = read_raw_fif(fif_fname).crop(0, 3) + raw = read_raw_fif(fif_fname) + raw.crop(0, raw.times[n - 1]) + assert len(raw.times) == n if preload: raw.load_data() raw_resamp = raw.copy() sfreq = raw.info['sfreq'] # test parallel on upsample - raw_resamp.resample(sfreq * 2, n_jobs=2, npad='auto') + raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad) assert 
raw_resamp.n_times == len(raw_resamp.times) raw_resamp.save(tmpdir.join('raw_resamp-raw.fif')) raw_resamp = read_raw_fif(tmpdir.join('raw_resamp-raw.fif'), @@ -1086,7 +1093,7 @@ def test_resample(tmpdir, preload): assert raw_resamp.get_data().shape[1] == raw_resamp.n_times assert raw.get_data().shape[0] == raw_resamp._data.shape[0] # test non-parallel on downsample - raw_resamp.resample(sfreq, n_jobs=1, npad='auto') + raw_resamp.resample(sfreq, n_jobs=1, npad=npad) assert raw_resamp.info['sfreq'] == sfreq assert raw.get_data().shape == raw_resamp._data.shape assert raw.first_samp == raw_resamp.first_samp @@ -1109,9 +1116,9 @@ def test_resample(tmpdir, preload): raw3 = raw.copy() raw4 = raw.copy() raw1 = concatenate_raws([raw1, raw2]) - raw1.resample(10., npad='auto') - raw3.resample(10., npad='auto') - raw4.resample(10., npad='auto') + raw1.resample(10., npad=npad) + raw3.resample(10., npad=npad) + raw4.resample(10., npad=npad) raw3 = concatenate_raws([raw3, raw4]) assert_array_equal(raw1._data, raw3._data) assert_array_equal(raw1._first_samps, raw3._first_samps) @@ -1129,12 +1136,12 @@ def test_resample(tmpdir, preload): # basic decimation stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(8., npad='auto')._data, + assert_allclose(raw.resample(8., npad=npad)._data, [[1, 1, 0, 0, 1, 1, 0, 0]]) # decimation of multiple stim channels raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim'])) - assert_allclose(raw.resample(8., npad='auto', verbose='error')._data, + assert_allclose(raw.resample(8., npad=npad, verbose='error')._data, [[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0]]) @@ -1142,20 +1149,20 @@ def test_resample(tmpdir, preload): # done naively stim = [0, 0, 0, 1, 1, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - assert_allclose(raw.resample(4., npad='auto')._data, + assert_allclose(raw.resample(4., npad=npad)._data, [[0, 1, 1, 0]]) # two events are merged in this case (warning) stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) with pytest.warns(RuntimeWarning, match='become unreliable'): - raw.resample(8., npad='auto') + raw.resample(8., npad=npad) # events are dropped in this case (warning) stim = [0, 1, 1, 0, 0, 1, 1, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) with pytest.warns(RuntimeWarning, match='become unreliable'): - raw.resample(4., npad='auto') + raw.resample(4., npad=npad) # test resampling events: this should no longer give a warning # we often have first_samp != 0, include it here too @@ -1167,7 +1174,7 @@ def test_resample(tmpdir, preload): raw = RawArray([stim], create_info(1, o_sfreq, ['stim']), first_samp=first_samp) events = find_events(raw) - raw, events = raw.resample(n_sfreq, events=events, npad='auto') + raw, events = raw.resample(n_sfreq, events=events, npad=npad) # Try index into raw.times with resampled events: raw.times[events[:, 0] - raw.first_samp] n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py @@ -1183,19 +1190,27 @@ def test_resample(tmpdir, preload): # test copy flag stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ['stim'])) - raw_resampled = raw.copy().resample(4., npad='auto') + raw_resampled = raw.copy().resample(4., npad=npad) assert (raw_resampled is not raw) - raw_resampled = raw.resample(4., npad='auto') + raw_resampled = raw.resample(4., npad=npad) 
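# Editor's note: a hedged, self-contained sketch (not part of the test) of the
# stim-channel resampling behaviour exercised above -- a synthetic stim channel in
# a ``RawArray`` is downsampled so the special-cased handling (values decimated
# rather than band-limited like data channels) can be tried in isolation.
import numpy as np
from mne import create_info
from mne.io import RawArray

stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw_stim = RawArray(np.array([stim], float), create_info(1, len(stim), ['stim']))
raw_stim.resample(8., npad='auto')  # 16 Hz -> 8 Hz
print(raw_stim.get_data())  # expected to keep crisp 0/1 values, per the test above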
assert (raw_resampled is raw) # resample should still work even when no stim channel is present raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg'])) raw.info['lowpass'] = 50. - raw.resample(10, npad='auto') + raw.resample(10, npad=npad) assert raw.info['lowpass'] == 5. assert len(raw) == 10 +def test_resample_stim(): + """Test stim_picks argument.""" + data = np.ones((2, 1000)) + info = create_info(2, 1000., ('eeg', 'misc')) + raw = RawArray(data, info) + raw.resample(500., stim_picks='misc') + + @testing.requires_testing_data def test_hilbert(): """Test computation of analytic signal using hilbert.""" @@ -1348,12 +1363,16 @@ def test_add_channels(): @testing.requires_testing_data def test_save(tmpdir): """Test saving raw.""" - raw = read_raw_fif(fif_fname, preload=False) + temp_fname = tmpdir.join('test_raw.fif') + shutil.copyfile(fif_fname, temp_fname) + raw = read_raw_fif(temp_fname, preload=False) # can't write over file being read - pytest.raises(ValueError, raw.save, fif_fname) - raw = read_raw_fif(fif_fname, preload=True) + with pytest.raises(ValueError, match='to the same file'): + raw.save(temp_fname) + raw.load_data() # can't overwrite file without overwrite=True - pytest.raises(IOError, raw.save, fif_fname) + with pytest.raises(IOError, match='file exists'): + raw.save(fif_fname) # test abspath support and annotations orig_time = _dt_to_stamp(raw.info['meas_date'])[0] + raw._first_time @@ -1707,4 +1726,43 @@ def test_bad_acq(fname): assert tag == ent -run_tests_if_main() +@pytest.mark.skipif(sys.platform not in ('darwin', 'linux'), + reason='Needs proper symlinking') +def test_split_symlink(tmpdir): + """Test split files with symlinks.""" + # regression test for gh-9221 + first = str(tmpdir.mkdir('first').join('test_raw.fif')) + raw = read_raw_fif(fif_fname).pick('meg').load_data() + raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True) + second = first[:-4] + '-1.fif' + assert op.isfile(second) + assert not op.isfile(first[:-4] + '-2.fif') + new_first = tmpdir.mkdir('a').join('test_raw.fif') + new_second = tmpdir.mkdir('b').join('test_raw-1.fif') + shutil.move(first, new_first) + shutil.move(second, new_second) + os.symlink(new_first, first) + os.symlink(new_second, second) + raw_new = read_raw_fif(first) + assert_allclose(raw_new.get_data(), raw.get_data()) + + +@testing.requires_testing_data +def test_corrupted(tmpdir): + """Test that a corrupted file can still be read.""" + # Must be a file written by Neuromag, not us, since we don't write the dir + # at the end, so use the skip one (straight from acq). 
+ raw = read_raw_fif(skip_fname) + with open(skip_fname, 'rb') as fid: + tag = read_tag_info(fid) + tag = read_tag(fid) + dirpos = int(tag.data) + assert dirpos == 12641532 + fid.seek(0) + data = fid.read(dirpos) + bad_fname = tmpdir.join('test_raw.fif') + with open(bad_fname, 'wb') as fid: + fid.write(data) + with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'): + raw_bad = read_raw_fif(bad_fname) + assert_allclose(raw.get_data(), raw_bad.get_data()) diff --git a/mne/io/kit/constants.py b/mne/io/kit/constants.py index 8cf725c7b57..69830cb3ccb 100644 --- a/mne/io/kit/constants.py +++ b/mne/io/kit/constants.py @@ -257,3 +257,4 @@ # BOOKMARKS = 15 # DIGITIZER = 25 KIT.DIR_INDEX_DIG_POINTS = 26 +KIT.DIR_INDEX_CHPI_DATA = 29 diff --git a/mne/io/kit/coreg.py b/mne/io/kit/coreg.py index d13bdbd18bb..5c5b264af1b 100644 --- a/mne/io/kit/coreg.py +++ b/mne/io/kit/coreg.py @@ -36,21 +36,23 @@ def read_mrk(fname): mrk_points : ndarray, shape (n_points, 3) Marker points in MEG space [m]. """ + from .kit import _read_dirs ext = op.splitext(fname)[-1] if ext in ('.sqd', '.mrk'): with open(fname, 'rb', buffering=0) as fid: - fid.seek(192) - mrk_offset = np.fromfile(fid, INT32, 1)[0] - fid.seek(mrk_offset) + dirs = _read_dirs(fid) + fid.seek(dirs[KIT.DIR_INDEX_COREG]['offset']) # skips match_done, meg_to_mri and mri_to_meg - fid.seek(KIT.INT + (2 * KIT.DOUBLE * 4 ** 2), SEEK_CUR) + fid.seek(KIT.INT + (2 * KIT.DOUBLE * 16), SEEK_CUR) mrk_count = np.fromfile(fid, INT32, 1)[0] pts = [] for _ in range(mrk_count): - # skips mri/meg mrk_type and done, mri_marker - fid.seek(KIT.INT * 4 + (KIT.DOUBLE * 3), SEEK_CUR) - pts.append(np.fromfile(fid, dtype=FLOAT64, count=3)) - mrk_points = np.array(pts) + # mri_type, meg_type, mri_done, meg_done + _, _, _, meg_done = np.fromfile(fid, INT32, 4) + _, meg_pts = np.fromfile(fid, FLOAT64, 6).reshape(2, 3) + if meg_done: + pts.append(meg_pts) + mrk_points = np.array(pts) elif ext == '.txt': mrk_points = _read_dig_kit(fname, unit='m') elif ext == '.pickled': @@ -123,6 +125,8 @@ def _set_dig_kit(mrk, elp, hsp, eeg): List of digitizer points for info['dig']. dev_head_t : dict A dictionary describe the device-head transformation. + hpi_results : list + The hpi results. """ from ...coreg import fit_matched_points, _decimate_points @@ -144,8 +148,8 @@ def _set_dig_kit(mrk, elp, hsp, eeg): raise ValueError("File %r should contain 8 points; got shape " "%s." % (elp, elp_points.shape)) elp = elp_points - elif len(elp) not in (7, 8): - raise ValueError("ELP should contain 7 or 8 points; got shape " + elif len(elp) not in (6, 7, 8): + raise ValueError("ELP should contain 6 ~ 8 points; got shape " "%s." 
% (elp.shape,)) if isinstance(mrk, str): mrk = read_mrk(mrk) @@ -167,7 +171,12 @@ def _set_dig_kit(mrk, elp, hsp, eeg): dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg) dev_head_t = Transform('meg', 'head', trans) - return dig_points, dev_head_t + hpi_results = [dict(dig_points=[ + dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + for ci, r in enumerate(mrk)], coord_trans=dev_head_t)] + + return dig_points, dev_head_t, hpi_results def _read_dig_kit(fname, unit='auto'): diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index e486276d738..0f4e0d639c0 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -14,7 +14,6 @@ from os import SEEK_CUR, path as op import numpy as np -from scipy import linalg from ..pick import pick_types from ...utils import (verbose, logger, warn, fill_doc, _check_option, @@ -50,10 +49,8 @@ def _call_digitization(info, mrk, elp, hsp, kit_info): # setup digitization if mrk is not None and elp is not None and hsp is not None: - dig_points, dev_head_t = _set_dig_kit( + info['dig'], info['dev_head_t'], info['hpi_results'] = _set_dig_kit( mrk, elp, hsp, kit_info['eeg_dig']) - info['dig'] = dig_points - info['dev_head_t'] = dev_head_t elif mrk is not None or elp is not None or hsp is not None: raise ValueError("mrk, elp and hsp need to be provided as a group " "(all or none)") @@ -463,6 +460,17 @@ def _read_dir(fid): count=np.fromfile(fid, INT32, 1)[0]) +@verbose +def _read_dirs(fid, verbose=None): + dirs = list() + dirs.append(_read_dir(fid)) + for ii in range(dirs[0]['count'] - 1): + logger.debug(f' KIT dir entry {ii} @ {fid.tell()}') + dirs.append(_read_dir(fid)) + assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + return dirs + + @verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): @@ -488,14 +496,11 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False - sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug # # directories (0) # - dirs.append(_read_dir(fid)) - dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1)) - assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] + sqd['dirs'] = dirs = _read_dirs(fid) # # system (1) @@ -715,14 +720,17 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, hsp.append(rr) # nasion, lpa, rpa, HPI in native space - elp = [dig.pop(key) for key in ( - 'fidnz', 'fidt9', 'fidt10', - 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')] - if 'hpi_5' in dig and dig['hpi_5'].any(): - elp.append(dig.pop('hpi_5')) + elp = [] + for key in ( + 'fidnz', 'fidt9', 'fidt10', + 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4', 'hpi_5'): + if key in dig and np.isfinite(dig[key]).all(): + elp.append(dig.pop(key)) elp = np.array(elp) hsp = np.array(hsp, float).reshape(-1, 3) - assert elp.shape in ((7, 3), (8, 3)) + if elp.shape not in ((6, 3), (7, 3), (8, 3)): + raise RuntimeError( + f'Fewer than 3 HPI coils found, got {len(elp) - 3}') # coregistration fid.seek(cor_dir['offset']) mrk = np.zeros((elp.shape[0] - 3, 3)) @@ -790,7 +798,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, y = sin(theta) * sin(phi) z = cos(theta) vec_z = np.array([x, y, z]) - vec_z /= linalg.norm(vec_z) + vec_z /= np.linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if vec_z[1] < vec_z[2]: if vec_z[0] < vec_z[1]: @@ -802,7 +810,7 @@ def get_kit_info(rawfile, allow_unknown_format, 
standardize_names=None, else: vec_x[2] = 1.0 vec_x -= np.sum(vec_x * vec_z) * vec_z - vec_x /= linalg.norm(vec_x) + vec_x /= np.linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) # transform to Neuromag like coordinate space vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 3d594aa4ba6..153578dc288 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -6,7 +6,7 @@ # # License: BSD (3-clause) -from collections import Counter +from collections import Counter, OrderedDict import contextlib from copy import deepcopy import datetime @@ -15,17 +15,17 @@ from textwrap import shorten import numpy as np -from scipy import linalg from .pick import (channel_type, pick_channels, pick_info, - get_channel_type_constants) + get_channel_type_constants, pick_types) from .constants import FIFF, _coord_frame_named from .open import fiff_open from .tree import dir_tree_find -from .tag import read_tag, find_tag, _ch_coord_dict +from .tag import (read_tag, find_tag, _ch_coord_dict, _update_ch_info_named, + _rename_list) from .proj import (_read_proj, _write_proj, _uniquify_projs, _normalize_proj, Projection) -from .ctf_comp import read_ctf_comp, write_ctf_comp +from .ctf_comp import _read_ctf_comp, write_ctf_comp from .write import (start_file, end_file, start_block, end_block, write_string, write_dig_points, write_float, write_int, write_coord_trans, write_ch_info, write_name_list, @@ -33,11 +33,13 @@ from .proc_history import _read_proc_history, _write_proc_history from ..transforms import invert_transform, Transform, _coord_frame_name from ..utils import (logger, verbose, warn, object_diff, _validate_type, - _stamp_to_dt, _dt_to_stamp, _pl, _is_numeric) + _stamp_to_dt, _dt_to_stamp, _pl, _is_numeric, + _check_option) from ._digitization import (_format_dig_points, _dig_kind_proper, DigPoint, _dig_kind_rev, _dig_kind_ints, _read_dig_fif) from ._digitization import write_dig as _dig_write_dig from .compensator import get_current_comp +from ..data.html_templates import info_template b = bytes # alias @@ -101,9 +103,11 @@ def _get_valid_units(): return tuple(valid_units) -def _unique_channel_names(ch_names): +@verbose +def _unique_channel_names(ch_names, max_length=None, verbose=None): """Ensure unique channel names.""" - FIFF_CH_NAME_MAX_LENGTH = 15 + if max_length is not None: + ch_names[:] = [name[:max_length] for name in ch_names] unique_ids = np.unique(ch_names, return_index=True)[1] if len(unique_ids) != len(ch_names): dups = {ch_names[x] @@ -114,8 +118,11 @@ def _unique_channel_names(ch_names): overlaps = np.where(np.array(ch_names) == ch_stem)[0] # We need an extra character since we append '-'. # np.ceil(...) is the maximum number of appended digits. - n_keep = (FIFF_CH_NAME_MAX_LENGTH - 1 - - int(np.ceil(np.log10(len(overlaps))))) + if max_length is not None: + n_keep = ( + max_length - 1 - int(np.ceil(np.log10(len(overlaps))))) + else: + n_keep = np.inf n_keep = min(len(ch_stem), n_keep) ch_stem = ch_stem[:n_keep] for idx, ch_idx in enumerate(overlaps): @@ -134,7 +141,7 @@ class MontageMixin(object): """Mixin for Montage setting.""" @verbose - def set_montage(self, montage, match_case=True, + def set_montage(self, montage, match_case=True, match_alias=False, on_missing='raise', verbose=None): """Set EEG sensor configuration and head digitization. 
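# Editor's note: a brief usage sketch (not part of the patch; channel names and data
# are illustrative) of the ``set_montage`` signature extended above with
# ``match_alias`` -- all keyword arguments shown exist in the new signature.
import numpy as np
import mne

info = mne.create_info(['Fz', 'Cz', 'Pz'], 100., 'eeg')
raw = mne.io.RawArray(np.zeros((3, 100)), info)
montage = mne.channels.make_standard_montage('standard_1020')
raw.set_montage(montage, match_case=False, match_alias=False, on_missing='warn')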
@@ -142,6 +149,7 @@ def set_montage(self, montage, match_case=True, ---------- %(montage)s %(match_case)s + %(match_alias)s %(on_missing_montage)s %(verbose_meth)s @@ -159,7 +167,7 @@ def set_montage(self, montage, match_case=True, from ..channels.montage import _set_montage info = self if isinstance(self, Info) else self.info - _set_montage(info, montage, match_case, on_missing) + _set_montage(info, montage, match_case, match_alias, on_missing) return self @@ -202,8 +210,16 @@ class Info(dict, MontageMixin): modified by various MNE-Python functions or methods (which have safeguards to ensure all fields remain in sync). - This class should not be instantiated directly. To create a measurement - information structure, use :func:`mne.create_info`. + .. warning:: This class should not be instantiated directly. To create a + measurement information structure, use + :func:`mne.create_info`. + + Parameters + ---------- + *args : list + Arguments. + **kwargs : dict + Keyword arguments. Attributes ---------- @@ -269,6 +285,8 @@ class Info(dict, MontageMixin): Tilt angle of the gantry in degrees. lowpass : float Lowpass corner frequency in Hertz. + It is automatically set to half the sampling rate if there is + otherwise no low-pass applied to the data. meas_date : datetime The time (UTC) of the recording. @@ -536,13 +554,13 @@ def __init__(self, *args, **kwargs): _format_trans(res, 'coord_trans') if self.get('dig', None) is not None and len(self['dig']): if isinstance(self['dig'], dict): # needs to be unpacked - self['dig'] = _dict_unpack(self['dig'], _dig_cast) + self['dig'] = _dict_unpack(self['dig'], _DIG_CAST) if not isinstance(self['dig'][0], DigPoint): self['dig'] = _format_dig_points(self['dig']) if isinstance(self.get('chs', None), dict): self['chs']['ch_name'] = [str(x) for x in np.char.decode( self['chs']['ch_name'], encoding='utf8')] - self['chs'] = _dict_unpack(self['chs'], _ch_cast) + self['chs'] = _dict_unpack(self['chs'], _CH_CAST) for pi, proj in enumerate(self.get('projs', [])): if not isinstance(proj, Projection): self['projs'][pi] = Projection(proj) @@ -711,7 +729,7 @@ def _check_consistency(self, prepend_error=''): self['meas_date'].tzinfo is None or self['meas_date'].tzinfo is not datetime.timezone.utc): raise RuntimeError('%sinfo["meas_date"] must be a datetime ' - 'object in UTC or None, got "%r"' + 'object in UTC or None, got %r' % (prepend_error, repr(self['meas_date']),)) chs = [ch['ch_name'] for ch in self['chs']] @@ -747,9 +765,6 @@ def _check_consistency(self, prepend_error=''): 'Bad info: info["chs"][%d]["loc"] must be ndarray with ' '12 elements, got %r' % (ci, loc)) - # make sure channel names are not too long - self._check_ch_name_length() - # make sure channel names are unique self['ch_names'] = _unique_channel_names(self['ch_names']) for idx, ch_name in enumerate(self['ch_names']): @@ -759,18 +774,6 @@ def _check_consistency(self, prepend_error=''): warn('the "filename" key is misleading ' 'and info should not have it') - def _check_ch_name_length(self): - """Check that channel names are sufficiently short.""" - bad_names = list() - for ch in self['chs']: - if len(ch['ch_name']) > 15: - bad_names.append(ch['ch_name']) - ch['ch_name'] = ch['ch_name'][:15] - if len(bad_names) > 0: - warn('%d channel names are too long, have been truncated to 15 ' - 'characters:\n%s' % (len(bad_names), bad_names)) - self._update_redundant() - def _update_redundant(self): """Update the redundant entries.""" self['ch_names'] = [ch['ch_name'] for ch in self['chs']] @@ -806,6 +809,33 @@ 
def pick_channels(self, ch_names, ordered=False): def ch_names(self): return self['ch_names'] + def _repr_html_(self, caption=None): + if isinstance(caption, str): + html = f'

<h4>{caption}</h4>

' + else: + html = '' + n_eeg = len(pick_types(self, meg=False, eeg=True)) + n_grad = len(pick_types(self, meg='grad')) + n_mag = len(pick_types(self, meg='mag')) + pick_eog = pick_types(self, meg=False, eog=True) + if len(pick_eog) > 0: + eog = ', '.join(np.array(self['ch_names'])[pick_eog]) + else: + eog = 'Not available' + pick_ecg = pick_types(self, meg=False, ecg=True) + if len(pick_ecg) > 0: + ecg = ', '.join(np.array(self['ch_names'])[pick_ecg]) + else: + ecg = 'Not available' + meas_date = self['meas_date'] + if meas_date is not None: + meas_date = meas_date.strftime("%B %d, %Y %H:%M:%S") + ' GMT' + + html += info_template.substitute( + caption=caption, info=self, meas_date=meas_date, n_eeg=n_eeg, + n_grad=n_grad, n_mag=n_mag, eog=eog, ecg=ecg) + return html + def _simplify_info(info): """Return a simplified info structure to speed up picking.""" @@ -927,7 +957,6 @@ def read_bad_channels(fid, node): ---------- fid : file The file descriptor. - node : dict The node of the FIF tree that contains info on the bad channels. @@ -936,6 +965,11 @@ def read_bad_channels(fid, node): bads : list A list of bad channel's names. """ + return _read_bad_channels(fid, node) + + +def _read_bad_channels(fid, node, ch_names_mapping): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS) bads = [] @@ -944,6 +978,7 @@ def read_bad_channels(fid, node): tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST) if tag is not None and tag.data is not None: bads = tag.data.split(':') + bads[:] = _rename_list(bads, ch_names_mapping) return bads @@ -1077,6 +1112,7 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID: tag = read_tag(fid, pos) kit_system_id = int(tag.data) + ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid) # Check that we have everything we need if nchan is None: @@ -1130,13 +1166,16 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): acq_stim = tag.data # Load the SSP data - projs = _read_proj(fid, meas_info) + projs = _read_proj( + fid, meas_info, ch_names_mapping=ch_names_mapping) # Load the CTF compensation data - comps = read_ctf_comp(fid, meas_info, chs) + comps = _read_ctf_comp( + fid, meas_info, chs, ch_names_mapping=ch_names_mapping) # Load the bad channel list - bads = read_bad_channels(fid, meas_info) + bads = _read_bad_channels( + fid, meas_info, ch_names_mapping=ch_names_mapping) # # Put the data together @@ -1405,7 +1444,7 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): info['dev_ctf_t'] = dev_ctf_t if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: from ..transforms import Transform - head_ctf_trans = linalg.inv(ctf_head_t['trans']) + head_ctf_trans = np.linalg.inv(ctf_head_t['trans']) dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans']) info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans) @@ -1426,6 +1465,48 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): return info, meas +def _read_extended_ch_info(chs, parent, fid): + ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO) + if len(ch_infos) == 0: + return + _check_option('length of channel infos', len(ch_infos), [len(chs)]) + logger.info(' Reading extended channel information') + + # Here we assume that ``remap`` is in the same order as the channels + # themselves, which is hopefully safe enough. 
+ ch_names_mapping = dict() + for new, ch in zip(ch_infos, chs): + for k in range(new['nent']): + kind = new['directory'][k].kind + try: + key, cast = _CH_READ_MAP[kind] + except KeyError: + # This shouldn't happen if we're up to date with the FIFF + # spec + warn(f'Discarding extra channel information kind {kind}') + continue + assert key in ch + data = read_tag(fid, new['directory'][k].pos).data + if data is not None: + data = cast(data) + if key == 'ch_name': + ch_names_mapping[ch[key]] = data + ch[key] = data + _update_ch_info_named(ch) + # we need to return ch_names_mapping so that we can also rename the + # bad channels + return ch_names_mapping + + +def _rename_comps(comps, ch_names_mapping): + if not (comps and ch_names_mapping): + return + for comp in comps: + data = comp['data'] + for key in ('row_names', 'col_names'): + data[key][:] = _rename_list(data[key], ch_names_mapping) + + def _ensure_meas_date_none_or_dt(meas_date): if meas_date is None or np.array_equal(meas_date, DATE_NONE): meas_date = None @@ -1592,12 +1673,14 @@ def write_meas_info(fid, info, data_type=None, reset_range=True): write_coord_trans(fid, info['dev_ctf_t']) # Projectors - _write_proj(fid, info['projs']) + ch_names_mapping = _make_ch_names_mapping(info['chs']) + _write_proj(fid, info['projs'], ch_names_mapping=ch_names_mapping) # Bad channels if len(info['bads']) > 0: + bads = _rename_list(info['bads'], ch_names_mapping) start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) - write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads']) + write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads) end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) # General @@ -1631,14 +1714,7 @@ def write_meas_info(fid, info, data_type=None, reset_range=True): write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout']) # Channel information - for k, c in enumerate(info['chs']): - # Scan numbers may have been messed up - c = deepcopy(c) - c['scanno'] = k + 1 - # for float/double, the "range" param is unnecessary - if reset_range is True: - c['range'] = 1.0 - write_ch_info(fid, c) + _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping) # Subject information if info.get('subject_info') is not None: @@ -1709,7 +1785,11 @@ def write_meas_info(fid, info, data_type=None, reset_range=True): del hs # CTF compensation info - write_ctf_comp(fid, info['comps']) + comps = info['comps'] + if ch_names_mapping: + comps = deepcopy(comps) + _rename_comps(comps, ch_names_mapping) + write_ctf_comp(fid, comps) # KIT system ID if info.get('kit_system_id') is not None: @@ -1948,8 +2028,8 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): Channel types, default is ``'misc'`` which is not a :term:`data channel `. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', - 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'. - If str, then all channels are assumed to be of the same type. + 'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' + or 'hbo'. If str, then all channels are assumed to be of the same type. %(verbose)s Returns @@ -1968,7 +2048,7 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): be initialized to the identity transform. 
Proper units of measure: - * V: eeg, eog, seeg, emg, ecg, bio, ecog + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog * T: mag * T/m: grad * M: hbo, hbr @@ -2128,6 +2208,7 @@ def anonymize_info(info, daysback=None, keep_his=False, verbose=None): tzinfo=datetime.timezone.utc) default_str = "mne_anonymize" default_subject_id = 0 + default_sex = 0 default_desc = ("Anonymized using a time shift" " to preserve age at acquisition") @@ -2152,7 +2233,10 @@ def anonymize_info(info, daysback=None, keep_his=False, verbose=None): value = info.get(key) if value is not None: assert 'msecs' not in value - if none_meas_date: + if (none_meas_date or + ((value['secs'], value['usecs']) == DATE_NONE)): + # Don't try to shift backwards in time when no measurement + # date is available or when file_id is already a place holder tmp = DATE_NONE else: tmp = _add_timedelta_to_stamp( @@ -2171,9 +2255,15 @@ def anonymize_info(info, daysback=None, keep_his=False, verbose=None): if subject_info.get('id') is not None: subject_info['id'] = default_subject_id if keep_his: - logger.info('Not fully anonymizing info - keeping \'his_id\'') - elif subject_info.get('his_id') is not None: - subject_info['his_id'] = str(default_subject_id) + logger.info('Not fully anonymizing info - keeping ' + 'his_id, sex, and hand info') + else: + if subject_info.get('his_id') is not None: + subject_info['his_id'] = str(default_subject_id) + if subject_info.get('sex') is not None: + subject_info['sex'] = default_sex + if subject_info.get('hand') is not None: + del subject_info['hand'] # there's no "unknown" setting for key in ('last_name', 'first_name', 'middle_name'): if subject_info.get(key) is not None: @@ -2242,7 +2332,7 @@ def anonymize_info(info, daysback=None, keep_his=False, verbose=None): 'Underlying Error:\n') info._check_consistency(prepend_error=err_mesg) err_mesg = ('anonymize_info generated an inconsistent info object. ' - 'daysback parameter was too large.' + 'daysback parameter was too large. 
' 'Underlying Error:\n') _check_dates(info, prepend_error=err_mesg) @@ -2295,11 +2385,27 @@ def _bad_chans_comp(info, ch_names): return False, missing_ch_names -_dig_cast = {'kind': int, 'ident': int, 'r': lambda x: x, 'coord_frame': int} -_ch_cast = {'scanno': int, 'logno': int, 'kind': int, - 'range': float, 'cal': float, 'coil_type': int, - 'loc': lambda x: x, 'unit': int, 'unit_mul': int, - 'ch_name': lambda x: x, 'coord_frame': int} +_DIG_CAST = dict( + kind=int, ident=int, r=lambda x: x, coord_frame=int) +# key -> const, cast, write +_CH_INFO_MAP = OrderedDict( + scanno=(FIFF.FIFF_CH_SCAN_NO, int, write_int), + logno=(FIFF.FIFF_CH_LOGICAL_NO, int, write_int), + kind=(FIFF.FIFF_CH_KIND, int, write_int), + range=(FIFF.FIFF_CH_RANGE, float, write_float), + cal=(FIFF.FIFF_CH_CAL, float, write_float), + coil_type=(FIFF.FIFF_CH_COIL_TYPE, int, write_int), + loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float), + unit=(FIFF.FIFF_CH_UNIT, int, write_int), + unit_mul=(FIFF.FIFF_CH_UNIT_MUL, int, write_int), + ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string), + coord_frame=(FIFF.FIFF_CH_COORD_FRAME, int, write_int), +) +# key -> cast +_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items()) +# const -> key, cast +_CH_READ_MAP = OrderedDict((val[0], (key, val[1])) + for key, val in _CH_INFO_MAP.items()) @contextlib.contextmanager @@ -2309,8 +2415,8 @@ def _writing_info_hdf5(info): orig_chs = info['chs'] try: if orig_dig is not None and len(orig_dig) > 0: - info['dig'] = _dict_pack(info['dig'], _dig_cast) - info['chs'] = _dict_pack(info['chs'], _ch_cast) + info['dig'] = _dict_pack(info['dig'], _DIG_CAST) + info['chs'] = _dict_pack(info['chs'], _CH_CAST) info['chs']['ch_name'] = np.char.encode( info['chs']['ch_name'], encoding='utf8') yield @@ -2330,3 +2436,38 @@ def _dict_unpack(obj, casts): n = len(obj[list(casts)[0]]) return [{key: cast(obj[key][ii]) for key, cast in casts.items()} for ii in range(n)] + + +def _make_ch_names_mapping(chs): + orig_ch_names = [c['ch_name'] for c in chs] + ch_names = orig_ch_names.copy() + _unique_channel_names(ch_names, max_length=15, verbose='error') + ch_names_mapping = dict() + if orig_ch_names != ch_names: + ch_names_mapping.update(zip(orig_ch_names, ch_names)) + return ch_names_mapping + + +def _write_ch_infos(fid, chs, reset_range, ch_names_mapping): + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + for k, c in enumerate(chs): + # Scan numbers may have been messed up + c = c.copy() + c['ch_name'] = ch_names_mapping.get(c['ch_name'], c['ch_name']) + assert len(c['ch_name']) <= 15 + c['scanno'] = k + 1 + # for float/double, the "range" param is unnecessary + if reset_range: + c['range'] = 1.0 + write_ch_info(fid, c) + # only write new-style channel information if necessary + if len(ch_names_mapping): + logger.info( + ' Writing channel names to FIF truncated to 15 characters ' + 'with remapping') + for ch in chs: + start_block(fid, FIFF.FIFFB_CH_INFO) + assert set(ch) == set(_CH_INFO_MAP) + for (key, (const, _, write)) in _CH_INFO_MAP.items(): + write(fid, const, ch[key]) + end_block(fid, FIFF.FIFFB_CH_INFO) diff --git a/mne/io/nedf/__init__.py b/mne/io/nedf/__init__.py new file mode 100644 index 00000000000..9d16c4ce2b5 --- /dev/null +++ b/mne/io/nedf/__init__.py @@ -0,0 +1,7 @@ +"""NEDF file import module.""" + +# Author: Tristan Stenner +# +# License: BSD (3-clause) + +from .nedf import read_raw_nedf, _parse_nedf_header diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py new file mode 100644 index 
00000000000..70fd2befc4c --- /dev/null +++ b/mne/io/nedf/nedf.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +"""Import NeuroElectrics DataFormat (NEDF) files.""" + +from copy import deepcopy +from datetime import datetime, timezone +from xml.etree import ElementTree + +import numpy as np + +from ..base import BaseRaw +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...utils import warn, verbose, _check_fname + + +def _getsubnodetext(node, name): + """Get an element from an XML node, raise an error otherwise. + + Parameters + ---------- + node: Element + XML Element + name: str + Child element name + + Returns + ------- + test: str + Text contents of the child nodes + """ + subnode = node.findtext(name) + if not subnode: + raise RuntimeError('NEDF header ' + name + ' not found') + return subnode + + +def _parse_nedf_header(header): + """Read header information from the first 10kB of an .nedf file. + + Parameters + ---------- + header : bytes + Null-terminated header data, mostly the file's first 10240 bytes. + + Returns + ------- + info : dict + A dictionary with header information. + dt : numpy.dtype + Structure of the binary EEG/accelerometer/trigger data in the file. + n_samples : int + The number of data samples. + """ + info = {} + # nedf files have three accelerometer channels sampled at 100Hz followed + # by five EEG samples + TTL trigger sampled at 500Hz + # For 32 EEG channels and no stim channels, the data layout may look like + # [ ('acc', '>u2', (3,)), + # ('data', dtype([ + # ('eeg', 'u1', (32, 3)), + # ('trig', '>i4', (1,)) + # ]), (5,)) + # ] + + dt = [] # dtype for the binary data block + datadt = [] # dtype for a single EEG sample + + headerend = header.find(b'\0') + if headerend == -1: + raise RuntimeError('End of header null not found') + headerxml = ElementTree.fromstring(header[:headerend]) + nedfversion = headerxml.findtext('NEDFversion', '') + if nedfversion not in ['1.3', '1.4']: + warn('NEDFversion unsupported, use with caution') + + if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM': + warn('Found Starstim, this hasn\'t been tested extensively!') + + if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF': + raise RuntimeError('Unknown additional channel, aborting.') + + n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0)) + if n_acc: + # expect one sample of u16 accelerometer data per block + dt.append(('acc', '>u2', (n_acc,))) + + eegset = headerxml.find('EEGSettings') + if eegset is None: + raise RuntimeError('No EEG channels found') + nchantotal = int(_getsubnodetext(eegset, 'TotalNumberOfChannels')) + info['nchan'] = nchantotal + + info['sfreq'] = int(_getsubnodetext(eegset, 'EEGSamplingRate')) + info['ch_names'] = [e.text for e in eegset.find('EEGMontage')] + if nchantotal != len(info['ch_names']): + raise RuntimeError( + f"TotalNumberOfChannels ({nchantotal}) != " + f"channel count ({len(info['ch_names'])})") + # expect nchantotal uint24s + datadt.append(('eeg', 'B', (nchantotal, 3))) + + if headerxml.find('STIMSettings') is not None: + # 2* -> two stim samples per eeg sample + datadt.append(('stim', 'B', (2, nchantotal, 3))) + warn('stim channels are currently ignored') + + # Trigger data: 4 bytes in newer versions, 1 byte in older versions + trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B' + datadt.append(('trig', trigger_type)) + # 5 data samples per block + dt.append(('data', np.dtype(datadt), (5,))) + + date = 
headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', 0) + info['meas_date'] = datetime.fromtimestamp(int(date) / 1000, timezone.utc) + + n_samples = int(_getsubnodetext(eegset, 'NumberOfRecordsOfEEG')) + n_full, n_last = divmod(n_samples, 5) + dt_last = deepcopy(dt) + assert dt_last[-1][-1] == (5,) + dt_last[-1] = list(dt_last[-1]) + dt_last[-1][-1] = (n_last,) + dt_last[-1] = tuple(dt_last[-1]) + return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full + + +# the first 10240 bytes are header in XML format, padded with NULL bytes +_HDRLEN = 10240 + + +class RawNedf(BaseRaw): + """Raw object from NeuroElectrics nedf file.""" + + def __init__(self, filename, preload=False, verbose=None): + filename = _check_fname(filename, 'read', True, 'filename') + with open(filename, mode='rb') as fid: + header = fid.read(_HDRLEN) + header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header) + ch_names = header['ch_names'] + ['STI 014'] + ch_types = ['eeg'] * len(ch_names) + ch_types[-1] = 'stim' + info = create_info(ch_names, header['sfreq'], ch_types) + # scaling factor ADC-values -> volts + # taken from the NEDF EEGLAB plugin + # (https://www.neuroelectrics.com/resources/software/): + for ch in info['chs'][:-1]: + ch['cal'] = 2.4 / (6.0 * 8388607) + info['meas_date'] = header['meas_date'] + raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full) + super().__init__( + info, preload=preload, filenames=[filename], verbose=verbose, + raw_extras=[raw_extra], last_samps=[n_samp - 1]) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + dt = self._raw_extras[fi]['dt'] + dt_last = self._raw_extras[fi]['dt_last'] + n_full = self._raw_extras[fi]['n_full'] + n_eeg = dt[1].subdtype[0][0].shape[0] + # data is stored in 5-sample chunks (except maybe the last one!) + # so we have to do some gymnastics to pick the correct parts to + # read + offset = start // 5 * dt.itemsize + _HDRLEN + start_sl = start % 5 + n_samples = stop - start + n_samples_full = min(stop, n_full * 5) - start + last = None + n_chunks = (n_samples_full - 1) // 5 + 1 + n_tot = n_chunks * 5 + with open(self._filenames[fi], 'rb') as fid: + fid.seek(offset, 0) + chunks = np.fromfile(fid, dtype=dt, count=n_chunks) + assert len(chunks) == n_chunks + if n_samples != n_samples_full: + last = np.fromfile(fid, dtype=dt_last, count=1) + eeg = _convert_eeg(chunks, n_eeg, n_tot) + trig = chunks['data']['trig'].reshape(1, n_tot) + if last is not None: + n_last = dt_last['data'].shape[0] + eeg = np.concatenate( + (eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) + trig = np.concatenate( + (trig, last['data']['trig'].reshape(1, n_last)), axis=-1) + one_ = np.concatenate((eeg, trig)) + one = one_[:, start_sl:n_samples + start_sl] + _mult_cal_one(data, one, idx, cals, mult) + + +def _convert_eeg(chunks, n_eeg, n_tot): + # convert uint8-triplet -> int32 + eeg = chunks['data']['eeg'] @ np.array([1 << 16, 1 << 8, 1]) + # convert sign if necessary + eeg[eeg > (1 << 23)] -= 1 << 24 + eeg = eeg.reshape((n_tot, n_eeg)).T + return eeg + + +@verbose +def read_raw_nedf(filename, preload=False, verbose=None): + """Read NeuroElectrics .nedf files. + + NEDF file versions starting from 1.3 are supported. + + Parameters + ---------- + filename : str + Path to the .nedf file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawNedf + A Raw object containing NEDF data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. 
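# A hypothetical usage sketch of the reader defined above; 'recording.nedf'
# is a placeholder path. The trigger channel is exposed as 'STI 014', so
# events can be recovered with find_events as done in the tests below.
from mne import find_events
from mne.io.nedf import read_raw_nedf
raw = read_raw_nedf('recording.nedf', preload=True)
events = find_events(raw, shortest_event=1)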
+ """ + return RawNedf(filename, preload, verbose) diff --git a/mne/io/nedf/tests/__init__.py b/mne/io/nedf/tests/__init__.py new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/mne/io/nedf/tests/__init__.py @@ -0,0 +1 @@ + diff --git a/mne/io/nedf/tests/test_nedf.py b/mne/io/nedf/tests/test_nedf.py new file mode 100644 index 00000000000..3df747c81ad --- /dev/null +++ b/mne/io/nedf/tests/test_nedf.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +"""Test reading of NEDF format.""" +# Author: Tristan Stenner +# +# License: BSD (3-clause) + +import os.path as op + +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from mne import find_events +from mne.io.constants import FIFF +from mne.io.nedf import read_raw_nedf, _parse_nedf_header +from mne.datasets import testing +from mne.io.tests.test_raw import _test_raw_reader + +eeg_path = testing.data_path(download=False, verbose=True) +eegfile = op.join(eeg_path, 'nedf', 'testdata.nedf') + +stimhdr = b""" + + 1.3 + %d + + 4 + 500 + ABCD + 11 + + +\x00""" + + +@pytest.mark.parametrize('nacc', (0, 3)) +def test_nedf_header_parser(nacc): + """Test NEDF header parsing and dtype extraction.""" + with pytest.warns(RuntimeWarning, match='stim channels.*ignored'): + info, dt, dt_last, n_samples, n_full = _parse_nedf_header( + stimhdr % nacc) + assert n_samples == 11 + assert n_full == 2 + nchan = 4 + assert info['nchan'] == nchan + assert dt.itemsize == 200 + nacc * 2 + if nacc: + assert dt.names[0] == 'acc' + assert dt['acc'].shape == (nacc,) + + assert dt['data'].shape == (5,) # blocks of 5 EEG samples each + assert dt_last['data'].shape == (1,) # plus one last extra one + + eegsampledt = dt['data'].subdtype[0] + assert eegsampledt.names == ('eeg', 'stim', 'trig') + assert eegsampledt['eeg'].shape == (nchan, 3) + assert eegsampledt['stim'].shape == (2, nchan, 3) + + +def test_invalid_headers(): + """Test that invalid headers raise exceptions.""" + tpl = b""" + 1.3 + + %s + ABCD + + \x00""" + nchan = b'4' + sr = b'500' + hdr = { + 'null': + b'No null terminator', + 'Unknown additional': + (b'1.3' + + b'???\x00'), # noqa: E501 + 'No EEG channels found': + b'1.3\x00', + 'TotalNumberOfChannels not found': + tpl % b'No nchan.', + '!= channel count': + tpl % (sr + b'52'), + 'EEGSamplingRate not found': + tpl % nchan, + 'NumberOfRecordsOfEEG not found': + tpl % (sr + nchan), + } + for match, invalid_hdr in hdr.items(): + with pytest.raises(RuntimeError, match=match): + _parse_nedf_header(invalid_hdr) + + sus_hdrs = { + 'unsupported': b'25\x00', + 'tested': ( + b'1.3' + + b'STARSTIM\x00'), + } + for match, sus_hdr in sus_hdrs.items(): + with pytest.warns(RuntimeWarning, match=match): + with pytest.raises(RuntimeError, match='No EEG channels found'): + _parse_nedf_header(sus_hdr) + + +@testing.requires_testing_data +def test_nedf_data(): + """Test reading raw NEDF files.""" + raw = read_raw_nedf(eegfile) + nsamples = len(raw) + assert nsamples == 32538 + + events = find_events(raw, shortest_event=1) + assert len(events) == 4 + assert_array_equal(events[:, 2], [1, 1, 1, 1]) + onsets = events[:, 0] / raw.info['sfreq'] + assert raw.info['sfreq'] == 500 + + data_end = raw.get_data('Fp1', nsamples - 100, nsamples).mean() + assert_allclose(data_end, .0176, atol=.01) + assert_allclose(raw.get_data('Fpz', 0, 100).mean(), .0185, atol=.01) + + assert_allclose(onsets, [22.384, 38.238, 49.496, 63.15]) + assert raw.info['meas_date'].year == 2019 + assert raw.ch_names[2] == 'AF7' + + for ch in raw.info['chs'][:-1]: + assert 
ch['kind'] == FIFF.FIFFV_EEG_CH + assert ch['unit'] == FIFF.FIFF_UNIT_V + assert raw.info['chs'][-1]['kind'] == FIFF.FIFFV_STIM_CH + assert raw.info['chs'][-1]['unit'] == FIFF.FIFF_UNIT_V + + # full tests + _test_raw_reader(read_raw_nedf, filename=eegfile) diff --git a/mne/io/nicolet/nicolet.py b/mne/io/nicolet/nicolet.py index ae3e8239213..d7e82557ca9 100644 --- a/mne/io/nicolet/nicolet.py +++ b/mne/io/nicolet/nicolet.py @@ -19,17 +19,18 @@ def read_raw_nicolet(input_fname, ch_type, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None): """Read Nicolet data as raw object. - Note: This reader takes data files with the extension ``.data`` as an - input. The header file with the same file name stem and an extension - ``.head`` is expected to be found in the same directory. + ..note:: This reader takes data files with the extension ``.data`` as an + input. The header file with the same file name stem and an + extension ``.head`` is expected to be found in the same + directory. Parameters ---------- input_fname : str - Path to the data file. + Path to the data file (ending with ``.data`` not ``.head``). ch_type : str Channel type to designate to the data channels. Supported data types - include 'eeg', 'seeg'. + include 'eeg', 'dbs'. eog : list | tuple | 'auto' Names of channels or list of indices that should be designated EOG channels. If 'auto', the channel names beginning with @@ -63,7 +64,13 @@ def read_raw_nicolet(input_fname, ch_type, eog=(), def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc): """Extract info from Nicolet header files.""" - fname = path.splitext(fname)[0] + fname, extension = path.splitext(fname) + + if extension != '.data': + raise ValueError( + f'File name should end with .data not "{extension}".' + ) + header = fname + '.head' logger.info('Reading header...') @@ -75,8 +82,11 @@ def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc): value = value[1:-2].split(',') # strip brackets elif var == 'conversion_factor': value = float(value) - elif var != 'start_ts': + elif var in ['num_channels', 'rec_id', 'adm_id', 'pat_id', + 'num_samples']: value = int(value) + elif var != 'start_ts': + value = float(value) header_info[var] = value ch_names = header_info['elec_names'] diff --git a/mne/io/nicolet/tests/test_nicolet.py b/mne/io/nicolet/tests/test_nicolet.py index 7dd21f9f9dd..f24823e0e0d 100644 --- a/mne/io/nicolet/tests/test_nicolet.py +++ b/mne/io/nicolet/tests/test_nicolet.py @@ -6,19 +6,22 @@ import os.path as op import inspect -from mne.utils import run_tests_if_main from mne.io import read_raw_nicolet from mne.io.tests.test_raw import _test_raw_reader +import pytest + FILE = inspect.getfile(inspect.currentframe()) base_dir = op.join(op.dirname(op.abspath(FILE)), 'data') -fname = op.join(base_dir, 'test_nicolet_raw.data') +fname_data = op.join(base_dir, 'test_nicolet_raw.data') +fname_head = op.join(base_dir, 'test_nicolet_raw.head') def test_data(): """Test reading raw nicolet files.""" - _test_raw_reader(read_raw_nicolet, input_fname=fname, ch_type='eeg', + _test_raw_reader(read_raw_nicolet, input_fname=fname_data, ch_type='eeg', ecg='auto', eog='auto', emg='auto', misc=['PHO']) - -run_tests_if_main() + with pytest.raises(ValueError, + match='File name should end with .data not ".head".'): + read_raw_nicolet(fname_head, 'eeg') diff --git a/mne/io/nihon/nihon.py b/mne/io/nihon/nihon.py index 44803250361..b3a7591d4e6 100644 --- a/mne/io/nihon/nihon.py +++ b/mne/io/nihon/nihon.py @@ -8,7 +8,7 @@ import numpy as np -from ...utils import fill_doc, 
logger, verbose, warn +from ...utils import fill_doc, logger, verbose, warn, _check_fname from ..base import BaseRaw from ..meas_info import create_info from ...annotations import Annotations @@ -308,6 +308,7 @@ class RawNihon(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') fname = _ensure_path(fname) data_name = fname.name logger.info('Loading %s' % data_name) diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 6aa0c618af7..b3c21dacd6e 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -16,7 +16,8 @@ from ..meas_info import create_info, _format_dig_points from ...annotations import Annotations from ...transforms import apply_trans, _get_trans -from ...utils import logger, verbose, fill_doc, warn +from ...utils import (logger, verbose, fill_doc, warn, _check_fname, + _validate_type) @fill_doc @@ -106,12 +107,12 @@ def __init__(self, fname, saturated, preload=False, verbose=None): from ...coreg import get_mni_fiducials # avoid circular import prob from ...preprocessing import annotate_nan # avoid circular import prob logger.info('Loading %s' % fname) - + _validate_type(fname, 'path-like', 'fname') + fname = str(fname) if fname.endswith('.hdr'): fname = op.dirname(op.abspath(fname)) - if not op.isdir(fname): - raise FileNotFoundError('The path you specified does not exist.') + fname = _check_fname(fname, 'read', True, 'fname', need_dir=True) # Check if required files exist and store names for later use files = dict() diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index 0547969bb2b..01d2dfcbb6b 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -46,7 +46,7 @@ def test_nirx_hdr_load(): @requires_testing_data def test_nirx_missing_warn(): """Test reading NIRX files when missing data.""" - with pytest.raises(FileNotFoundError, match='The path you'): + with pytest.raises(FileNotFoundError, match='does not exist'): read_raw_nirx(fname_nirx_15_2_short + "1", preload=True) diff --git a/mne/io/open.py b/mne/io/open.py index 49bfd5afc1e..17abcaee2f5 100644 --- a/mne/io/open.py +++ b/mne/io/open.py @@ -9,12 +9,11 @@ from gzip import GzipFile import numpy as np -from scipy import sparse from .tag import read_tag_info, read_tag, Tag, _call_dict_names from .tree import make_dir_tree, dir_tree_find from .constants import FIFF -from ..utils import logger, verbose, _file_like +from ..utils import logger, verbose, _file_like, warn class _NoCloseRead(object): @@ -159,10 +158,16 @@ def _fiff_open(fname, fid, preload): logger.debug(' Creating tag directory for %s...' 
% fname) dirpos = int(tag.data) + read_slow = True if dirpos > 0: - tag = read_tag(fid, dirpos) - directory = tag.data - else: + dir_tag = read_tag(fid, dirpos) + if dir_tag is None: + warn(f'FIF tag directory missing at the end of the file, possibly ' + f'corrupted file: {fname}') + else: + directory = dir_tag.data + read_slow = False + if read_slow: fid.seek(0, 0) directory = list() while tag.next >= 0: @@ -244,6 +249,7 @@ def _find_type(value, fmts=['FIFF_'], exclude=['FIFF_UNIT']): def _show_tree(fid, tree, indent, level, read_limit, max_str, tag_id): """Show FIFF tree.""" + from scipy import sparse this_idt = indent * level next_idt = indent * (level + 1) # print block-level information diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py index 40ce94ec824..1cc83d30ff5 100644 --- a/mne/io/persyst/persyst.py +++ b/mne/io/persyst/persyst.py @@ -13,7 +13,7 @@ from ..meas_info import create_info from ..utils import _mult_cal_one from ...annotations import Annotations -from ...utils import logger, verbose, fill_doc, warn +from ...utils import logger, verbose, fill_doc, warn, _check_fname @fill_doc @@ -35,6 +35,14 @@ def read_raw_persyst(fname, preload=False, verbose=None): See Also -------- mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + It is assumed that the ``.lay`` and ``.dat`` file + are in the same directory. To get the correct file path to the + ``.dat`` file, ``read_raw_persyst`` will get the corresponding dat + filename from the lay file, and look for that file inside the same + directory as the lay file. """ return RawPersyst(fname, preload, verbose) @@ -57,10 +65,13 @@ class RawPersyst(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, 'read', True, 'fname') logger.info('Loading %s' % fname) + # make sure filename is the Lay file if not fname.endswith('.lay'): fname = fname + '.lay' + # get the current directory and Lay filename curr_path, lay_fname = op.dirname(fname), op.basename(fname) if not op.exists(fname): raise FileNotFoundError(f'The path you specified, ' @@ -76,6 +87,9 @@ def __init__(self, fname, preload=False, verbose=None): patient_dict = OrderedDict() comments_dict = OrderedDict() + # keep track of total number of comments + num_comments = 0 + # loop through each line in the lay file for key, val, section in zip(keys, data, sections): if key == '': @@ -89,15 +103,16 @@ def __init__(self, fname, preload=False, verbose=None): if section == 'fileinfo': # extract the .dat file name if key == 'file': - dat_fname = val - dat_path = op.dirname(dat_fname) + dat_fname = op.basename(val) dat_fpath = op.join(curr_path, op.basename(dat_fname)) # determine if .dat file exists where it should error_msg = f'The data path you specified ' \ - f'does not exist for the lay path, {lay_fname}' - if op.isabs(dat_path) and not op.exists(dat_fname): - raise FileNotFoundError(error_msg) + f'does not exist for the lay path, ' \ + f'{lay_fname}. Make sure the dat file ' \ + f'is in the same directory as the lay ' \ + f'file, and the specified dat filename ' \ + f'matches.' 
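# A usage sketch of the same-directory requirement explained above; the paths
# are placeholders. The reader resolves the .dat name from the .lay file and
# expects it to sit right next to it.
from mne.io import read_raw_persyst
raw = read_raw_persyst('/data/sub-01/recording.lay',  # needs recording.dat
                       preload=True)                  # in the same directory
print(raw.annotations)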
if not op.exists(dat_fpath): raise FileNotFoundError(error_msg) fileinfo_dict[key] = val @@ -108,8 +123,10 @@ def __init__(self, fname, preload=False, verbose=None): # Patient (All optional) elif section == 'patient': patient_dict[key] = val + # Comments (turned into mne.Annotations) elif section == 'comments': - comments_dict[key] = val + comments_dict[key] = comments_dict.get(key, list()) + [val] + num_comments += 1 # get numerical metadata # datatype is either 7 for 32 bit, or 0 for 16 bit @@ -205,17 +222,21 @@ def __init__(self, fname, preload=False, verbose=None): raw_extras=[raw_extras], verbose=verbose) # set annotations based on the comments read in - num_comments = len(comments_dict) onset = np.zeros(num_comments, float) duration = np.zeros(num_comments, float) description = [''] * num_comments - for t_idx, (_description, (_onset, _duration)) in \ - enumerate(comments_dict.items()): - # extract the onset, duration, description to - # create an Annotations object - onset[t_idx] = _onset - duration[t_idx] = _duration - description[t_idx] = _description + + # loop through comments dictionary, which may contain + # multiple events for the same "text" annotation + t_idx = 0 + for _description, event_tuples in comments_dict.items(): + for (_onset, _duration) in event_tuples: + # extract the onset, duration, description to + # create an Annotations object + onset[t_idx] = _onset + duration[t_idx] = _duration + description[t_idx] = _description + t_idx += 1 annot = Annotations(onset, duration, description) self.set_annotations(annot) @@ -365,6 +386,34 @@ def _process_lay_line(line, section): value : str The string from the line after the ``'='`` character. If section is "Comments", then returns the onset and duration as a tuple. + + Notes + ----- + The lay file comprises of multiple "sections" that are documented with + bracket ``[]`` characters. For example, ``[FileInfo]`` and the lines + afterward indicate metadata about the data file itself. Within + each section, there are multiple lines in the format of + ``=``. + + For ``FileInfo``, ``Patient`` and ``ChannelMap`` + each line will be denoted with a ``key`` and a ``value`` that + can be represented as a dictionary. The keys describe what sort + of data that line holds, while the values contain the corresponding + value. In some cases, the ``value``. + + For ``SampleTimes``, the ``key`` and ``value`` pair indicate the + start and end time in seconds of the original data file. + + For ``Comments`` section, this denotes an area where users through + Persyst actually annotate data in time. These are instead + represented as 5 data points that are ``,`` delimited. These + data points are ordered as: + + 1. time (in seconds) of the annotation + 2. duration (in seconds) of the annotation + 3. state (unused) + 4. variable type (unused) + 5. 
free-form text describing the annotation """ key = '' # default; only return value possibly not set line = line.strip() # remove leading and trailing spaces @@ -388,7 +437,7 @@ def _process_lay_line(line, section): # Currently not used if section == 'comments': # Persyst Comments output 5 variables "," separated - time_sec, duration, state, var_type, text = line.split(',') + time_sec, duration, state, var_type, text = line.split(',', 4) status = 2 key = text value = (time_sec, duration) diff --git a/mne/io/persyst/tests/test_persyst.py b/mne/io/persyst/tests/test_persyst.py index 7ac8262af6a..8ce0bdb48d5 100644 --- a/mne/io/persyst/tests/test_persyst.py +++ b/mne/io/persyst/tests/test_persyst.py @@ -8,6 +8,7 @@ import pytest from numpy.testing import assert_array_equal +import numpy as np import mne from mne.datasets.testing import data_path, requires_testing_data @@ -123,7 +124,6 @@ def test_persyst_wrong_file(tmpdir): with pytest.raises(FileNotFoundError, match='The path you'): read_raw_persyst(fname_dat, preload=True) - out_dir = mne.utils._TempDir() out_dir = str(tmpdir) new_fname_lay = op.join(out_dir, op.basename(fname_lay)) new_fname_dat = op.join(out_dir, op.basename(fname_dat)) @@ -142,12 +142,75 @@ def test_persyst_wrong_file(tmpdir): read_raw_persyst(new_fname_lay, preload=True) +@requires_testing_data +def test_persyst_moved_file(tmpdir): + """Test reader - Persyst files need to be in same directory.""" + out_dir = str(tmpdir) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_lay, new_fname_lay) + + # original file read should work + read_raw_persyst(fname_lay) + + # without a .dat file, reader should break + # when the lay file was moved + desired_err_msg = \ + 'The data path you specified does ' \ + 'not exist for the lay path, ' \ + 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay' + with pytest.raises(FileNotFoundError, match=desired_err_msg): + read_raw_persyst(new_fname_lay, preload=True) + + # now change the file contents to point + # to the full path, but it should still not work + # as reader requires lay and dat file to be in + # same directory + with open(fname_lay, "r") as fin: + with open(new_fname_lay, 'w') as fout: + # for each line in the input file + for idx, line in enumerate(fin): + if line.startswith('File='): + # give it the full path to the old data + test_fpath = op.join(op.dirname(fname_dat), + line.split('=')[1]) + line = f'File={test_fpath}\n' + fout.write(line) + with pytest.raises(FileNotFoundError, match=desired_err_msg): + read_raw_persyst(new_fname_lay, preload=True) + + # once we copy the dat file to the same directory, reader + # should work + shutil.copy(fname_dat, new_fname_dat) + read_raw_persyst(new_fname_lay, preload=True) + + @requires_testing_data def test_persyst_standard(): """Test standard operations.""" _test_raw_reader(read_raw_persyst, fname=fname_lay) +@requires_testing_data +def test_persyst_annotations(tmpdir): + """Test annotations reading in Persyst.""" + out_dir = str(tmpdir) + new_fname_lay = op.join(out_dir, op.basename(fname_lay)) + new_fname_dat = op.join(out_dir, op.basename(fname_dat)) + shutil.copy(fname_dat, new_fname_dat) + shutil.copy(fname_lay, new_fname_lay) + + raw = read_raw_persyst(new_fname_lay) + + # get the annotations and make sure that repeated annotations + # are in the dataset + annotations = raw.annotations + assert np.count_nonzero(annotations.description == 'seizure') == 2 + + # make sure annotation with a "," 
character is in there + assert 'seizure1,2' in annotations.description + + @requires_testing_data def test_persyst_errors(): """Test reading Persyst files when passed in wrong file path.""" @@ -182,20 +245,6 @@ def test_persyst_errors(): 'file do not'): read_raw_persyst(new_fname_lay) - # reformat the lay file - os.remove(new_fname_lay) - with open(fname_lay, "r") as fin: - with open(new_fname_lay, 'w') as fout: - # for each line in the input file - for idx, line in enumerate(fin): - if line.startswith('File'): - line = f'File=/{op.basename(fname_dat)}\n' - fout.write(line) - # file should break - with pytest.raises(FileNotFoundError, match='The data path ' - 'you specified'): - read_raw_persyst(new_fname_lay) - # reformat the lay file to have testdate # improperly specified os.remove(new_fname_lay) diff --git a/mne/io/pick.py b/mne/io/pick.py index 4a7ae650994..d5711a6c816 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -49,16 +49,19 @@ def get_channel_type_constants(include_defaults=False): seeg=dict(kind=FIFF.FIFFV_SEEG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG), + dbs=dict(kind=FIFF.FIFFV_DBS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), ecog=dict(kind=FIFF.FIFFV_ECOG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG), eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V), bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), stim=dict(kind=FIFF.FIFFV_STIM_CH), - resp=dict(kind=FIFF.FIFFV_RESP_CH), exci=dict(kind=FIFF.FIFFV_EXCI_CH), syst=dict(kind=FIFF.FIFFV_SYST_CH), ias=dict(kind=FIFF.FIFFV_IAS_CH), @@ -127,6 +130,7 @@ def get_channel_type_constants(include_defaults=False): FIFF.FIFFV_IAS_CH: 'ias', FIFF.FIFFV_SYST_CH: 'syst', FIFF.FIFFV_SEEG_CH: 'seeg', + FIFF.FIFFV_DBS_CH: 'dbs', FIFF.FIFFV_BIO_CH: 'bio', FIFF.FIFFV_QUAT_0: 'chpi', FIFF.FIFFV_QUAT_1: 'chpi', @@ -181,8 +185,8 @@ def channel_type(info, idx): Type of channel. Will be one of:: {'grad', 'mag', 'eeg', 'csd', 'stim', 'eog', 'emg', 'ecg', - 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'bio', - 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'} + 'ref_meg', 'resp', 'exci', 'ias', 'syst', 'misc', 'seeg', 'dbs', + 'bio', 'chpi', 'dipole', 'gof', 'ecog', 'hbo', 'hbr'} """ # This is faster than the original _channel_type_old now in test_pick.py # because it uses (at most!) 
two dict lookups plus one conditional @@ -320,20 +324,20 @@ def _triage_fnirs_pick(ch, fnirs, warned): """Triage an fNIRS pick type.""" if fnirs is True: return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and fnirs == 'hbo': + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and 'hbo' in fnirs: return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and fnirs == 'hbr': + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and 'hbr' in fnirs: return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ - fnirs == 'fnirs_cw_amplitude': + 'fnirs_cw_amplitude' in fnirs: return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ - fnirs == 'fnirs_fd_ac_amplitude': + 'fnirs_fd_ac_amplitude' in fnirs: return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ - fnirs == 'fnirs_fd_phase': + 'fnirs_fd_phase' in fnirs: return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and 'fnirs_od' in fnirs: return True return False @@ -366,7 +370,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, emg=False, ref_meg='auto', misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=False, dipole=False, gof=False, bio=False, ecog=False, fnirs=False, csd=False, - include=(), exclude='bads', selection=None): + dbs=False, include=(), exclude='bads', selection=None): """Pick channels by type and names. Parameters @@ -421,6 +425,8 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, include channels measuring deoxyhemoglobin). csd : bool Current source density channels. + dbs : bool + Deep brain stimulation channels. include : list of str List of additional channels to include. If empty do not include any. exclude : list of str | str @@ -449,7 +455,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, len(info['comps']) > 0 and meg is not False) for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci, - ias, syst, seeg, dipole, gof, bio, ecog, csd): + ias, syst, seeg, dipole, gof, bio, ecog, csd, dbs): if not isinstance(param, bool): w = ('Parameters for all channel types (with the exception of ' '"meg", "ref_meg" and "fnirs") must be of type bool, not {}.') @@ -457,7 +463,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, param_dict = dict(eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg, misc=misc, resp=resp, chpi=chpi, exci=exci, - ias=ias, syst=syst, seeg=seeg, dipole=dipole, + ias=ias, syst=syst, seeg=seeg, dbs=dbs, dipole=dipole, gof=gof, bio=bio, ecog=ecog, csd=csd) # avoid triage if possible if isinstance(meg, bool): @@ -705,7 +711,7 @@ def pick_channels_forward(orig, include=[], exclude=[], ordered=False, def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, - ecog=False, include=[], exclude=[]): + ecog=False, dbs=False, include=[], exclude=[]): """Pick by channel type and names from a forward operator. Parameters @@ -724,6 +730,8 @@ def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, If True include stereotactic EEG channels. ecog : bool If True include electrocorticography channels. + dbs : bool + If True include deep brain stimulation channels. include : list of str List of additional channels to include. If empty do not include any. 
exclude : list of str | str @@ -736,8 +744,8 @@ def pick_types_forward(orig, meg=False, eeg=False, ref_meg=True, seeg=False, Forward solution restricted to selected channel types. """ info = orig['info'] - sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog, - include=include, exclude=exclude) + sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, + ecog=ecog, dbs=dbs, include=include, exclude=exclude) if len(sel) == 0: raise ValueError('No valid channels found') include_ch_names = [info['ch_names'][k] for k in sel] @@ -956,36 +964,32 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): _PICK_TYPES_DATA_DICT = dict( meg=True, eeg=True, csd=True, stim=False, eog=False, ecg=False, emg=False, misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, - seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) + seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True, + dbs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') _FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') _DATA_CH_TYPES_ORDER_DEFAULT = ( - 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', - 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', - 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) + 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'resp', 'emg', 'ref_meg', + 'misc', 'stim', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', + 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ( - 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', - 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') + 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'resp', 'emg', 'dipole', 'gof', + 'bio', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') _DATA_CH_TYPES_SPLIT = ( - 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog') + _FNIRS_CH_TYPES_SPLIT + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'dbs') + _FNIRS_CH_TYPES_SPLIT -def _pick_data_channels(info, exclude='bads', with_ref_meg=True): +def _pick_data_channels(info, exclude='bads', with_ref_meg=True, + with_aux=False): """Pick only data channels.""" - return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, - **_PICK_TYPES_DATA_DICT) - - -def _pick_aux_channels(info, exclude='bads'): - """Pick only auxiliary channels. 
- - Corresponds to EOG, ECG, EMG and BIO - """ - return pick_types(info, meg=False, eog=True, ecg=True, emg=True, bio=True, - ref_meg=False, exclude=exclude) + kwargs = _PICK_TYPES_DATA_DICT + if with_aux: + kwargs = kwargs.copy() + kwargs.update(eog=True, ecg=True, emg=True, bio=True) + return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) def _pick_data_or_ica(info, exclude=()): @@ -1137,8 +1141,10 @@ def _picks_str_to_idx(info, picks, exclude, with_ref_meg, return_kind, extra_picks |= set(pick_types( info, meg=use_meg, ref_meg=False, exclude=exclude)) if len(fnirs) > 0 and not kwargs.get('fnirs', False): - # if it has two entries, it's both, otherwise it's just one - kwargs['fnirs'] = True if len(fnirs) == 2 else list(fnirs)[0] + if len(fnirs) == 1: + kwargs['fnirs'] = list(fnirs)[0] + else: + kwargs['fnirs'] = list(fnirs) picks_type = pick_types(info, exclude=exclude, **kwargs) if len(extra_picks) > 0: picks_type = sorted(set(picks_type) | set(extra_picks)) diff --git a/mne/io/proc_history.py b/mne/io/proc_history.py index 13611c0feb9..6bbdecb5356 100644 --- a/mne/io/proc_history.py +++ b/mne/io/proc_history.py @@ -4,7 +4,6 @@ # License: Simplified BSD import numpy as np -from scipy.sparse import csc_matrix from .open import read_tag, fiff_open from .tree import dir_tree_find @@ -13,6 +12,7 @@ write_float_sparse, write_id) from .tag import find_tag from .constants import FIFF +from ..fixes import _csc_matrix_cast from ..utils import warn, _check_fname _proc_keys = ['parent_file_id', 'block_id', 'parent_block_id', @@ -153,7 +153,7 @@ def _write_proc_history(fid, info): FIFF.FIFF_CREATOR, FIFF.FIFF_DECOUPLER_MATRIX) _sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) -_sss_ctc_casters = (dict, np.array, str, csc_matrix) +_sss_ctc_casters = (dict, np.array, str, _csc_matrix_cast) _sss_cal_keys = ('cal_chans', 'cal_corrs') _sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) diff --git a/mne/io/proj.py b/mne/io/proj.py index 37075153578..589393a7ea5 100644 --- a/mne/io/proj.py +++ b/mne/io/proj.py @@ -10,10 +10,9 @@ from math import sqrt import numpy as np -from scipy import linalg from .tree import dir_tree_find -from .tag import find_tag +from .tag import find_tag, _rename_list from .constants import FIFF from .pick import pick_types, pick_info from .write import (write_int, write_float, write_string, write_name_list, @@ -339,22 +338,8 @@ def _proj_equal(a, b, check_active=True): @verbose -def _read_proj(fid, node, verbose=None): - """Read spatial projections from a FIF file. - - Parameters - ---------- - fid : file - The file descriptor of the open file. - node : tree node - The node of the tree where to look. - %(verbose)s - - Returns - ------- - projs : list of Projection - The list of projections. - """ +def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping projs = list() # Locate the projection data @@ -437,6 +422,7 @@ def _read_proj(fid, node, verbose=None): # just always use this, we used to have bugs with writing the # number correctly... 
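# A small sketch of the picking additions above, with placeholder channel
# names: the new 'dbs' flag of pick_types, and 'fnirs' accepting a list of
# types rather than a single string (assuming the list form is passed through
# as in the _picks_str_to_idx change above).
import mne
info = mne.create_info(['LFP1', 'Cz', 'S1_D1 hbo'], 1000.,
                       ['dbs', 'eeg', 'hbo'])
print(mne.pick_types(info, meg=False, dbs=True))       # -> [0]
print(mne.pick_types(info, meg=False, fnirs=['hbo']))  # -> [2]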
nchan = len(names) + names[:] = _rename_list(names, ch_names_mapping) # Use exactly the same fields in data as in a named matrix one = Projection(kind=kind, active=active, desc=desc, data=dict(nrow=nvec, ncol=nchan, row_names=None, @@ -459,7 +445,7 @@ def _read_proj(fid, node, verbose=None): ############################################################################### # Write -def _write_proj(fid, projs): +def _write_proj(fid, projs, *, ch_names_mapping=None): """Write a projection operator to a file. Parameters @@ -472,6 +458,7 @@ def _write_proj(fid, projs): if len(projs) == 0: return + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping # validation _validate_type(projs, (list, tuple), 'projs') for pi, proj in enumerate(projs): @@ -482,8 +469,8 @@ def _write_proj(fid, projs): for proj in projs: start_block(fid, FIFF.FIFFB_PROJ_ITEM) write_int(fid, FIFF.FIFF_NCHAN, len(proj['data']['col_names'])) - write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, - proj['data']['col_names']) + names = _rename_list(proj['data']['col_names'], ch_names_mapping) + write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, names) write_string(fid, FIFF.FIFF_NAME, proj['desc']) write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind']) if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD: @@ -554,6 +541,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, warning will be raised next time projectors are constructed with the given inputs. If inplace=True, no meaningful data are returned. """ + from scipy import linalg nchan = len(ch_names) if nchan == 0: raise ValueError('No channel names specified') @@ -635,7 +623,7 @@ def _make_projector(projs, ch_names, bads=(), include_active=True, return default_return # Reorthogonalize the vectors - U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False) + U, S, _ = linalg.svd(vecs[:, :nvec], full_matrices=False) # Throw away the linearly dependent guys nproj = np.sum((S / S[0]) > 1e-2) diff --git a/mne/io/reference.py b/mne/io/reference.py index 080e66b0abc..b459ed704de 100644 --- a/mne/io/reference.py +++ b/mne/io/reference.py @@ -4,9 +4,7 @@ # # License: BSD (3-clause) -from copy import deepcopy import numpy as np -from scipy import linalg from .constants import FIFF from .meas_info import _check_ch_keys @@ -17,7 +15,7 @@ from ..evoked import Evoked from ..epochs import BaseEpochs from ..utils import (logger, warn, verbose, _validate_type, _check_preload, - _check_option) + _check_option, fill_doc) from ..defaults import DEFAULTS @@ -48,9 +46,8 @@ def _copy_channel(inst, ch_name, new_ch_name): return inst -def _apply_reference(inst, ref_from, ref_to=None, forward=None, - ch_type='auto'): - """Apply a custom EEG referencing scheme.""" +def _check_before_reference(inst, ref_from, ref_to, ch_type): + """Prepare instance for referencing.""" # Check to see that data is preloaded _check_preload(inst, "Applying a reference") @@ -98,6 +95,21 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, inst._projector, _ = \ setup_proj(inst.info, add_eeg_ref=False, activate=False) + # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the + # info that a non-CAR has been applied. 
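# A sketch of the behaviour described above, assuming the 'dbs' support added
# elsewhere in this patch; channel names are placeholders. Re-referencing DBS
# channels flags the measurement info as carrying a custom (non-average)
# reference.
import numpy as np
import mne
from mne.io.constants import FIFF
info = mne.create_info(['LFP1', 'LFP2', 'LFP-REF'], 1000., 'dbs')
raw = mne.io.RawArray(np.zeros((3, 100)), info)
raw.set_eeg_reference(ref_channels=['LFP-REF'], ch_type='dbs')
assert raw.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON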
+ ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True) + if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0: + inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + + return ref_to + + +def _apply_reference(inst, ref_from, ref_to=None, forward=None, + ch_type='auto'): + """Apply a custom EEG referencing scheme.""" + from scipy import linalg + ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type) + # Compute reference if len(ref_from) > 0: # this is guaranteed below, but we should avoid the crazy pick_channels @@ -113,10 +125,6 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, data[..., ref_to, :] -= ref_data ref_data = ref_data[..., 0, :] - # If the reference touches EEG/ECoG/sEEG electrodes, note in the info - # that a non-CAR has been applied. - if len(np.intersect1d(ref_to, eeg_idx)) > 0: - inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON # REST if forward is not None: # use ch_sel and the given forward @@ -139,6 +147,7 @@ def _apply_reference(inst, ref_from, ref_to=None, forward=None, return inst, ref_data +@fill_doc def add_reference_channels(inst, ref_channels, copy=True): """Add reference channels to data that consists of all zeros. @@ -150,10 +159,7 @@ def add_reference_channels(inst, ref_channels, copy=True): ---------- inst : instance of Raw | Epochs | Evoked Instance of Raw or Epochs with EEG channels and reference channel(s). - ref_channels : str | list of str - Name of the electrode(s) which served as the reference in the - recording. If a name is provided, a corresponding channel is added - and its data is set to 0. This is useful for later re-referencing. + %(ref_channels)s copy : bool Specifies whether the data will be copied (True) or modified in-place (False). Defaults to True. @@ -355,11 +361,11 @@ def set_eeg_reference(inst, ref_channels='average', copy=True, def _get_ch_type(inst, ch_type): _validate_type(ch_type, str, 'ch_type') - _check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg')) + _check_option('ch_type', ch_type, ('auto', 'eeg', 'ecog', 'seeg', 'dbs')) # if ch_type is 'auto', search through list to find first reasonable # reference-able channel type. if ch_type == 'auto': - for type_ in ['eeg', 'ecog', 'seeg']: + for type_ in ['eeg', 'ecog', 'seeg', 'dbs']: if type_ in inst: ch_type = type_ logger.info('%s channel type selected for ' @@ -367,7 +373,7 @@ def _get_ch_type(inst, ch_type): break # if auto comes up empty, or the user specifies a bad ch_type. else: - raise ValueError('No EEG, ECoG or sEEG channels found ' + raise ValueError('No EEG, ECoG, sEEG or DBS channels found ' 'to rereference.') return ch_type @@ -379,15 +385,14 @@ def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, A bipolar reference takes the difference between two channels (the anode minus the cathode) and adds it as a new virtual channel. The original - channels will be dropped. + channels will be dropped by default. Multiple anodes and cathodes can be specified, in which case multiple - virtual channels will be created. The 1st anode will be subtracted from the - 1st cathode, the 2nd anode from the 2nd cathode, etc. + virtual channels will be created. The 1st cathode will be subtracted + from the 1st anode, the 2nd cathode from the 2nd anode, etc. - By default, the virtual channels will be annotated with channel info of - the anodes, their locations set to (0, 0, 0) and coil types set to - EEG_BIPOLAR. 
+ By default, the virtual channels will be annotated with channel-info and + -location of the anodes and coil types will be set to EEG_BIPOLAR. Parameters ---------- @@ -434,6 +439,11 @@ def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, .. versionadded:: 0.9.0 """ + from .meas_info import create_info + from ..io import RawArray + from ..epochs import EpochsArray + from ..evoked import EvokedArray + _check_can_reref(inst) if not isinstance(anode, list): anode = [anode] @@ -446,7 +456,7 @@ def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, 'of cathodes (got %d).' % (len(anode), len(cathode))) if ch_name is None: - ch_name = ['%s-%s' % ac for ac in zip(anode, cathode)] + ch_name = [f'{a}-{c}' for (a, c) in zip(anode, cathode)] elif not isinstance(ch_name, list): ch_name = [ch_name] if len(ch_name) != len(anode): @@ -469,36 +479,64 @@ def set_bipolar_reference(inst, anode, cathode, ch_name=None, ch_info=None, raise ValueError('Number of channel info dictionaries must equal the ' 'number of anodes/cathodes.') - # Merge specified and anode channel information dictionaries - new_chs = [] - for ci, (an, ch) in enumerate(zip(anode, ch_info)): - _check_ch_keys(ch, ci, name='ch_info', check_min=False) - an_idx = inst.ch_names.index(an) - this_chs = deepcopy(inst.info['chs'][an_idx]) + if copy: + inst = inst.copy() - # Set channel location and coil type - this_chs['loc'] = np.zeros(12) - this_chs['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR + anode = _check_before_reference(inst, ref_from=cathode, + ref_to=anode, ch_type='auto') - this_chs.update(ch) - new_chs.append(this_chs) + # Create bipolar reference channels by multiplying the data + # (channels x time) with a matrix (n_virtual_channels x channels) + # and add them to the instance. + multiplier = np.zeros((len(anode), len(inst.ch_names))) + for idx, (a, c) in enumerate(zip(anode, cathode)): + multiplier[idx, inst.ch_names.index(a)] = 1 + multiplier[idx, inst.ch_names.index(c)] = -1 - if copy: - inst = inst.copy() + ref_info = create_info(ch_names=ch_name, sfreq=inst.info['sfreq'], + ch_types=inst.get_channel_types(picks=anode)) - for i, (an, ca, name, chs) in enumerate( - zip(anode, cathode, ch_name, new_chs)): - if an in anode[i + 1:] or an in cathode[i + 1:] or not drop_refs: - # Make a copy of the channel if it's still needed later - # otherwise it's modified inplace - _copy_channel(inst, an, 'TMP') - an = 'TMP' - _apply_reference(inst, [ca], [an]) # ensures preloaded + # Update "chs" in Reference-Info. + for ch_idx, (an, info) in enumerate(zip(anode, ch_info)): + _check_ch_keys(info, ch_idx, name='ch_info', check_min=False) an_idx = inst.ch_names.index(an) - inst.info['chs'][an_idx] = chs - inst.info['chs'][an_idx]['ch_name'] = name - logger.info('Bipolar channel added as "%s".' % name) - inst.info._update_redundant() + # Copy everything from anode (except ch_name). + an_chs = {k: v for k, v in inst.info['chs'][an_idx].items() + if k != 'ch_name'} + ref_info['chs'][ch_idx].update(an_chs) + # Set coil-type to bipolar. + ref_info['chs'][ch_idx]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR + # Update with info from ch_info-parameter. + ref_info['chs'][ch_idx].update(info) + + # Set other info-keys from original instance. + pick_info = {k: v for k, v in inst.info.items() if k not in + ['chs', 'ch_names', 'bads', 'nchan', 'sfreq']} + ref_info.update(pick_info) + + # Rereferencing of data. 
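# A tiny numeric sketch of the multiplier matrix assembled above: each row
# carries +1 at the anode index and -1 at the cathode index, so the matrix
# product applied below yields anode-minus-cathode traces for every virtual
# channel. The public entry point is mne.set_bipolar_reference, e.g.
# mne.set_bipolar_reference(raw, anode=['EEG1'], cathode=['EEG2']); data and
# names here are placeholders.
import numpy as np
data = np.array([[1.0, 2.0],    # anode
                 [0.5, 0.5],    # cathode
                 [3.0, 3.0]])   # untouched channel
multiplier = np.array([[1.0, -1.0, 0.0]])
print(multiplier @ data)        # -> [[0.5 1.5]]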
+ ref_data = multiplier @ inst._data + + if isinstance(inst, BaseRaw): + ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp, + copy=None) + elif isinstance(inst, BaseEpochs): + ref_inst = EpochsArray(ref_data, ref_info, events=inst.events, + tmin=inst.tmin, event_id=inst.event_id, + metadata=inst.metadata) + else: + ref_inst = EvokedArray(ref_data, ref_info, tmin=inst.tmin, + comment=inst.comment, nave=inst.nave, + kind='average') + + # Add referenced instance to original instance. + inst.add_channels([ref_inst], force_update_info=True) + + added_channels = ', '.join([name for name in ch_name]) + logger.info(f'Added the following bipolar channels:\n{added_channels}') + + for attr_name in ['picks', '_projector']: + setattr(inst, attr_name, None) # Drop remaining channels. if drop_refs: diff --git a/mne/io/snirf/_snirf.py b/mne/io/snirf/_snirf.py index 9a8cab4ba3d..16b7def0f55 100644 --- a/mne/io/snirf/_snirf.py +++ b/mne/io/snirf/_snirf.py @@ -10,7 +10,7 @@ from ..meas_info import create_info from ..utils import _mult_cal_one from ...annotations import Annotations -from ...utils import logger, verbose, fill_doc, warn +from ...utils import logger, verbose, fill_doc, warn, _check_fname from ...utils.check import _require_version from ..constants import FIFF from .._digitization import _make_dig_points @@ -69,6 +69,7 @@ def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader.utils import _import_h5py h5py = _import_h5py() + fname = _check_fname(fname, 'read', True, 'fname') logger.info('Loading %s' % fname) with h5py.File(fname, 'r') as dat: diff --git a/mne/io/tag.py b/mne/io/tag.py index 5dea0eae7c8..e2b01a918a5 100644 --- a/mne/io/tag.py +++ b/mne/io/tag.py @@ -7,7 +7,6 @@ import struct import numpy as np -from scipy import sparse from .constants import (FIFF, _dig_kind_named, _dig_cardinal_named, _ch_kind_named, _ch_coil_type_named, _ch_unit_named, @@ -168,6 +167,7 @@ def _read_tag_header(fid): def _read_matrix(fid, tag, shape, rlims, matrix_coding): """Read a matrix (dense or sparse) tag.""" + from scipy import sparse matrix_coding = matrix_coding >> 16 # This should be easy to implement (see _frombuffer_rows) @@ -356,12 +356,16 @@ def _read_ch_info_struct(fid, tag, shape, rlims): ch_name = ch_name[:np.argmax(ch_name == b'')].tobytes() d['ch_name'] = ch_name.decode() # coil coordinate system definition + _update_ch_info_named(d) + return d + + +def _update_ch_info_named(d): d['coord_frame'] = _ch_coord_dict.get(d['kind'], FIFF.FIFFV_COORD_UNKNOWN) d['kind'] = _ch_kind_named.get(d['kind'], d['kind']) d['coil_type'] = _ch_coil_type_named.get(d['coil_type'], d['coil_type']) d['unit'] = _ch_unit_named.get(d['unit'], d['unit']) d['unit_mul'] = _ch_unit_mul_named.get(d['unit_mul'], d['unit_mul']) - return d def _read_old_pack(fid, tag, shape, rlims): @@ -453,6 +457,8 @@ def read_tag(fid, pos=None, shape=None, rlims=None): if pos is not None: fid.seek(pos, 0) tag = _read_tag_header(fid) + if tag is None: + return tag if tag.size > 0: matrix_coding = _is_matrix & tag.type if matrix_coding != 0: @@ -501,3 +507,7 @@ def has_tag(node, kind): if d.kind == kind: return True return False + + +def _rename_list(bads, ch_names_mapping): + return [ch_names_mapping.get(bad, bad) for bad in bads] diff --git a/mne/io/tests/test_apply_function.py b/mne/io/tests/test_apply_function.py index 270cce61061..d3686c4799b 100644 --- a/mne/io/tests/test_apply_function.py +++ b/mne/io/tests/test_apply_function.py @@ -7,7 +7,7 @@ from mne import create_info from mne.io import 
RawArray -from mne.utils import logger, catch_logging, run_tests_if_main +from mne.utils import logger, catch_logging def bad_1(x): @@ -63,6 +63,3 @@ def test_apply_function_verbose(): assert out is raw raw.apply_function(printer, verbose=True) assert sio.getvalue().count('\n') == n_chan - - -run_tests_if_main() diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index 020217c0ba0..a89192fec80 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -19,11 +19,12 @@ # https://github.com/mne-tools/fiff-constants/commits/master -commit = '198d943d0ff92ecdfb947b84af6289a0e79ad060' +REPO = 'mne-tools' +COMMIT = '5bd84d224de502bee66f70b7867b8f45b45264c1' # These are oddities that we won't address: iod_dups = (355, 359) # these are in both MEGIN and MNE files -tag_dups = (3501, 3507) # in both MEGIN and MNE files +tag_dups = (3501,) # in both MEGIN and MNE files _dir_ignore_names = ('clear', 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values', @@ -81,8 +82,8 @@ def test_constants(tmpdir): """Test compensation.""" tmpdir = str(tmpdir) # old pytest... dest = op.join(tmpdir, 'fiff.zip') - _fetch_file('https://codeload.github.com/mne-tools/fiff-constants/zip/' + - commit, dest) + _fetch_file('https://codeload.github.com/' + f'{REPO}/fiff-constants/zip/{COMMIT}', dest) names = list() with zipfile.ZipFile(dest, 'r') as ff: for name in ff.namelist(): diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py index f8e3b13b6bb..87d9bbbf427 100644 --- a/mne/io/tests/test_meas_info.py +++ b/mne/io/tests/test_meas_info.py @@ -6,26 +6,33 @@ import hashlib import os.path as op -from datetime import datetime, timedelta, timezone +from datetime import datetime, timedelta, timezone, date import pytest import numpy as np from numpy.testing import assert_array_equal, assert_allclose from scipy import sparse -from mne import Epochs, read_events, pick_info, pick_types, Annotations +from mne import (Epochs, read_events, pick_info, pick_types, Annotations, + read_evokeds, make_forward_solution, make_sphere_model, + setup_volume_source_space, write_forward_solution, + read_forward_solution, write_cov, read_cov, read_epochs, + compute_covariance) from mne.channels import read_polhemus_fastscan from mne.event import make_fixed_length_events from mne.datasets import testing from mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc, - _loc_to_coil_trans, read_raw_fif, read_info, write_info) + _loc_to_coil_trans, read_raw_fif, read_info, write_info, + meas_info, Projection, BaseRaw) from mne.io.constants import FIFF from mne.io.write import _generate_meas_id, DATE_NONE from mne.io.meas_info import (Info, create_info, _merge_info, _force_update_info, RAW_INFO_FIELDS, _bad_chans_comp, _get_valid_units, anonymize_info, _stamp_to_dt, _dt_to_stamp, - _add_timedelta_to_stamp) + _add_timedelta_to_stamp, _read_extended_ch_info) +from mne.minimum_norm import (make_inverse_operator, write_inverse_operator, + read_inverse_operator, apply_inverse) from mne.io._digitization import _write_dig_points, _make_dig_points from mne.io import read_raw_ctf from mne.transforms import Transform @@ -533,6 +540,9 @@ def _test_anonymize_info(base_info): exp_info['subject_info']['last_name'] = default_str exp_info['subject_info']['id'] = default_subject_id exp_info['subject_info']['his_id'] = str(default_subject_id) + exp_info['subject_info']['sex'] = 0 + del exp_info['subject_info']['hand'] # there's no "unknown" setting + # 
this bday is 3653 days different. the change in day is due to a # different number of leap days between 1987 and 1977 than between # 2010 and 2000. @@ -557,6 +567,8 @@ def _test_anonymize_info(base_info): # exp 2 tests the keep_his option exp_info_2 = exp_info.copy() exp_info_2['subject_info']['his_id'] = 'foobar' + exp_info_2['subject_info']['sex'] = 0 + exp_info_2['subject_info']['hand'] = 1 # exp 3 tests is a supplied daysback delta_t_2 = timedelta(days=43) @@ -644,13 +656,38 @@ def test_anonymize(tmpdir): assert raw.first_samp == first_samp assert_allclose(raw.annotations.onset, expected_onset) - # Test instance method + # test mne.anonymize_info() events = read_events(event_name) epochs = Epochs(raw, events[:1], 2, 0., 0.1, baseline=None) - _test_anonymize_info(raw.info.copy()) _test_anonymize_info(epochs.info.copy()) + # test instance methods & I/O roundtrip + for inst, keep_his in zip((raw, epochs), (True, False)): + inst = inst.copy() + + subject_info = dict(his_id='Volunteer', sex=2, hand=1) + inst.info['subject_info'] = subject_info + inst.anonymize(keep_his=keep_his) + + si = inst.info['subject_info'] + if keep_his: + assert si == subject_info + else: + assert si['his_id'] == '0' + assert si['sex'] == 0 + assert 'hand' not in si + + # write to disk & read back + inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo' + fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif' + out_path = tmpdir.join(fname) + inst.save(out_path, overwrite=True) + if inst_type == 'raw': + read_raw_fif(out_path) + else: + read_epochs(out_path) + # test that annotations are correctly zeroed raw.anonymize() assert raw.first_samp == first_samp @@ -667,6 +704,19 @@ def test_anonymize(tmpdir): assert_allclose(raw.annotations.onset, expected_onset) +def test_anonymize_with_io(tmpdir): + """Test that IO does not break anonymization.""" + raw = read_raw_fif(raw_fname) + + temp_path = tmpdir.join('tmp_raw.fif') + raw.save(temp_path) + + raw2 = read_raw_fif(temp_path) + + daysback = (raw2.info['meas_date'].date() - date(1924, 1, 1)).days + raw2.anonymize(daysback=daysback) + + @testing.requires_testing_data def test_csr_csc(tmpdir): """Test CSR and CSC.""" @@ -765,3 +815,150 @@ def test_invalid_subject_birthday(): with pytest.warns(RuntimeWarning, match='No birthday will be set'): raw = read_raw_fif(raw_invalid_bday_fname) assert 'birthday' not in raw.info['subject_info'] + + +@pytest.mark.parametrize('fname', [ + pytest.param(ctf_fname, marks=testing._pytest_mark()), + raw_fname, +]) +def test_channel_name_limit(tmpdir, monkeypatch, fname): + """Test that our remapping works properly.""" + # + # raw + # + if fname.endswith('fif'): + raw = read_raw_fif(fname) + raw.pick_channels(raw.ch_names[:3]) + ref_names = [] + data_names = raw.ch_names + else: + assert fname.endswith('.ds') + raw = read_raw_ctf(fname) + ref_names = [raw.ch_names[pick] + for pick in pick_types(raw.info, meg=False, ref_meg=True)] + data_names = raw.ch_names[32:35] + proj = dict(data=np.ones((1, len(data_names))), + col_names=data_names[:2].copy(), row_names=None, nrow=1) + proj = Projection( + data=proj, active=False, desc='test', kind=0, explained_var=0.) 
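(Aside, not part of the patch: the test added here exercises the new round-trip of channel names longer than the 15-character FIF name field. A minimal sketch of the user-facing behavior this enables, assuming the MNE ≥ 0.23 behavior introduced by this change; the file name and channel name below are made up.)

```python
# Illustrative sketch only -- file name and channel name are hypothetical.
import numpy as np
import mne

ch_name = 'EEG_channel_with_a_really_long_name'  # > 15 characters
info = mne.create_info([ch_name], sfreq=1000., ch_types='eeg')
raw = mne.io.RawArray(np.zeros((1, 1000)), info)
raw.save('long-names_raw.fif', overwrite=True)    # on-disk name truncated to 15 chars
raw2 = mne.io.read_raw_fif('long-names_raw.fif')  # extended channel info restores it
assert raw2.ch_names == [ch_name]
```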
+ raw.add_proj(proj, remove_existing=True) + raw.info.normalize_proj() + raw.pick_channels(data_names + ref_names).crop(0, 2) + long_names = ['123456789abcdefg' + name for name in raw.ch_names] + fname = tmpdir.join('test-raw.fif') + with catch_logging() as log: + raw.save(fname) + log = log.getvalue() + assert 'truncated' not in log + rename = dict(zip(raw.ch_names, long_names)) + long_data_names = [rename[name] for name in data_names] + long_proj_names = long_data_names[:2] + raw.rename_channels(rename) + for comp in raw.info['comps']: + for key in ('row_names', 'col_names'): + for name in comp['data'][key]: + assert name in raw.ch_names + if raw.info['comps']: + assert raw.compensation_grade == 0 + raw.apply_gradient_compensation(3) + assert raw.compensation_grade == 3 + assert len(raw.info['projs']) == 1 + assert raw.info['projs'][0]['data']['col_names'] == long_proj_names + raw.info['bads'] = bads = long_data_names[2:3] + good_long_data_names = [ + name for name in long_data_names if name not in bads] + with catch_logging() as log: + raw.save(fname, overwrite=True, verbose=True) + log = log.getvalue() + assert 'truncated to 15' in log + for name in raw.ch_names: + assert len(name) > 15 + # first read the full way + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert 'Reading extended channel information' in log + for ra in (raw, raw_read): + assert ra.ch_names == long_names + assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names + del raw_read + # next read as if no longer names could be read + monkeypatch.setattr( + meas_info, '_read_extended_ch_info', lambda x, y, z: None) + with catch_logging() as log: + raw_read = read_raw_fif(fname, verbose=True) + log = log.getvalue() + assert 'extended' not in log + if raw.info['comps']: + assert raw_read.compensation_grade == 3 + raw_read.apply_gradient_compensation(0) + assert raw_read.compensation_grade == 0 + monkeypatch.setattr( # restore + meas_info, '_read_extended_ch_info', _read_extended_ch_info) + short_proj_names = [ + f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}' + for ni, name in enumerate(long_data_names[:2])] + assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names + # + # epochs + # + epochs = Epochs(raw, make_fixed_length_events(raw)) + fname = tmpdir.join('test-epo.fif') + epochs.save(fname) + epochs_read = read_epochs(fname) + for ep in (epochs, epochs_read): + assert ep.info['ch_names'] == long_names + assert ep.ch_names == long_names + del raw, epochs_read + # cov + epochs.info['bads'] = [] + cov = compute_covariance(epochs, verbose='error') + fname = tmpdir.join('test-cov.fif') + write_cov(fname, cov) + cov_read = read_cov(fname) + for co in (cov, cov_read): + assert co['names'] == long_data_names + assert co['bads'] == [] + del cov_read + + # + # evoked + # + evoked = epochs.average() + evoked.info['bads'] = bads + assert evoked.nave == 1 + fname = tmpdir.join('test-ave.fif') + evoked.save(fname) + evoked_read = read_evokeds(fname)[0] + for ev in (evoked, evoked_read): + assert ev.ch_names == long_names + assert ev.info['bads'] == bads + del evoked_read, epochs + + # + # forward + # + with pytest.warns(None): # not enough points for CTF + sphere = make_sphere_model('auto', 'auto', evoked.info) + src = setup_volume_source_space( + pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]])) + fwd = make_forward_solution(evoked.info, None, src, sphere) + fname = tmpdir.join('temp-fwd.fif') + write_forward_solution(fname, fwd) + 
fwd_read = read_forward_solution(fname) + for fw in (fwd, fwd_read): + assert fw['sol']['row_names'] == long_data_names + assert fw['info']['ch_names'] == long_data_names + assert fw['info']['bads'] == bads + del fwd_read + + # + # inv + # + inv = make_inverse_operator(evoked.info, fwd, cov) + fname = tmpdir.join('test-inv.fif') + write_inverse_operator(fname, inv) + inv_read = read_inverse_operator(fname) + for iv in (inv, inv_read): + assert iv['info']['ch_names'] == good_long_data_names + apply_inverse(evoked, inv) # smoke test diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py index 732dae85b91..14689bff189 100644 --- a/mne/io/tests/test_pick.py +++ b/mne/io/tests/test_pick.py @@ -244,6 +244,31 @@ def test_pick_seeg_ecog(): assert_equal(len(pick_types(raw.info, meg=False, seeg=True, ecog=True)), 0) +def test_pick_dbs(): + """Test picking with DBS.""" + # gh-8739 + names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split() + types = 'mag mag eeg eeg dbs dbs dbs'.split() + info = create_info(names, 1024., types) + picks_by_type = [('mag', [0, 1]), ('eeg', [2, 3]), ('dbs', [4, 5, 6])] + assert_indexing(info, picks_by_type) + assert_array_equal(pick_types(info, meg=False, dbs=True), [4, 5, 6]) + for i, t in enumerate(types): + assert channel_type(info, i) == types[i] + raw = RawArray(np.zeros((len(names), 7)), info) + events = np.array([[1, 0, 0], [2, 0, 0]]) + epochs = Epochs(raw, events=events, event_id={'event': 0}, + tmin=-1e-5, tmax=1e-5, + baseline=(0, 0)) # only one sample + evoked = epochs.average(pick_types(epochs.info, meg=True, dbs=True)) + e_dbs = evoked.copy().pick_types(meg=False, dbs=True) + for lt, rt in zip(e_dbs.ch_names, [names[4], names[5], names[6]]): + assert lt == rt + raw = read_raw_fif(op.join(io_dir, 'tests', 'data', + 'test_chpi_raw_sss.fif')) + assert len(pick_types(raw.info, meg=False, dbs=True)) == 0 + + def test_pick_chpi(): """Test picking cHPI.""" # Make sure we don't mis-classify cHPI channels diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py index d6e6ed055f5..d46e04a1fa1 100644 --- a/mne/io/tests/test_raw.py +++ b/mne/io/tests/test_raw.py @@ -5,8 +5,12 @@ # # License: BSD (3-clause) -from os import path as op +from contextlib import redirect_stdout +from io import StringIO import math +import os +from os import path as op +from pathlib import Path import re import pytest @@ -18,12 +22,15 @@ from mne.datasets import testing from mne.externals.h5io import read_hdf5, write_hdf5 from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5 +from mne.io.base import _get_scaling from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt, - object_diff, check_version) + object_diff, check_version, requires_pandas, + _check_eeglabio_installed) from mne.io.meas_info import _get_valid_units from mne.io._digitization import DigPoint from mne.io.proj import Projection from mne.io.utils import _mult_cal_one +from mne.io import read_raw_eeglab def assert_named_constants(info): @@ -257,6 +264,12 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, out_fname = op.join(tempdir, 'test_raw.fif') raw = concatenate_raws([raw]) raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1) + + # Test saving with not correct extension + out_fname_h5 = op.join(tempdir, 'test_raw.h5') + with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'): + raw.save(out_fname_h5) + raw3 = read_raw_fif(out_fname) assert_named_constants(raw3.info) assert set(raw.info.keys()) == set(raw3.info.keys()) @@ 
-340,6 +353,38 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, write_hdf5(fname_h5, raw.info) new_info = Info(read_hdf5(fname_h5)) assert object_diff(new_info, raw.info) == '' + + # Make sure that changing directory does not break anything + if test_preloading: + these_kwargs = kwargs.copy() + key = None + for key in ('fname', + 'input_fname', # artemis123 + 'vhdr_fname', # BV + 'pdf_fname', # BTi + 'directory', # CTF + 'filename', # nedf + ): + try: + fname = kwargs[key] + except KeyError: + key = None + else: + break + # len(kwargs) == 0 for the fake arange reader + if len(kwargs): + assert key is not None, sorted(kwargs.keys()) + dirname = op.dirname(fname) + these_kwargs[key] = op.basename(fname) + these_kwargs['preload'] = False + orig_dir = os.getcwd() + try: + os.chdir(dirname) + raw_chdir = reader(**these_kwargs) + finally: + os.chdir(orig_dir) + raw_chdir.load_data() + return raw @@ -537,3 +582,134 @@ def _read_raw_arange(preload=False, verbose=None): def test_test_raw_reader(): """Test _test_raw_reader.""" _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank='less') + + +@pytest.mark.slowtest +def test_describe_print(): + """Test print output of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + # test print output + f = StringIO() + with redirect_stdout(f): + raw.describe() + s = f.getvalue().strip().split("\n") + assert len(s) == 378 + assert s[0] == "" # noqa + assert s[1] == " ch name type unit min Q1 median Q3 max" # noqa + assert s[2] == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa + assert s[-1] == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa + + +@requires_pandas +@pytest.mark.slowtest +def test_describe_df(): + """Test returned data frame of describe method.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + df = raw.describe(data_frame=True) + assert df.shape == (376, 8) + assert (df.columns.tolist() == ["name", "type", "unit", "min", "Q1", + "median", "Q3", "max"]) + assert df.index.name == "ch" + assert_allclose(df.iloc[0, 3:].astype(float), + np.array([-2.218017605790535e-11, + -3.857421923113974e-12, + -9.643554807784935e-13, + 1.928710961556987e-12, + 4.146728567347522e-11])) + + +def test_get_data_units(): + """Test the "units" argument of get_data method.""" + # Test the unit conversion function + assert _get_scaling('eeg', 'uV') == 1e6 + assert _get_scaling('eeg', 'dV') == 1e1 + assert _get_scaling('eeg', 'pV') == 1e12 + assert _get_scaling('mag', 'fT') == 1e15 + assert _get_scaling('grad', 'T/m') == 1 + assert _get_scaling('grad', 'T/mm') == 1e-3 + assert _get_scaling('grad', 'fT/m') == 1e15 + assert _get_scaling('grad', 'fT/cm') == 1e13 + assert _get_scaling('csd', 'uV/cm²') == 1e2 + + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + + last = np.array([4.63803098e-05, 7.66563736e-05, 2.71933595e-04]) + last_eeg = np.array([7.12207023e-05, 4.63803098e-05, 7.66563736e-05]) + last_grad = np.array([-3.85742192e-12, 9.64355481e-13, -1.06079103e-11]) + + # None + data_none = raw.get_data() + assert data_none.shape == (376, 14400) + assert_array_almost_equal(data_none[-3:, -1], last) + + # str: unit no conversion + data_str_noconv = raw.get_data(picks=['eeg'], units='V') + assert data_str_noconv.shape == (60, 14400) + assert_array_almost_equal(data_str_noconv[-3:, -1], last_eeg) + # str: simple unit + data_str_simple = raw.get_data(picks=['eeg'], 
units='uV') + assert data_str_simple.shape == (60, 14400) + assert_array_almost_equal(data_str_simple[-3:, -1], last_eeg * 1e6) + # str: fraction unit + data_str_fraction = raw.get_data(picks=['grad'], units='fT/cm') + assert data_str_fraction.shape == (204, 14400) + assert_array_almost_equal(data_str_fraction[-3:, -1], + last_grad * (1e15 / 1e2)) + # str: more than one channel type but one with unit + data_str_simplestim = raw.get_data(picks=['eeg', 'stim'], units='V') + assert data_str_simplestim.shape == (69, 14400) + assert_array_almost_equal(data_str_simplestim[-3:, -1], last_eeg) + # str: too many channels + with pytest.raises(ValueError, match='more than one channel'): + raw.get_data(units='uV') + # str: invalid unit + with pytest.raises(ValueError, match='is not a valid unit'): + raw.get_data(picks=['eeg'], units='fV/cm') + + # dict: combination of simple and fraction units + data_dict = raw.get_data(units=dict(grad='fT/cm', mag='fT', eeg='uV')) + assert data_dict.shape == (376, 14400) + assert_array_almost_equal(data_dict[0, -1], + -3.857421923113974e-12 * (1e15 / 1e2)) + assert_array_almost_equal(data_dict[2, -1], -2.1478272253525944e-13 * 1e15) + assert_array_almost_equal(data_dict[-2, -1], 7.665637356879529e-05 * 1e6) + # dict: channel type not in instance + data_dict_notin = raw.get_data(units=dict(hbo='uM')) + assert data_dict_notin.shape == (376, 14400) + assert_array_almost_equal(data_dict_notin[-3:, -1], last) + # dict: one invalid unit + with pytest.raises(ValueError, match='is not a valid unit'): + raw.get_data(units=dict(grad='fT/cV', mag='fT', eeg='uV')) + # dict: one invalid channel type + with pytest.raises(KeyError, match='is not a channel type'): + raw.get_data(units=dict(bad_type='fT/cV', mag='fT', eeg='uV')) + + # not the good type + with pytest.raises(TypeError, match='instance of None, str, or dict'): + raw.get_data(units=['fT/cm', 'fT', 'uV']) + + +@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), + reason='eeglabio not installed') +def test_export_eeglab(): + """Test saving a Raw instance to EEGLAB's set format.""" + fname = Path(__file__).parent / "data" / "test_raw.fif" + raw = read_raw_fif(fname) + raw.load_data() + tmpdir = _TempDir() + temp_fname = op.join(str(tmpdir), 'test.set') + raw.export(temp_fname) + raw.drop_channels([ch for ch in ['epoc'] + if ch in raw.ch_names]) + raw_read = read_raw_eeglab(temp_fname, preload=True) + assert raw.ch_names == raw_read.ch_names + cart_coords = np.array([d['loc'][:3] for d in raw.info['chs']]) # just xyz + cart_coords_read = np.array([d['loc'][:3] for d in raw_read.info['chs']]) + assert_allclose(cart_coords, cart_coords_read) + assert_allclose(raw.times, raw_read.times) + assert_allclose(raw.get_data(), raw_read.get_data()) diff --git a/mne/io/tests/test_read_raw.py b/mne/io/tests/test_read_raw.py index 7d670aac35e..10007c5520c 100644 --- a/mne/io/tests/test_read_raw.py +++ b/mne/io/tests/test_read_raw.py @@ -5,11 +5,15 @@ # License: BSD (3-clause) from pathlib import Path + import pytest + from mne.io import read_raw +from mne.datasets import testing base = Path(__file__).parent.parent +test_base = Path(testing.data_path(download=False)) @pytest.mark.parametrize('fname', ['x.xxx', 'x']) @@ -26,10 +30,14 @@ def test_read_raw_suggested(fname): read_raw(fname) -@pytest.mark.parametrize('fname', [base / 'edf/tests/data/test.edf', - base / 'edf/tests/data/test.bdf', - base / 'brainvision/tests/data/test.vhdr', - base / 'kit/tests/data/test.sqd']) +@pytest.mark.parametrize('fname', [ + base / 
'edf/tests/data/test.edf', + base / 'edf/tests/data/test.bdf', + base / 'brainvision/tests/data/test.vhdr', + base / 'kit/tests/data/test.sqd', + pytest.param(test_base / 'KIT/data_berlin.con', + marks=testing._pytest_mark()), +]) def test_read_raw_supported(fname): """Test supported file types.""" read_raw(fname) diff --git a/mne/io/tests/test_reference.py b/mne/io/tests/test_reference.py index 9a8c555b1af..520d48bf121 100644 --- a/mne/io/tests/test_reference.py +++ b/mne/io/tests/test_reference.py @@ -15,7 +15,8 @@ set_eeg_reference, set_bipolar_reference, add_reference_channels, create_info, make_sphere_model, make_forward_solution, setup_volume_source_space, - pick_channels_forward, read_evokeds) + pick_channels_forward, read_evokeds, + find_events) from mne.epochs import BaseEpochs from mne.fixes import nullcontext from mne.io import RawArray, read_raw_fif @@ -237,21 +238,37 @@ def test_set_eeg_reference(): set_eeg_reference(raw, ['EEG 001'], True, True) -@pytest.mark.parametrize('ch_type', ('auto', 'ecog')) +@pytest.mark.parametrize('ch_type', ('auto', 'ecog', 'dbs')) def test_set_eeg_reference_ch_type(ch_type): - """Test setting EEG reference for ECoG.""" + """Test setting EEG reference for ECoG or DBS.""" # gh-6454 + # gh-8739 added DBS + ch_names = ['ECOG01', 'ECOG02', 'DBS01', 'DBS02', 'MISC'] rng = np.random.RandomState(0) - data = rng.randn(3, 1000) - raw = RawArray(data, create_info(3, 1000., ['ecog'] * 2 + ['misc'])) + data = rng.randn(5, 1000) + raw = RawArray(data, create_info(ch_names, 1000., ['ecog'] * 2 + + ['dbs'] * 2 + ['misc'])) + if ch_type == 'auto': + + ref_ch = ch_names[:2] + else: + ref_ch = raw.copy().pick(picks=ch_type).ch_names with catch_logging() as log: reref, ref_data = set_eeg_reference(raw.copy(), ch_type=ch_type, verbose=True) - assert 'Applying a custom ECoG' in log.getvalue() + if ch_type in ['auto', 'ecog']: + assert 'Applying a custom ECoG' in log.getvalue() + else: + assert 'Applying a custom DBS' in log.getvalue() assert reref.info['custom_ref_applied'] # gh-7350 - _test_reference(raw, reref, ref_data, ['0', '1']) + _test_reference(raw, reref, ref_data, ref_ch) with pytest.raises(ValueError, match='No channels supplied'): set_eeg_reference(raw, ch_type='eeg') + # gh-8739 + raw2 = RawArray(data, create_info(5, 1000., ['mag'] * 4 + ['misc'])) + with pytest.raises(ValueError, match='No EEG, ECoG, sEEG or DBS channels ' + 'found to rereference.'): + set_eeg_reference(raw2, ch_type='auto') @testing.requires_testing_data @@ -314,23 +331,35 @@ def test_set_eeg_reference_rest(): @testing.requires_testing_data -def test_set_bipolar_reference(): +@pytest.mark.parametrize('inst_type', ('raw', 'epochs', 'evoked')) +def test_set_bipolar_reference(inst_type): """Test bipolar referencing.""" raw = read_raw_fif(fif_fname, preload=True) raw.apply_proj() + if inst_type == 'raw': + inst = raw + del raw + elif inst_type in ['epochs', 'evoked']: + events = find_events(raw, stim_channel='STI 014') + epochs = Epochs(raw, events, tmin=-0.3, tmax=0.7, preload=True) + inst = epochs + if inst_type == 'evoked': + inst = epochs.average() + del epochs + ch_info = {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'} with pytest.raises(KeyError, match='key errantly present'): - set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar', ch_info) + set_bipolar_reference(inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) ch_info.pop('extra') reref = set_bipolar_reference( - raw, 'EEG 001', 'EEG 002', 'bipolar', ch_info) + inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info) assert 
(reref.info['custom_ref_applied']) # Compare result to a manual calculation - a = raw.copy().pick_channels(['EEG 001', 'EEG 002']) - a = a._data[0, :] - a._data[1, :] - b = reref.copy().pick_channels(['bipolar'])._data[0, :] + a = inst.copy().pick_channels(['EEG 001', 'EEG 002']) + a = a._data[..., 0, :] - a._data[..., 1, :] + b = reref.copy().pick_channels(['bipolar'])._data[..., 0, :] assert_allclose(a, b) # Original channels should be replaced by a virtual one @@ -340,23 +369,21 @@ def test_set_bipolar_reference(): # Check channel information bp_info = reref.info['chs'][reref.ch_names.index('bipolar')] - an_info = reref.info['chs'][raw.ch_names.index('EEG 001')] + an_info = inst.info['chs'][inst.ch_names.index('EEG 001')] for key in bp_info: - if key == 'loc': - assert_array_equal(bp_info[key], 0) - elif key == 'coil_type': - assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR) + if key == 'coil_type': + assert bp_info[key] == FIFF.FIFFV_COIL_EEG_BIPOLAR, key elif key == 'kind': - assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH) - else: - assert_equal(bp_info[key], an_info[key]) + assert bp_info[key] == FIFF.FIFFV_EOG_CH, key + elif key != 'ch_name': + assert_equal(bp_info[key], an_info[key], err_msg=key) # Minimalist call - reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002') + reref = set_bipolar_reference(inst, 'EEG 001', 'EEG 002') assert ('EEG 001-EEG 002' in reref.ch_names) # Minimalist call with twice the same anode - reref = set_bipolar_reference(raw, + reref = set_bipolar_reference(inst, ['EEG 001', 'EEG 001', 'EEG 002'], ['EEG 002', 'EEG 003', 'EEG 003']) assert ('EEG 001-EEG 002' in reref.ch_names) @@ -364,38 +391,41 @@ def test_set_bipolar_reference(): # Set multiple references at once reref = set_bipolar_reference( - raw, + inst, ['EEG 001', 'EEG 003'], ['EEG 002', 'EEG 004'], ['bipolar1', 'bipolar2'], [{'kind': FIFF.FIFFV_EOG_CH}, {'kind': FIFF.FIFFV_EOG_CH}], ) - a = raw.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']) - a = np.array([a._data[0, :] - a._data[1, :], - a._data[2, :] - a._data[3, :]]) + a = inst.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004']) + a = np.concatenate( + [a._data[..., :1, :] - a._data[..., 1:2, :], + a._data[..., 2:3, :] - a._data[..., 3:4, :]], + axis=-2 + ) b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data assert_allclose(a, b) # Test creating a bipolar reference that doesn't involve EEG channels: # it should not set the custom_ref_applied flag - reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112', + reref = set_bipolar_reference(inst, 'MEG 0111', 'MEG 0112', ch_info={'kind': FIFF.FIFFV_MEG_CH}, verbose='error') assert (not reref.info['custom_ref_applied']) - assert ('MEG 0111-MEG 0112'[:15] in reref.ch_names) + assert ('MEG 0111-MEG 0112' in reref.ch_names) # Test a battery of invalid inputs - pytest.raises(ValueError, set_bipolar_reference, raw, + pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar') - pytest.raises(ValueError, set_bipolar_reference, raw, + pytest.raises(ValueError, set_bipolar_reference, inst, ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar') - pytest.raises(ValueError, set_bipolar_reference, raw, + pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2']) - pytest.raises(ValueError, set_bipolar_reference, raw, + pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001', 'EEG 002', 'bipolar', ch_info=[{'foo': 'bar'}, {'foo': 'bar'}]) - pytest.raises(ValueError, 
set_bipolar_reference, raw, + pytest.raises(ValueError, set_bipolar_reference, inst, 'EEG 001', 'EEG 002', ch_name='EEG 003') diff --git a/mne/io/utils.py b/mne/io/utils.py index a272cd23065..f0c700880f7 100644 --- a/mne/io/utils.py +++ b/mne/io/utils.py @@ -77,8 +77,7 @@ def _find_channels(ch_names, ch_type='EOG'): def _mult_cal_one(data_view, one, idx, cals, mult): """Take a chunk of raw data, multiply by mult or cals, and store.""" one = np.asarray(one, dtype=data_view.dtype) - assert data_view.shape[1] == one.shape[1], \ - (data_view.shape[1], one.shape[1]) + assert data_view.shape[1] == one.shape[1], (data_view.shape[1], one.shape[1]) # noqa: E501 if mult is not None: mult.ndim == one.ndim == 2 data_view[:] = mult @ one[idx] @@ -313,3 +312,24 @@ def _construct_bids_filename(base, ext, part_idx): if dirname: use_fname = op.join(dirname, use_fname) return use_fname + + +def _get_als_coords_from_chs(chs, drop_chs=None): + """Extract channel locations in ALS format (x, y, z) from a chs instance. + + Returns + ------- + None if no valid coordinates are found (all zeros) + """ + if drop_chs is None: + drop_chs = [] + cart_coords = np.array([d['loc'][:3] for d in chs + if d['ch_name'] not in drop_chs]) + if cart_coords.any(): # has coordinates + # (-y x z) to (x y z) + cart_coords[:, 0] = -cart_coords[:, 0] # -y to y + # swap x (1) and y (0) + cart_coords[:, [0, 1]] = cart_coords[:, [1, 0]] + else: + cart_coords = None + return cart_coords diff --git a/mne/io/write.py b/mne/io/write.py index 4cd33598601..baa611e5009 100644 --- a/mne/io/write.py +++ b/mne/io/write.py @@ -10,7 +10,6 @@ import uuid import numpy as np -from scipy import linalg, sparse from .constants import FIFF from ..utils import logger, _file_like @@ -368,7 +367,7 @@ def write_coord_trans(fid, trans): fid.write(np.array(move, dtype='>f4').tobytes()) # ...and its inverse - trans_inv = linalg.inv(trans['trans']) + trans_inv = np.linalg.inv(trans['trans']) rot = trans_inv[:3, :3] move = trans_inv[:3, 3] fid.write(np.array(rot, dtype='>f4').tobytes()) @@ -436,6 +435,7 @@ def write_float_sparse_ccs(fid, kind, mat): def write_float_sparse(fid, kind, mat, fmt='auto'): """Write a single-precision floating-point sparse matrix tag.""" + from scipy import sparse from .tag import _matrix_coding_CCS, _matrix_coding_RCS if fmt == 'auto': fmt = 'csr' if isinstance(mat, sparse.csr_matrix) else 'csc' diff --git a/mne/label.py b/mne/label.py index 1ac3d23b744..100122f27e1 100644 --- a/mne/label.py +++ b/mne/label.py @@ -12,7 +12,6 @@ import re import numpy as np -from scipy import linalg, sparse from .parallel import parallel_func, check_n_jobs from .source_estimate import (SourceEstimate, VolSourceEstimate, @@ -233,7 +232,7 @@ def __init__(self, vertices=(), pos=None, values=None, hemi=None, self.hemi = hemi self.comment = comment self.verbose = verbose - self.subject = _check_subject(None, subject, False) + self.subject = _check_subject(None, subject, raise_error=False) self.color = color self.name = name self.filename = filename @@ -270,7 +269,13 @@ def __repr__(self): # noqa: D105 return "
-""") +""") # noqa: E501 footer_template = HTMLTemplate(u""" -
- +
+ +
+ -""") +""") # noqa: E501 html_template = Template(u"""
  • @@ -762,69 +813,6 @@ def _build_html_slider(slices_range, slides_klass, slider_id,
  • """) -raw_template = Template(u""" -
  • -

    {{caption}}

    - - - - {{if meas_date is not None}} - - {{else}}{{endif}} - - - - {{if info['experimenter'] is not None}} - - {{else}}{{endif}} - - - - {{if info['dig'] is not None}} - - {{else}} - - {{endif}} - - - - - - - - {{if info['bads'] is not None}} - - {{else}}{{endif}} - - - - - - - - - - - - - - - - - - - - - - - - -
    Measurement date{{meas_date}}Unknown
    Experimenter{{info['experimenter']}}Unknown
    Digitized points{{len(info['dig'])}} pointsNot available
    Good channels{{n_mag}} magnetometer, {{n_grad}} gradiometer, - and {{n_eeg}} EEG channels
    Bad channels{{', '.join(info['bads'])}}None
    EOG channels{{eog}}
    ECG channels{{ecg}}
    Measurement time range{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.
    Sampling frequency{{u'%0.2f' % info['sfreq']}} Hz
    Highpass{{u'%0.2f' % info['highpass']}} Hz
    Lowpass{{u'%0.2f' % info['lowpass']}} Hz
    -
  • -""") - - toc_list = Template(u"""
  • {{if id}} @@ -890,11 +878,52 @@ class Report(object): .. versionadded:: 0.21 %(verbose)s + Attributes + ---------- + info_fname : None | str + Name of the file containing the info dictionary. + %(subjects_dir)s + subject : str | None + Subject name. + title : str + Title of the report. + cov_fname : None | str + Name of the file containing the noise covariance. + %(baseline_report)s + Defaults to ``None``, i.e. no baseline correction. + image_format : str + Default image format to use (default is 'png'). + SVG uses vector graphics, so fidelity is higher but can increase + file size and browser image rendering time as well. + + .. versionadded:: 0.15 + + raw_psd : bool | dict + If True, include PSD plots for raw files. Can be False (default) to + omit, True to plot, or a dict to pass as ``kwargs`` to + :meth:`mne.io.Raw.plot_psd`. + + .. versionadded:: 0.17 + projs : bool + Whether to include topographic plots of SSP projectors, if present in + the data. Defaults to ``False``. + + .. versionadded:: 0.21 + %(verbose)s + html : list of str + Contains items of html-page. + include : list of str + Dictionary containing elements included in head. + fnames : list of str + List of file names rendered. + sections : list of str + List of sections. + lang : str + language setting for the HTML file. + Notes ----- - See :ref:`tut-report` for an introduction to using ``mne.Report``, and - :ref:`this example ` for an example of customizing the report - with a slider. + See :ref:`tut-report` for an introduction to using ``mne.Report``. .. versionadded:: 0.8.0 """ @@ -905,15 +934,18 @@ def __init__(self, info_fname=None, subjects_dir=None, self.info_fname = str(info_fname) if info_fname is not None else None self.cov_fname = str(cov_fname) if cov_fname is not None else None self.baseline = baseline - self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) + if subjects_dir is not None: + subjects_dir = get_subjects_dir(subjects_dir) + self.subjects_dir = subjects_dir self.subject = subject self.title = title self.image_format = _check_image_format(None, image_format) self.projs = projs self.verbose = verbose - self.initial_id = 0 + self._initial_id = 0 self.html = [] + self.include = [] self.fnames = [] # List of file names rendered self.sections = [] # List of sections self.lang = 'en-us' # language setting for the HTML file @@ -944,13 +976,19 @@ def __repr__(self): return s def __len__(self): - """Return the number of items in report.""" + """Return the number of files processed by the report. + + Returns + ------- + n_files : int + The number of files processed. + """ return len(self.fnames) def _get_id(self): """Get id of plot.""" - self.initial_id += 1 - return self.initial_id + self._initial_id += 1 + return self._initial_id def _validate_input(self, items, captions, section, comments=None): """Validate input.""" @@ -979,6 +1017,38 @@ def _validate_input(self, items, captions, section, comments=None): return items, captions, comments + def add_custom_css(self, css): + """Add custom CSS to the report. + + Parameters + ---------- + css : str + Style definitions to add to the report. The content of this string + will be embedded between HTML ```` tags. + + Notes + ----- + .. versionadded:: 0.23 + """ + style = f'\n' + self.include += style + + def add_custom_js(self, js): + """Add custom JavaScript to the report. + + Parameters + ---------- + js : str + JavaScript code to add to the report. The content of this string + will be embedded between HTML ```` tags. + + Notes + ----- + .. 
versionadded:: 0.23 + """ + script = f'\n' + self.include += script + def remove(self, caption, section=None): """Remove a figure from the report. @@ -1067,7 +1137,7 @@ def _add_or_replace(self, fname, sectionlabel, html, replace=False): def add_figs_to_section(self, figs, captions, section='custom', scale=None, image_format=None, comments=None, - replace=False): + replace=False, auto_close=True): """Append custom user-defined figures. Parameters @@ -1097,6 +1167,9 @@ class construction. replace : bool If ``True``, figures already present that have the same caption will be replaced. Defaults to ``False``. + auto_close : bool + If True, the plots are closed during the generation of the report. + Defaults to True. """ figs, captions, comments = self._validate_input(figs, captions, section, comments) @@ -1109,7 +1182,7 @@ class construction. div_klass = self._sectionvars[section] img_klass = self._sectionvars[section] - img = _fig_to_img(fig, image_format, scale) + img = _fig_to_img(fig, image_format, scale, auto_close) html = image_template.substitute(img=img, id=global_id, div_klass=div_klass, img_klass=img_klass, @@ -1211,10 +1284,10 @@ def add_htmls_to_section(self, htmls, captions, section='custom', html_template.substitute(div_klass=div_klass, id=global_id, caption=caption, html=html), replace) - @fill_doc + @verbose def add_bem_to_section(self, subject, caption='BEM', section='bem', decim=2, n_jobs=1, subjects_dir=None, - replace=False): + replace=False, width=512, verbose=None): """Render a bem slider html str. Parameters @@ -1234,26 +1307,35 @@ def add_bem_to_section(self, subject, caption='BEM', section='bem', replace : bool If ``True``, figures already present that have the same caption will be replaced. Defaults to ``False``. + width : int + The width of the MRI images (in pixels). Larger values will have + clearer surface lines, but will create larger HTML files. + Typically a factor of 2 more than the number of MRI voxels along + each dimension (typically 512, default) is reasonable. + + .. versionadded:: 0.23 + %(verbose_meth)s Notes ----- .. versionadded:: 0.9.0 """ + width = _ensure_int(width, 'width') caption = 'custom plot' if caption == '' else caption html = self._render_bem(subject=subject, subjects_dir=subjects_dir, decim=decim, n_jobs=n_jobs, section=section, - caption=caption) + caption=caption, width=width) html, caption, _ = self._validate_input(html, caption, section) sectionvar = self._sectionvars[section] # convert list->str assert isinstance(html, list) html = u''.join(html) self._add_or_replace('%s-#-%s-#-custom' % (caption[0], sectionvar), - sectionvar, html) + sectionvar, html, replace=replace) def add_slider_to_section(self, figs, captions=None, section='custom', title='Slider', scale=None, image_format=None, - replace=False): + replace=False, auto_close=True): """Render a slider of figs to the report. Parameters @@ -1285,6 +1367,11 @@ class construction. replace : bool If ``True``, figures already present that have the same caption will be replaced. Defaults to ``False``. + auto_close : bool + If True, the plots are closed during the generation of the report. + Defaults to True. + + .. versionadded:: 0.23 Notes ----- @@ -1326,7 +1413,7 @@ class construction. 
raise TypeError('Captions must be None or an iterable of ' 'float, int, str, Got %s' % type(captions)) for ii, (fig, caption) in enumerate(zip(figs, captions)): - img = _fig_to_img(fig, image_format, scale) + img = _fig_to_img(fig, image_format, scale, auto_close) slice_id = '%s-%s-%s' % (name, global_id, sl[ii]) first = True if ii == 0 else False slices.append(_build_html_image(img, slice_id, div_klass, @@ -1347,7 +1434,8 @@ class construction. slider_full_template.substitute(id=global_id, title=title, div_klass=slider_klass, slider_id=slider_id, html=html, - image_html=image_html)) + image_html=image_html), + replace=replace) ########################################################################### # HTML rendering @@ -1400,7 +1488,7 @@ def _init_render(self, verbose=None): self.include = ''.join(include) @verbose - def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2, + def parse_folder(self, data_path, pattern=None, n_jobs=1, mri_decim=2, sort_sections=True, on_error='warn', image_format=None, render_bem=True, verbose=None): r"""Render all the files in the folder. @@ -1410,10 +1498,13 @@ def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2, data_path : str Path to the folder containing data whose HTML report will be created. - pattern : str | list of str + pattern : None | str | list of str Filename pattern(s) to include in the report. Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked - files. + files. If ``None``, include all supported file formats. + + .. versionchanged:: 0.23 + Include supported non-FIFF files by default. %(n_jobs)s mri_decim : int Use this decimation factor for generating MRI/BEM images @@ -1448,7 +1539,9 @@ class construction. if self.title is None: self.title = 'MNE Report for ...%s' % self.data_path[-20:] - if not isinstance(pattern, (list, tuple)): + if pattern is None: + pattern = [f'*{ext}' for ext in SUPPORTED_READ_RAW_EXTENSIONS] + elif not isinstance(pattern, (list, tuple)): pattern = [pattern] # iterate through the possible patterns @@ -1463,7 +1556,10 @@ class construction. fnames_to_remove = [] for fname in fnames: if _endswith(fname, ('raw', 'sss', 'meg')): - inst = read_raw_fif(fname, allow_maxshield=True, preload=False) + kwargs = dict(fname=fname, preload=False) + if fname.endswith(('.fif', '.fif.gz')): + kwargs['allow_maxshield'] = True + inst = read_raw(**kwargs) else: continue @@ -1543,8 +1639,8 @@ def _get_state_params(self): """ # Note: self._fname is not part of the state return (['baseline', 'cov_fname', 'fnames', 'html', 'include', - 'image_format', 'info_fname', 'initial_id', 'raw_psd', - '_sectionlabels', 'sections', '_sectionvars', + 'image_format', 'info_fname', '_initial_id', 'raw_psd', + '_sectionlabels', 'sections', '_sectionvars', 'projs', '_sort_sections', 'subjects_dir', 'subject', 'title', 'verbose'], ['data_path', 'lang', '_sort']) @@ -1570,7 +1666,9 @@ def __setstate__(self, state): setattr(self, param, state[param]) return state - def save(self, fname=None, open_browser=True, overwrite=False): + @verbose + def save(self, fname=None, open_browser=True, overwrite=False, *, + verbose=None): """Save the report and optionally open it in browser. Parameters @@ -1585,8 +1683,8 @@ def save(self, fname=None, open_browser=True, overwrite=False): open_browser : bool When saving to HTML, open the rendered HTML file browser after saving if True. Defaults to True. - overwrite : bool - If True, overwrite report if it already exists. Defaults to False. 
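(Aside, not part of the patch: the add_custom_css / add_custom_js methods introduced above combine with Report.save as in this minimal sketch; the title, CSS, JavaScript, and output file name are made-up examples.)

```python
# Illustrative sketch only -- inputs are hypothetical.
import mne

report = mne.Report(title='Example report')
report.add_custom_css('h1 { color: steelblue; }')      # appended to the report's include
report.add_custom_js('console.log("report loaded");')  # likewise, as inline JavaScript
report.save('report.html', overwrite=True, open_browser=False)
```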
+ %(overwrite)s + %(verbose_meth)s Returns ------- @@ -1723,8 +1821,7 @@ def _render_toc(self, verbose=None): global_id += 1 html_toc += u'
  • ' - elif fname.endswith(tuple(VALID_EXTENSIONS + - ['bem', 'custom'])): + elif fname.endswith(VALID_EXTENSIONS + ('bem', 'custom')): html_toc += toc_list.substitute(div_klass=div_klass, id=global_id, tooltip=tooltip, @@ -1741,42 +1838,17 @@ def _render_toc(self, verbose=None): self._sectionlabels = sectionlabels lang = getattr(self, 'lang', 'en-us') + sections = [section if section != 'mri' else 'MRI' + for section in self.sections] html_header = header_template.substitute( title=self.title, include=self.include, lang=lang, - sections=self.sections, sectionvars=self._sectionvars) + sections=sections, sectionvars=self._sectionvars) self.html.insert(0, html_header) # Insert header at position 0 self.html.insert(1, html_toc) # insert TOC - def _render_array(self, array, global_id=None, cmap='gray', - limits=None, n_jobs=1): - """Render mri without bem contours (only PNG).""" - html = [] - html.append(u'
    ') - # Axial - limits = limits or {} - axial_limit = limits.get('axial') - axial_slices_gen = _iterate_axial_slices(array, axial_limit) - html.append( - self._render_one_axis(axial_slices_gen, 'axial', - global_id, cmap, array.shape[1], n_jobs)) - # Sagittal - sagittal_limit = limits.get('sagittal') - sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit) - html.append( - self._render_one_axis(sagittal_slices_gen, 'sagittal', - global_id, cmap, array.shape[1], n_jobs)) - # Coronal - coronal_limit = limits.get('coronal') - coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit) - html.append( - self._render_one_axis(coronal_slices_gen, 'coronal', - global_id, cmap, array.shape[1], n_jobs)) - # Close section - html.append(u'
    ') - return '\n'.join(html) - def _render_one_bem_axis(self, mri_fname, surfaces, global_id, - orientation='coronal', decim=2, n_jobs=1): + orientation='coronal', decim=2, n_jobs=1, + width=512): """Render one axis of bem contours (only PNG).""" import nibabel as nib nim = nib.load(mri_fname) @@ -1789,12 +1861,13 @@ def _render_one_bem_axis(self, mri_fname, surfaces, global_id, slides_klass = '%s-%s' % (name, global_id) sl = np.arange(0, n_slices, decim) + logger.debug(f'Rendering BEM {orientation} with {len(sl)} slices') kwargs = dict(mri_fname=mri_fname, surfaces=surfaces, show=False, orientation=orientation, img_output=True, src=None, - show_orientation=True) + show_orientation=True, width=width) imgs = _figs_to_mrislices(sl, n_jobs, **kwargs) slices = [] - img_klass = 'slideimg-%s' % name + img_klass = 'slideimg-%s w-100' % name div_klass = 'span12 %s' % slides_klass for ii, img in enumerate(imgs): slice_id = '%s-%s-%s' % (name, global_id, sl[ii]) @@ -1817,42 +1890,24 @@ def _render_one_bem_axis(self, mri_fname, surfaces, global_id, def _render_raw(self, raw_fname, data_path): """Render raw (only text).""" - import matplotlib.pyplot as plt global_id = self._get_id() - raw = read_raw_fif(raw_fname, allow_maxshield='yes') + kwargs = dict(fname=raw_fname, preload=False) + if raw_fname.endswith(('.fif', '.fif.gz')): + kwargs['allow_maxshield'] = True + raw = read_raw(**kwargs) extra = '(MaxShield on)' if raw.info.get('maxshield', False) else '' caption = self._gen_caption(prefix='Raw', suffix=extra, fname=raw_fname, data_path=data_path) - n_eeg = len(pick_types(raw.info, meg=False, eeg=True)) - n_grad = len(pick_types(raw.info, meg='grad')) - n_mag = len(pick_types(raw.info, meg='mag')) - pick_eog = pick_types(raw.info, meg=False, eog=True) - if len(pick_eog) > 0: - eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog]) - else: - eog = 'Not available' - pick_ecg = pick_types(raw.info, meg=False, ecg=True) - if len(pick_ecg) > 0: - ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg]) - else: - ecg = 'Not available' - meas_date = raw.info['meas_date'] - if meas_date is not None: - meas_date = meas_date.strftime("%B %d, %Y") + ' GMT' - - html = raw_template.substitute( - div_klass='raw', id=global_id, caption=caption, info=raw.info, - meas_date=meas_date, n_eeg=n_eeg, n_grad=n_grad, n_mag=n_mag, - eog=eog, ecg=ecg, tmin=raw._first_time, tmax=raw._last_time) + html = """
  • """ % (global_id) + html += raw._repr_html_(caption=caption) + html += "
  • " raw_psd = {} if self.raw_psd is True else self.raw_psd if isinstance(raw_psd, dict): - from matplotlib.backends.backend_agg import FigureCanvasAgg n_ax = sum(kind in raw for kind in _DATA_CH_TYPES_SPLIT) - fig, axes = plt.subplots(n_ax, 1, figsize=(6, 1 + 1.5 * n_ax), - dpi=92) - FigureCanvasAgg(fig) + fig = _figure_agg(figsize=(6, 1 + 1.5 * n_ax), dpi=92) + axes = [fig.add_subplot(1, n_ax, ii + 1) for ii in range(n_ax)] img = _fig_to_img(raw.plot_psd, self.image_format, ax=axes, **raw_psd) new_html = image_template.substitute( @@ -2017,6 +2072,7 @@ def _render_epochs(self, epo_fname, image_format, data_path): html = image_template.substitute( img=img, id=global_id, div_klass='epochs', img_klass='epochs', caption=caption, show=show, image_format=image_format) + html += epochs._repr_html_() return html def _render_cov(self, cov_fname, info_fname, image_format, data_path, @@ -2086,8 +2142,10 @@ def _render_trans(self, trans, path, info, subject, subjects_dir, return html def _render_bem(self, subject, subjects_dir, decim, n_jobs, - section='bem', caption='BEM'): + section='bem', caption='BEM', width=512): """Render mri+bem (only PNG).""" + if subjects_dir is None: + subjects_dir = self.subjects_dir subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) # Get the MRI filename @@ -2114,12 +2172,9 @@ def _render_bem(self, subject, subjects_dir, decim, n_jobs, name = caption html += u'
<li class="%s" id="%d">\n' % (klass, global_id)
         html += u'<h4>%s</h4>\n' % name  # all other captions are h4
-        html += self._render_one_bem_axis(mri_fname, surfaces, global_id,
-                                          'axial', decim, n_jobs)
-        html += self._render_one_bem_axis(mri_fname, surfaces, global_id,
-                                          'sagittal', decim, n_jobs)
-        html += self._render_one_bem_axis(mri_fname, surfaces, global_id,
-                                          'coronal', decim, n_jobs)
+        for view in _BEM_VIEWS:
+            html += self._render_one_bem_axis(mri_fname, surfaces, global_id,
+                                              view, decim, n_jobs, width)
         html += u'
  • \n' return ''.join(html) @@ -2140,7 +2195,7 @@ def _recursive_search(path, pattern): for f in fnmatch.filter(files, pattern): # only the following file types are supported # this ensures equitable distribution of jobs - if f.endswith(tuple(VALID_EXTENSIONS)): + if f.endswith(VALID_EXTENSIONS): filtered_files.append(op.realpath(op.join(dirpath, f))) return filtered_files @@ -2202,10 +2257,13 @@ def __call__(self, block, block_vars, gallery_conf): fid.write(_FA_FILE_CODE) # copy HTML file html_fname = op.basename(report.fname) - out_fname = op.join( + out_dir = op.join( self.app.builder.outdir, op.relpath(op.dirname(block_vars['target_file']), - self.app.builder.srcdir), html_fname) + self.app.builder.srcdir)) + os.makedirs(out_dir, exist_ok=True) + out_fname = op.join(out_dir, html_fname) + assert op.isfile(report.fname) self.files[report.fname] = out_fname # embed links/iframe data = _SCRAPER_TEXT.format(html_fname) diff --git a/mne/selection.py b/mne/selection.py deleted file mode 100644 index cf6dbafe431..00000000000 --- a/mne/selection.py +++ /dev/null @@ -1,189 +0,0 @@ -# Authors: Alexandre Gramfort -# Matti Hämäläinen -# Martin Luessi -# -# License: BSD (3-clause) - -from os import path -from collections import OrderedDict - -import numpy as np - -from .io.meas_info import Info -from .io.pick import _pick_data_channels, pick_types -from .utils import logger, verbose, _get_stim_channel - -_SELECTIONS = ['Vertex', 'Left-temporal', 'Right-temporal', 'Left-parietal', - 'Right-parietal', 'Left-occipital', 'Right-occipital', - 'Left-frontal', 'Right-frontal'] -_EEG_SELECTIONS = ['EEG 1-32', 'EEG 33-64', 'EEG 65-96', 'EEG 97-128'] - - -@verbose -def read_selection(name, fname=None, info=None, verbose=None): - """Read channel selection from file. - - By default, the selections used in ``mne_browse_raw`` are supported. - Additional selections can be added by specifying a selection file (e.g. - produced using ``mne_browse_raw``) using the ``fname`` parameter. - - The ``name`` parameter can be a string or a list of string. The returned - selection will be the combination of all selections in the file where - (at least) one element in name is a substring of the selection name in - the file. For example, ``name=['temporal', 'Right-frontal']`` will produce - a combination of ``'Left-temporal'``, ``'Right-temporal'``, and - ``'Right-frontal'``. - - The included selections are: - - * ``'Vertex'`` - * ``'Left-temporal'`` - * ``'Right-temporal'`` - * ``'Left-parietal'`` - * ``'Right-parietal'`` - * ``'Left-occipital'`` - * ``'Right-occipital'`` - * ``'Left-frontal'`` - * ``'Right-frontal'`` - - Parameters - ---------- - name : str or list of str - Name of the selection. If is a list, the selections are combined. - fname : str - Filename of the selection file (if None, built-in selections are used). - info : instance of Info - Measurement info file, which will be used to determine the spacing - of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag - systems and ``'MEG0111'`` for new ones. - %(verbose)s - - Returns - ------- - sel : list of string - List with channel names in the selection. 
- """ - # convert name to list of string - if not isinstance(name, (list, tuple)): - name = [name] - if isinstance(info, Info): - picks = pick_types(info, meg=True, exclude=()) - if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]: - spacing = 'new' - else: - spacing = 'old' - elif info is not None: - raise TypeError('info must be an instance of Info or None, not %s' - % (type(info),)) - else: # info is None - spacing = 'old' - - # use built-in selections by default - if fname is None: - fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel') - - if not path.isfile(fname): - raise ValueError('The file %s does not exist.' % fname) - - # use this to make sure we find at least one match for each name - name_found = {n: False for n in name} - with open(fname, 'r') as fid: - sel = [] - for line in fid: - line = line.strip() - # skip blank lines and comments - if len(line) == 0 or line[0] == '#': - continue - # get the name of the selection in the file - pos = line.find(':') - if pos < 0: - logger.info('":" delimiter not found in selections file, ' - 'skipping line') - continue - sel_name_file = line[:pos] - # search for substring match with name provided - for n in name: - if sel_name_file.find(n) >= 0: - sel.extend(line[pos + 1:].split('|')) - name_found[n] = True - break - - # make sure we found at least one match for each name - for n, found in name_found.items(): - if not found: - raise ValueError('No match for selection name "%s" found' % n) - - # make the selection a sorted list with unique elements - sel = list(set(sel)) - sel.sort() - if spacing == 'new': # "new" or "old" by now, "old" is default - sel = [s.replace('MEG ', 'MEG') for s in sel] - return sel - - -def _divide_to_regions(info, add_stim=True): - """Divide channels to regions by positions.""" - from scipy.stats import zscore - picks = _pick_data_channels(info, exclude=[]) - chs_in_lobe = len(picks) // 4 - pos = np.array([ch['loc'][:3] for ch in info['chs']]) - x, y, z = pos.T - - frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]] - picks = np.setdiff1d(picks, frontal) - - occipital = picks[np.argsort(y[picks])[:chs_in_lobe]] - picks = np.setdiff1d(picks, occipital) - - temporal = picks[np.argsort(z[picks])[:chs_in_lobe]] - picks = np.setdiff1d(picks, temporal) - - lt, rt = _divide_side(temporal, x) - lf, rf = _divide_side(frontal, x) - lo, ro = _divide_side(occipital, x) - lp, rp = _divide_side(picks, x) # Parietal lobe from the remaining picks. - - # Because of the way the sides are divided, there may be outliers in the - # temporal lobes. Here we switch the sides for these outliers. For other - # lobes it is not a big problem because of the vicinity of the lobes. 
- with np.errstate(invalid='ignore'): # invalid division, greater compare - zs = np.abs(zscore(x[rt])) - outliers = np.array(rt)[np.where(zs > 2.)[0]] - rt = list(np.setdiff1d(rt, outliers)) - - with np.errstate(invalid='ignore'): # invalid division, greater compare - zs = np.abs(zscore(x[lt])) - outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.)[0]])) - lt = list(np.setdiff1d(lt, outliers)) - - l_mean = np.mean(x[lt]) - r_mean = np.mean(x[rt]) - for outlier in outliers: - if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]): - lt.append(outlier) - else: - rt.append(outlier) - - if add_stim: - stim_ch = _get_stim_channel(None, info, raise_error=False) - if len(stim_ch) > 0: - for region in [lf, rf, lo, ro, lp, rp, lt, rt]: - region.append(info['ch_names'].index(stim_ch[0])) - return OrderedDict([('Left-frontal', lf), ('Right-frontal', rf), - ('Left-parietal', lp), ('Right-parietal', rp), - ('Left-occipital', lo), ('Right-occipital', ro), - ('Left-temporal', lt), ('Right-temporal', rt)]) - - -def _divide_side(lobe, x): - """Make a separation between left and right lobe evenly.""" - lobe = np.asarray(lobe) - median = np.median(x[lobe]) - - left = lobe[np.where(x[lobe] < median)[0]] - right = lobe[np.where(x[lobe] > median)[0]] - medians = np.where(x[lobe] == median)[0] - - left = np.sort(np.concatenate([left, lobe[medians[1::2]]])) - right = np.sort(np.concatenate([right, lobe[medians[::2]]])) - return list(left), list(right) diff --git a/mne/simulation/metrics.py b/mne/simulation/metrics.py index b7e68fcd72d..dd228d6a517 100644 --- a/mne/simulation/metrics.py +++ b/mne/simulation/metrics.py @@ -4,7 +4,6 @@ # License: BSD (3-clause) import numpy as np -from scipy.linalg import norm from ..utils import _check_option @@ -61,5 +60,5 @@ def source_estimate_quantification(stc1, stc2, metric='rms'): # Calculate correlation coefficient between matrix elements elif metric == 'cosine': score = 1. - (np.dot(data1.flatten(), data2.flatten()) / - (norm(data1) * norm(data2))) + (np.linalg.norm(data1) * np.linalg.norm(data2))) return score diff --git a/mne/simulation/raw.py b/mne/simulation/raw.py index 4d07829f17e..bd0fa97a6fb 100644 --- a/mne/simulation/raw.py +++ b/mne/simulation/raw.py @@ -33,6 +33,7 @@ from ..utils import (logger, verbose, check_random_state, _pl, _validate_type, _check_preload) from ..parallel import check_n_jobs +from .source import SourceSimulator def _check_cov(info, cov): @@ -110,8 +111,11 @@ def _check_head_pos(head_pos, info, first_samp, times=None): raise RuntimeError('All position times must be <= t_end (%0.1f ' 'sec), found %s/%s bad values (is this a split ' 'file?)' % (times[-1], bad.sum(), len(bad))) + # If it starts close to zero, make it zero (else unique(offset) fails) + if len(ts) > 0 and ts[0] < (0.5 / info['sfreq']): + ts[0] = 0. # If it doesn't start at zero, insert one at t=0 - if len(ts) == 0 or ts[0] > 0: + elif len(ts) == 0 or ts[0] > 0: ts = np.r_[[0.], ts] dev_head_ts.insert(0, info['dev_head_t']['trans']) dev_head_ts = [{'trans': d, 'to': info['dev_head_t']['to'], @@ -140,7 +144,7 @@ def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, .. versionchanged:: 0.18 Support for :class:`mne.Info`. - stc : iterable | SourceEstimate + stc : iterable | SourceEstimate | SourceSimulator The source estimates to use to simulate data. Each must have the same sample rate as the raw data, and the vertices of all stcs in the iterable must match. 
Each entry in the iterable can also be a tuple of @@ -149,7 +153,8 @@ def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, See Notes for details. .. versionchanged:: 0.18 - Support for tuple, and iterable of tuple or SourceEstimate. + Support for tuple, iterable of tuple or `~mne.SourceEstimate`, + or `~mne.simulation.SourceSimulator`. trans : dict | str | None Either a transformation filename (usually made using mne_analyze) or an info dict (usually opened using read_trans()). @@ -166,7 +171,7 @@ def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, solution filename (e.g., "sample-5120-5120-5120-bem-sol.fif"). Can be None if ``forward`` is provided. %(head_pos)s - See for example [1]_. + See for example :footcite:`LarsonTaulu2017`. mindist : float Minimum distance between sources and the inner skull boundary to use during forward calculation. @@ -237,9 +242,7 @@ def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, References ---------- - .. [1] Larson E, Taulu S (2017). "The Importance of Properly Compensating - for Head Movements During MEG Acquisition Across Different Age - Groups." Brain Topogr 30:172–181 + .. footbibliography:: """ # noqa: E501 _validate_type(info, Info, 'info') raw_verbose = verbose @@ -273,8 +276,10 @@ def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None, logger.info('Setting up raw simulation: %s position%s, "%s" interpolation' % (len(dev_head_ts), _pl(dev_head_ts), interp)) + if isinstance(stc, SourceSimulator) and stc.first_samp != first_samp: + logger.info('SourceSimulator first_samp does not match argument.') + stc_enum, stc_counted, verts = _check_stc_iterable(stc, info) - # del stc if forward is not None: forward = restrict_forward_to_stc(forward, verts) src = forward['src'] @@ -369,7 +374,7 @@ def add_eog(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None, 1. Random activation times are drawn from an inhomogeneous poisson process whose blink rate oscillates between 4.5 blinks/minute and 17 blinks/minute based on the low (reading) and high (resting) - blink rates from [1]_. + blink rates from :footcite:`BentivoglioEtAl1997`. 2. The activation kernel is a 250 ms Hanning window. 3. Two activated dipoles are located in the z=0 plane (in head coordinates) at ±30 degrees away from the y axis (nasion). @@ -382,8 +387,7 @@ def add_eog(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None, References ---------- - .. [1] Bentivoglio et al. "Analysis of blink rate patterns in normal - subjects" Movement Disorders, 1997 Nov;12(6):1028-34. + .. 
footbibliography:: """ return _add_exg(raw, 'blink', head_pos, interp, n_jobs, random_state) @@ -524,7 +528,7 @@ def _add_exg(raw, kind, head_pos, interp, n_jobs, random_state): proc_lims = np.concatenate([np.arange(0, len(used), 10000), [len(used)]]) for start, stop in zip(proc_lims[:-1], proc_lims[1:]): fwd, _ = interper.feed(stop - start) - data[picks, start:stop] = einsum( + data[picks, start:stop] += einsum( 'svt,vt->st', fwd, exg_data[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True @@ -580,7 +584,7 @@ def add_chpi(raw, head_pos=None, interp='cos2', n_jobs=1, verbose=None): lims = np.concatenate([offsets, [len(raw.times)]]) for start, stop in zip(lims[:-1], lims[1:]): fwd, = interper.feed(stop - start) - data[meg_picks, start:stop] = einsum( + data[meg_picks, start:stop] += einsum( 'svt,vt->st', fwd, sinusoids[:, start:stop]) assert not used[start:stop].any() used[start:stop] = True diff --git a/mne/simulation/source.py b/mne/simulation/source.py index 94ada9e4681..b92a699dbce 100644 --- a/mne/simulation/source.py +++ b/mne/simulation/source.py @@ -13,7 +13,8 @@ from ..source_estimate import SourceEstimate, VolSourceEstimate from ..source_space import _ensure_src from ..fixes import rng_uniform -from ..utils import check_random_state, warn, _check_option, fill_doc +from ..utils import (check_random_state, warn, _check_option, fill_doc, + _ensure_int, _ensure_events) from ..label import Label from ..surface import _compute_nearest @@ -327,6 +328,10 @@ class SourceSimulator(object): duration : float | None Time interval during which the simulation takes place in seconds. If None, it is computed using existing events and waveform lengths. + first_samp : int + First sample from which the simulation takes place, as an integer. + Comparable to the ``first_samp`` property of `~mne.io.Raw` objects. + Default is 0. Attributes ---------- @@ -336,28 +341,39 @@ class SourceSimulator(object): The number of time samples of the simulation. """ - def __init__(self, src, tstep=1e-3, duration=None): + def __init__(self, src, tstep=1e-3, duration=None, first_samp=0): + if duration is not None and duration < tstep: + raise ValueError('duration must be None or >= tstep.') + self.first_samp = _ensure_int(first_samp, 'first_samp') self._src = src self._tstep = tstep self._labels = [] self._waveforms = [] self._events = np.empty((0, 3), dtype=int) - self._duration = duration + self._duration = duration # if not None, sets # samples self._last_samples = [] self._chk_duration = 1000 @property def duration(self): - """Duration of the simulation""" - # If not, the precomputed maximum last sample is used - if self._duration is None: - return np.max(self._last_samples) * self._tstep - return self._duration + """Duration of the simulation in same units as tstep.""" + if self._duration is not None: + return self._duration + return self.n_times * self._tstep @property def n_times(self): - """Number of time samples in the simulation""" - return int(self.duration / self._tstep) + """Number of time samples in the simulation.""" + if self._duration is not None: + return int(self._duration / self._tstep) + ls = self.first_samp + if len(self._last_samples) > 0: + ls = np.max(self._last_samples) + return ls - self.first_samp + 1 # >= 1 + + @property + def last_samp(self): + return self.first_samp + self.n_times - 1 def add_data(self, label, waveform, events): """Add data to the simulation. 
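As a sanity check on the new properties, a purely numeric sketch of the bookkeeping they imply (the numbers are invented; no source space is needed)::

    tstep = 1e-3                 # 1 ms per sample
    first_samp = 50              # e.g. matching raw.first_samp
    last_event_onset = 60        # absolute sample of the latest event
    waveform_len = 3             # length of its waveform in samples

    last_samp = last_event_onset + waveform_len - 1   # inclusive last sample
    n_times = last_samp - first_samp + 1              # always >= 1
    duration = n_times * tstep

    assert (last_samp, n_times) == (62, 13)
    assert abs(duration - 0.013) < 1e-12

With ``first_samp=0`` and at least one event this yields the same ``n_times`` as before the change; the new attribute only shifts where the simulation is considered to start.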
@@ -394,42 +410,48 @@ def add_data(self, label, waveform, events): raise ValueError('Number of waveforms and events should match or ' 'there should be a single waveform (%d != %d).' % (len(waveform), len(events))) - # Update the maximum duration possible based on the events + events = _ensure_events(events).astype(np.int64) + # Update the last sample possible based on events + waveforms self._labels.extend([label] * len(events)) self._waveforms.extend(waveform) - self._events = np.vstack([self._events, events]) + self._events = np.concatenate([self._events, events]) + assert self._events.dtype == np.int64 # First sample per waveform is the first column of events # Last is computed below - self._last_samples = np.array([self._events[i, 0] + len(w) + self._last_samples = np.array([self._events[i, 0] + len(w) - 1 for i, w in enumerate(self._waveforms)]) def get_stim_channel(self, start_sample=0, stop_sample=None): """Get the stim channel from the provided data. Returns the stim channel data according to the simulation parameters - which should be added through function add_data. If both start_sample + which should be added through the add_data method. If both start_sample and stop_sample are not specified, the entire duration is used. Parameters ---------- start_sample : int - First sample in chunk. Default is 0. + First sample in chunk. Default is the value of the ``first_samp`` + attribute. stop_sample : int | None - The stop sample of the returned stc. This sample is not part of the - output to follow slicing semantics. If None, then all samples past - start_sample is returned. + The final sample of the returned stc. If None, then all samples + from start_sample onward are returned. Returns ------- stim_data : ndarray of int, shape (n_samples,) The stimulation channel data. """ + if start_sample is None: + start_sample = self.first_samp if stop_sample is None: - stop_sample = self.n_times - n_samples = stop_sample - start_sample + stop_sample = start_sample + self.n_times - 1 + elif stop_sample < start_sample: + raise ValueError('Argument start_sample must be >= stop_sample.') + n_samples = stop_sample - start_sample + 1 # Initialize the stim data array - stim_data = np.zeros(n_samples, dtype=int) + stim_data = np.zeros(n_samples, dtype=np.int64) # Select only events in the time chunk stim_ind = np.where(np.logical_and( @@ -437,12 +459,12 @@ def get_stim_channel(self, start_sample=0, stop_sample=None): self._events[:, 0] < stop_sample))[0] if len(stim_ind) > 0: - relative_ind = self._events[stim_ind, 0].astype(int) - start_sample + relative_ind = self._events[stim_ind, 0] - start_sample stim_data[relative_ind] = self._events[stim_ind, 2] return stim_data - def get_stc(self, start_sample=0, stop_sample=None): + def get_stc(self, start_sample=None, stop_sample=None): """Simulate a SourceEstimate from the provided data. Returns a SourceEstimate object constructed according to the simulation @@ -452,12 +474,12 @@ def get_stc(self, start_sample=0, stop_sample=None): Parameters ---------- - start_sample : int - First sample in chunk. Default is 0. + start_sample : int | None + First sample in chunk. If ``None`` the value of the ``first_samp`` + attribute is used. Defaults to ``None``. stop_sample : int | None - The stop sample of the returned stc. This sample is not part of the - output to follow slicing semantics. If None, then all samples past - start_sample is returned. + The final sample of the returned STC. If ``None``, then all samples + past ``start_sample`` are returned. 
Returns ------- @@ -467,19 +489,22 @@ def get_stc(self, start_sample=0, stop_sample=None): if len(self._labels) == 0: raise ValueError('No simulation parameters were found. Please use ' 'function add_data to add simulation parameters.') + if start_sample is None: + start_sample = self.first_samp if stop_sample is None: - stop_sample = self.n_times - - n_samples = stop_sample - start_sample + stop_sample = start_sample + self.n_times - 1 + elif stop_sample < start_sample: + raise ValueError('start_sample must be >= stop_sample.') + n_samples = stop_sample - start_sample + 1 - # Initialize the stc_data array + # Initialize the stc_data array to span all possible samples stc_data = np.zeros((len(self._labels), n_samples)) - # Select only the indices that have events in the time chunk + # Select only the events that fall within the span ind = np.where(np.logical_and(self._last_samples >= start_sample, - self._events[:, 0] < stop_sample))[0] + self._events[:, 0] <= stop_sample))[0] - # Loop only over the items that are in the time chunk + # Loop only over the items that are in the time span subset_waveforms = [self._waveforms[i] for i in ind] for i, (waveform, event) in enumerate(zip(subset_waveforms, self._events[ind])): @@ -489,16 +514,17 @@ def get_stc(self, start_sample=0, stop_sample=None): wf_stop = self._last_samples[ind[i]] # Recover the indices of the event that should be in the chunk - waveform_ind = np.in1d(np.arange(wf_start, wf_stop), - np.arange(start_sample, stop_sample)) + waveform_ind = np.in1d(np.arange(wf_start, wf_stop + 1), + np.arange(start_sample, stop_sample + 1)) # Recover the indices that correspond to the overlap - stc_ind = np.in1d(np.arange(start_sample, stop_sample), - np.arange(wf_start, wf_stop)) + stc_ind = np.in1d(np.arange(start_sample, stop_sample + 1), + np.arange(wf_start, wf_stop + 1)) # add the resulting waveform chunk to the corresponding label stc_data[ind[i]][stc_ind] += waveform[waveform_ind] + start_sample -= self.first_samp # STC sample ref is 0 stc = simulate_stc(self._src, self._labels, stc_data, start_sample * self._tstep, self._tstep, allow_overlap=True) @@ -507,11 +533,13 @@ def get_stc(self, start_sample=0, stop_sample=None): def __iter__(self): """Iterate over 1 second STCs.""" - # Arbitrary chunk size, can be modified later to something else + # Arbitrary chunk size, can be modified later to something else. # Loop over chunks of 1 second - or, maximum sample size. # Can be modified to a different value. 
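Before the loop itself, a standalone sketch of how the inclusive chunk boundaries fall when stepping over ``[first_samp, last_samp]`` in 1000-sample chunks (numbers invented)::

    first_samp, last_samp, chunk = 50, 2549, 1000

    windows = []
    for start in range(first_samp, last_samp + 1, chunk):
        stop = min(start + chunk - 1, last_samp)   # inclusive stop sample
        windows.append((start, stop))

    assert windows == [(50, 1049), (1050, 2049), (2050, 2549)]
    # every sample is covered exactly once
    assert sum(b - a + 1 for a, b in windows) == last_samp - first_samp + 1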
- n_times = self.n_times - for start_sample in range(0, n_times, self._chk_duration): - stop_sample = min(start_sample + self._chk_duration, n_times) + last_sample = self.last_samp + for start_sample in range(self.first_samp, last_sample + 1, + self._chk_duration): + stop_sample = min(start_sample + self._chk_duration - 1, + last_sample) yield (self.get_stc(start_sample, stop_sample), self.get_stim_channel(start_sample, stop_sample)) diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py index ba097b5b6a8..f55b4ec0881 100644 --- a/mne/simulation/tests/test_raw.py +++ b/mne/simulation/tests/test_raw.py @@ -24,13 +24,15 @@ from mne.tests.test_chpi import _assert_quats from mne.datasets import testing from mne.simulation import (simulate_sparse_stc, simulate_raw, add_eog, - add_ecg, add_chpi) + add_ecg, add_chpi, add_noise) from mne.source_space import _compare_source_spaces +from mne.simulation.source import SourceSimulator +from mne.label import Label from mne.surface import _get_ico_surface from mne.io import read_raw_fif, RawArray from mne.io.constants import FIFF from mne.time_frequency import psd_welch -from mne.utils import run_tests_if_main, catch_logging, check_version +from mne.utils import catch_logging, check_version base_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname_short = op.join(base_path, 'test_raw.fif') @@ -325,7 +327,7 @@ def test_degenerate(raw_data): @pytest.mark.slowtest def test_simulate_raw_bem(raw_data): """Test simulation of raw data with BEM.""" - raw, src, stc, trans, sphere = raw_data + raw, src_ss, stc, trans, sphere = raw_data src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir) for s in src: s['nuse'] = 3 @@ -366,6 +368,31 @@ def test_simulate_raw_bem(raw_data): diffs = np.sqrt(np.sum((locs - fits) ** 2, axis=-1)) * 1000 med_diff = np.median(diffs) assert med_diff < tol, '%s: %s' % (bem, med_diff) + # also test event timings with SourceSimulator + first_samp = raw.first_samp + events = find_events(raw, initial_event=True, verbose=False) + evt_times = events[:, 0] + assert len(events) == 3 + labels_sim = [[], [], []] # random l+r hemisphere points + labels_sim[0] = Label([src_ss[0]['vertno'][1]], hemi='lh') + labels_sim[1] = Label([src_ss[0]['vertno'][4]], hemi='lh') + labels_sim[2] = Label([src_ss[1]['vertno'][2]], hemi='rh') + wf_sim = np.array([2, 1, 0]) + for this_fs in (0, first_samp): + ss = SourceSimulator(src_ss, 1. 
/ raw.info['sfreq'], + first_samp=this_fs) + for i in range(3): + ss.add_data(labels_sim[i], wf_sim, events[np.newaxis, i]) + assert ss.n_times == evt_times[-1] + len(wf_sim) - this_fs + raw_sim = simulate_raw(raw.info, ss, src=src_ss, bem=bem_fname, + first_samp=first_samp) + data = raw_sim.get_data() + amp0 = data[:, evt_times - first_samp].max() + amp1 = data[:, evt_times + 1 - first_samp].max() + amp2 = data[:, evt_times + 2 - first_samp].max() + assert_allclose(amp0 / amp1, wf_sim[0] / wf_sim[1], rtol=1e-5) + assert amp2 == 0 + assert raw_sim.n_times == ss.n_times def test_simulate_round_trip(raw_data): @@ -479,4 +506,39 @@ def test_simulate_raw_chpi(): vel_atol=0.03) # velicity huge because of t_step_min above -run_tests_if_main() +@testing.requires_testing_data +def test_simulation_cascade(): + """Test that cascading operations do not overwrite data.""" + # Create 10 second raw dataset with zeros in the data matrix + raw_null = read_raw_fif(raw_chpi_fname, allow_maxshield='yes') + raw_null.crop(0, 1).pick_types(meg=True).load_data() + raw_null.apply_function(lambda x: np.zeros_like(x)) + assert_array_equal(raw_null.get_data(), 0.) + + # Calculate independent signal additions + raw_eog = raw_null.copy() + add_eog(raw_eog, random_state=0) + + raw_ecg = raw_null.copy() + add_ecg(raw_ecg, random_state=0) + + raw_noise = raw_null.copy() + cov = make_ad_hoc_cov(raw_null.info) + add_noise(raw_noise, cov, random_state=0) + + raw_chpi = raw_null.copy() + add_chpi(raw_chpi) + + # Calculate Cascading signal additions + raw_cascade = raw_null.copy() + add_eog(raw_cascade, random_state=0) + add_ecg(raw_cascade, random_state=0) + add_chpi(raw_cascade) + add_noise(raw_cascade, cov, random_state=0) + + cascade_data = raw_cascade.get_data() + serial_data = 0. + for raw_other in (raw_eog, raw_ecg, raw_noise, raw_chpi): + serial_data += raw_other.get_data() + + assert_allclose(cascade_data, serial_data, atol=1e-20) diff --git a/mne/simulation/tests/test_source.py b/mne/simulation/tests/test_source.py index 30806982fb5..08095f20c48 100644 --- a/mne/simulation/tests/test_source.py +++ b/mne/simulation/tests/test_source.py @@ -14,8 +14,7 @@ from mne import (read_label, read_forward_solution, pick_types_forward, convert_forward_solution) from mne.label import Label -from mne.simulation.source import simulate_stc, simulate_sparse_stc -from mne.simulation.source import SourceSimulator +from mne.simulation import simulate_stc, simulate_sparse_stc, SourceSimulator from mne.utils import run_tests_if_main, check_version @@ -339,12 +338,12 @@ def test_source_simulator(_get_fwd_labels): stc = ss.get_stc() stim_channel = ss.get_stim_channel() - # Stim channel data must have the same size as stc time samples - assert len(stim_channel) == stc.data.shape[1] - - stim_channel = ss.get_stim_channel(0, 0) - assert len(stim_channel) == 0 + # Make some size checks. + assert ss.duration == 1.0 + assert ss.n_times == 6 + assert ss.last_samp == 5 + assert len(stim_channel) == stc.data.shape[1] assert np.all(stc.vertices[0] == verts_lh) assert np.all(stc.vertices[1] == verts_rh) assert_array_almost_equal(stc.lh_data, output_data_lh) @@ -357,23 +356,52 @@ def test_source_simulator(_get_fwd_labels): counter += 1 assert counter == 1 + # Check validity of setting duration and start/stop parameters. 
half_ss = SourceSimulator(src, tstep, duration=0.5) for i in range(3): half_ss.add_data(mylabels[i], wfs[i], events[i]) + with pytest.raises(TypeError, match='array of integers'): + half_ss.add_data(mylabels[0], wfs[0], events[0].astype(float)) half_stc = half_ss.get_stc() assert_array_almost_equal(stc.data[:, :3], half_stc.data) - ss = SourceSimulator(src) + part_stc = ss.get_stc(start_sample=1, stop_sample=4) + assert part_stc.shape == (24, 4) + assert part_stc.times[0] == tstep + # Check validity of other arguments. + with pytest.raises(ValueError, match='start_sample must be'): + ss.get_stc(2, 0) + ss = SourceSimulator(src) with pytest.raises(ValueError, match='No simulation parameters'): ss.get_stc() - with pytest.raises(ValueError, match='label must be a Label'): ss.add_data(1, wfs, events) - with pytest.raises(ValueError, match='Number of waveforms and events ' 'should match'): ss.add_data(mylabels[0], wfs[:2], events) + with pytest.raises(ValueError, match='duration must be None or'): + ss = SourceSimulator(src, tstep, tstep / 2) + + # Verify first_samp functionality. + ss = SourceSimulator(src, tstep) + offset = 50 + for i in range(3): # events are offset, but first_samp = 0 + events[i][:, 0] += offset + ss.add_data(mylabels[i], wfs[i], events[i]) + offset_stc = ss.get_stc() + assert ss.n_times == 56 + assert ss.first_samp == 0 + assert offset_stc.data.shape == (stc.data.shape[0], + stc.data.shape[1] + offset) + ss = SourceSimulator(src, tstep, first_samp=offset) + for i in range(3): # events still offset, but first_samp > 0 + ss.add_data(mylabels[i], wfs[i], events[i]) + offset_stc = ss.get_stc() + assert ss.n_times == 6 + assert ss.first_samp == offset + assert ss.last_samp == offset + 5 + assert offset_stc.data.shape == stc.data.shape # Verify that the chunks have the correct length. 
source_simulator = SourceSimulator(src, tstep=tstep, duration=10 * tstep) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index e461511194f..53a35e43a4d 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -11,8 +11,6 @@ from types import GeneratorType import numpy as np -from scipy import linalg, sparse -from scipy.sparse import coo_matrix, block_diag as sparse_block_diag from .baseline import rescale from .cov import Covariance @@ -517,7 +515,7 @@ def __init__(self, data, vertices, tmin, tstep, self._kernel_removed = False self._times = None self._update_times() - self.subject = _check_subject(None, subject, False) + self.subject = _check_subject(None, subject, raise_error=False) def __repr__(self): # noqa: D105 s = "%d vertices" % (sum(len(v) for v in self.vertices),) @@ -649,7 +647,7 @@ def plot(self, subject=None, surface='inflated', hemi='lh', foreground=None, initial_time=None, time_unit='s', backend='auto', spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, verbose=None): brain = plot_source_estimates( self, subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -660,7 +658,8 @@ def plot(self, subject=None, surface='inflated', hemi='lh', initial_time=initial_time, time_unit=time_unit, backend=backend, spacing=spacing, title=title, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, - add_data_kwargs=add_data_kwargs, verbose=verbose) + add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs, + verbose=verbose) return brain @property @@ -1637,6 +1636,7 @@ def estimate_snr(self, info, fwd, cov, verbose=None): This function should only be used with source estimates with units nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA). + See also :footcite:`GoldenholzEtAl2009`. .. warning:: This function currently only works properly for fixed orientation. @@ -1673,11 +1673,7 @@ def estimate_snr(self, info, fwd, cov, verbose=None): References ---------- - .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, - D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). - Mapping the Signal-To-Noise-Ratios of Cortical Sources in - Magnetoencephalography and Electroencephalography. - Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571 + .. footbibliography:: """ from .forward import convert_forward_solution, Forward from .minimum_norm.inverse import _prepare_forward @@ -1711,7 +1707,7 @@ def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False, """Compute the center of mass of activity. This function computes the spatial center of mass on the surface - as well as the temporal center of mass as in [1]_. + as well as the temporal center of mass as in :footcite:`LarsonLee2013`. .. note:: All activity must occur in a single hemisphere, otherwise an error is raised. The "mass" of each point in space for @@ -1763,8 +1759,7 @@ def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False, References ---------- - .. [1] Larson and Lee, "The cortical dynamics underlying effective - switching of auditory spatial attention", NeuroImage 2012. + .. 
footbibliography:: """ if not isinstance(surf, str): raise TypeError('surf must be a string, got %s' % (type(surf),)) @@ -1918,7 +1913,7 @@ def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto', background='black', foreground=None, initial_time=None, time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', add_data_kwargs=None, - verbose=None): # noqa: D102 + brain_kwargs=None, verbose=None): # noqa: D102 return plot_vector_source_estimates( self, subject=subject, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1931,7 +1926,7 @@ def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto', initial_time=initial_time, time_unit=time_unit, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) class _BaseVolSourceEstimate(_BaseSourceEstimate): @@ -1949,7 +1944,7 @@ def plot_3d(self, subject=None, surface='white', hemi='both', foreground=None, initial_time=None, time_unit='s', backend='auto', spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, verbose=None): return super().plot( subject=subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1961,7 +1956,7 @@ def plot_3d(self, subject=None, surface='white', hemi='both', time_unit=time_unit, backend=backend, spacing=spacing, title=title, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) @copy_function_doc_to_method_doc(plot_volume_source_estimates) def plot(self, src, subject=None, subjects_dir=None, mode='stat_map', @@ -1979,7 +1974,7 @@ def plot(self, src, subject=None, subjects_dir=None, mode='stat_map', # Override here to provide the volume-specific options @verbose def extract_label_time_course(self, labels, src, mode='auto', - allow_empty=False, *, trans=None, + allow_empty=False, *, mri_resolution=True, verbose=None): """Extract label time courses for lists of labels. @@ -1992,7 +1987,6 @@ def extract_label_time_course(self, labels, src, mode='auto', %(eltc_src)s %(eltc_mode)s %(eltc_allow_empty)s - %(trans_deprecated)s %(eltc_mri_resolution)s %(verbose_meth)s @@ -2010,11 +2004,11 @@ def extract_label_time_course(self, labels, src, mode='auto', """ return extract_label_time_course( self, labels, src, mode=mode, return_generator=False, - allow_empty=allow_empty, trans=trans, + allow_empty=allow_empty, mri_resolution=mri_resolution, verbose=verbose) - @fill_doc - def in_label(self, label, mri, src, trans=None): + @verbose + def in_label(self, label, mri, src, *, verbose=None): """Get a source estimate object restricted to a label. SourceEstimate contains the time course of @@ -2030,7 +2024,7 @@ def in_label(self, label, mri, src, trans=None): src : instance of SourceSpaces The volumetric source space. It must be a single, whole-brain volume. 
- %(trans_deprecated)s + %(verbose_meth)s Returns ------- @@ -2049,7 +2043,6 @@ def in_label(self, label, mri, src, trans=None): volume_label = [label] else: volume_label = {'Volume ID %s' % (label): _ensure_int(label)} - _dep_trans(trans) label = _volume_labels(src, (mri, volume_label), mri_resolution=False) assert len(label) == 1 label = label[0] @@ -2282,7 +2275,8 @@ def plot_3d(self, subject=None, hemi='both', colormap='hot', background='black', foreground=None, initial_time=None, time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): # noqa: D102 + add_data_kwargs=None, brain_kwargs=None, + verbose=None): # noqa: D102 return _BaseVectorSourceEstimate.plot( self, subject=subject, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -2295,7 +2289,7 @@ def plot_3d(self, subject=None, hemi='both', colormap='hot', initial_time=initial_time, time_unit=time_unit, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, add_data_kwargs=add_data_kwargs, - verbose=verbose) + brain_kwargs=brain_kwargs, verbose=verbose) @fill_doc @@ -2643,6 +2637,7 @@ def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False, vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ + from scipy import sparse if remap_vertices: logger.info('Reassigning vertex indices.') tris = np.searchsorted(np.unique(tris), tris) @@ -2679,6 +2674,7 @@ def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None): vertices are time 1, the nodes from 2 to 2N are the vertices during time 2, etc. """ + from scipy.sparse import block_diag as sparse_block_diag if src[0]['dist'] is None: raise RuntimeError('src must have distances included, consider using ' 'setup_source_space with add_dist=True') @@ -2787,6 +2783,7 @@ def spatial_inter_hemi_adjacency(src, dist, verbose=None): existing intra-hemispheric adjacency matrix, e.g. computed using geodesic distances. 
""" + from scipy import sparse from scipy.spatial.distance import cdist src = _ensure_src(src, kind='surface') adj = cdist(src[0]['rr'][src[0]['vertno']], @@ -2801,6 +2798,7 @@ def spatial_inter_hemi_adjacency(src, dist, verbose=None): @verbose def _get_adjacency_from_edges(edges, n_times, verbose=None): """Given edges sparse matrix, create adjacency matrix.""" + from scipy.sparse import coo_matrix n_vertices = edges.shape[0] logger.info("-- number of adjacent vertices : %d" % n_vertices) nnz = edges.col.size @@ -2832,11 +2830,12 @@ def _get_ico_tris(grade, verbose=None, return_surf=False): def _pca_flip(flip, data): + from scipy import linalg U, s, V = linalg.svd(data, full_matrices=False) # determine sign-flip sign = np.sign(np.dot(U[:, 0], flip)) # use average power in label for scaling - scale = linalg.norm(s) / np.sqrt(len(data)) + scale = np.linalg.norm(s) / np.sqrt(len(data)) return sign * scale * V[0] @@ -2860,6 +2859,19 @@ def _temporary_vertices(src, vertices): s['vertno'] = v +def _check_stc_src(stc, src): + if stc is not None and src is not None: + _check_subject( + src._subject, stc.subject, raise_error=False, + first_kind='source space subject', second_kind='stc.subject') + for s, v, hemi in zip(src, stc.vertices, ('left', 'right')): + n_missing = (~np.in1d(v, s['vertno'])).sum() + if n_missing: + raise ValueError('%d/%d %s hemisphere stc vertices ' + 'missing from the source space, likely ' + 'mismatch' % (n_missing, len(v), hemi)) + + def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse): """Prepare indices and flips for extract_label_time_course.""" # If src is a mixed src space, the first 2 src spaces are surf type and @@ -2868,22 +2880,13 @@ def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse): # of vol src space. # If stc=None (i.e. no activation time courses provided) and mode='mean', # only computes vertex indices and label_flip will be list of None. 
+ from scipy import sparse from .label import label_sign_flip, Label, BiHemiLabel # if source estimate provided in stc, get vertices from source space and # check that they are the same as in the stcs - if stc is not None: - vertno = stc.vertices - - for s, v, hemi in zip(src, stc.vertices, ('left', 'right')): - n_missing = (~np.in1d(v, s['vertno'])).sum() - if n_missing: - raise ValueError('%d/%d %s hemisphere stc vertices missing ' - 'from the source space, likely mismatch' - % (n_missing, len(v), hemi)) - else: - vertno = [s['vertno'] for s in src] - + _check_stc_src(stc, src) + vertno = [s['vertno'] for s in src] if stc is None else stc.vertices nvert = [len(vn) for vn in vertno] # initialization @@ -2892,6 +2895,14 @@ def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse): bad_labels = list() for li, label in enumerate(labels): + subject = label['subject'] if use_sparse else label.subject + # stc and src can each be None + _check_subject( + subject, getattr(stc, 'subject', None), raise_error=False, + first_kind='label.subject', second_kind='stc.subject') + _check_subject( + subject, getattr(src, '_subject', None), raise_error=False, + first_kind='label.subject', second_kind='source space subject') if use_sparse: assert isinstance(label, dict) vertidx = label['csr'] @@ -2976,6 +2987,7 @@ def _volume_labels(src, labels, mri_resolution): # given volumetric source space when used with extract_label_time_course from .label import Label assert src.kind == 'volume' + subject = src._subject extra = ' when using a volume source space' _import_nibabel('use volume atlas labels') _validate_type(labels, ('path-like', list, tuple), 'labels' + extra) @@ -3036,7 +3048,7 @@ def _volume_labels(src, labels, mri_resolution): for k, v in labels.items(): mask = atlas_data == v csr = interp[mask] - out_labels.append(dict(csr=csr, name=k)) + out_labels.append(dict(csr=csr, name=k, subject=subject)) nnz += csr.shape[0] > 0 else: # Use nearest values @@ -3045,7 +3057,7 @@ def _volume_labels(src, labels, mri_resolution): del src src_values = _get_atlas_values(vol_info, rr[vertno]) vertices = [vertno[src_values == val] for val in labels.values()] - out_labels = [Label(v, hemi='lh', name=val) + out_labels = [Label(v, hemi='lh', name=val, subject=subject) for v, val in zip(vertices, labels.keys())] nnz = sum(len(v) != 0 for v in vertices) logger.info('%d/%d atlas regions had at least one vertex ' @@ -3053,21 +3065,30 @@ def _volume_labels(src, labels, mri_resolution): return out_labels -def _dep_trans(trans): - if trans is not None: - warn('trans is no longer needed and will be removed in 0.23, do not ' - 'pass it as an argument', DeprecationWarning) +def _get_default_label_modes(): + return sorted(_label_funcs.keys()) + ['auto'] -def _gen_extract_label_time_course(stcs, labels, src, mode='mean', - allow_empty=False, trans=None, +def _get_allowed_label_modes(stc): + if isinstance(stc, (_BaseVolSourceEstimate, + _BaseVectorSourceEstimate)): + return ('mean', 'max', 'auto') + else: + return _get_default_label_modes() + + +def _gen_extract_label_time_course(stcs, labels, src, *, mode='mean', + allow_empty=False, mri_resolution=True, verbose=None): # loop through source estimates and extract time series - _dep_trans(trans) - _validate_type(src, SourceSpaces) - _check_option('mode', mode, sorted(_label_funcs.keys()) + ['auto']) + from scipy import sparse + if src is None and mode in ['mean', 'max']: + kind = 'surface' + else: + _validate_type(src, SourceSpaces) + kind = src.kind + 
_check_option('mode', mode, _get_default_label_modes()) - kind = src.kind if kind in ('surface', 'mixed'): if not isinstance(labels, list): labels = [labels] @@ -3082,11 +3103,11 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean', for si, stc in enumerate(stcs): _validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,), 'source estimate') + _check_option( + 'mode', mode, _get_allowed_label_modes(stc), + 'when using a vector and/or volume source estimate') if isinstance(stc, (_BaseVolSourceEstimate, _BaseVectorSourceEstimate)): - _check_option( - 'mode', mode, ('mean', 'max', 'auto'), - 'when using a vector and/or volume source estimate') mode = 'mean' if mode == 'auto' else mode else: mode = 'mean_flip' if mode == 'auto' else mode @@ -3144,8 +3165,7 @@ def _gen_extract_label_time_course(stcs, labels, src, mode='mean', @verbose def extract_label_time_course(stcs, labels, src, mode='auto', allow_empty=False, return_generator=False, - *, trans=None, mri_resolution=True, - verbose=None): + *, mri_resolution=True, verbose=None): """Extract label time course for lists of labels and source estimates. This function will extract one time course for each label and source @@ -3162,7 +3182,6 @@ def extract_label_time_course(stcs, labels, src, mode='auto', %(eltc_allow_empty)s return_generator : bool If True, a generator instead of a list is returned. - %(trans_deprecated)s %(eltc_mri_resolution)s %(verbose)s @@ -3190,7 +3209,7 @@ def extract_label_time_course(stcs, labels, src, mode='auto', label_tc = _gen_extract_label_time_course( stcs, labels, src, mode=mode, allow_empty=allow_empty, - trans=trans, mri_resolution=mri_resolution) + mri_resolution=mri_resolution) if not return_generator: # do the extraction and return a list @@ -3206,12 +3225,12 @@ def extract_label_time_course(stcs, labels, src, mode='auto', @verbose def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', project=True, subjects_dir=None, src=None, verbose=None): - """Create a STC from ECoG and sEEG sensor data. + """Create a STC from ECoG, sEEG and DBS sensor data. Parameters ---------- evoked : instance of Evoked - The evoked data. Must contain ECoG, or sEEG channels. + The evoked data. Must contain ECoG, sEEG or DBS channels. %(trans)s subject : str The subject name. @@ -3219,8 +3238,9 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', Distance (m) defining the activation "ball" of the sensor. mode : str Can be "sum" to do a linear sum of weights, "nearest" to - use only the weight of the nearest sensor, or "zero" to use a - zero-order hold. See Notes. + use only the weight of the nearest sensor, or "single" to + do a distance-weight of the nearest sensor. Default is "sum". + See Notes. project : bool If True, project the electrodes to the nearest ``'pial`` surface vertex before computing distances. Only used when doing a @@ -3257,17 +3277,17 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', 1 and a sensor at ``distance`` meters away (or larger) gets weight 0. If ``distance`` is less than the distance between any two electrodes, this will be the same as ``'nearest'``. - - ``'weighted'`` + - ``'single'`` Same as ``'sum'`` except that only the nearest electrode is used, rather than summing across electrodes within the ``distance`` radius. - As as ``'nearest'`` for vertices with distance zero to the projected + As ``'nearest'`` for vertices with distance zero to the projected sensor. 
- ``'nearest'`` The value is given by the value of the nearest sensor, up to a ``distance`` (beyond which it is zero). If creating a Volume STC, ``src`` must be passed in, and this - function will project sEEG sensors to nearby surrounding vertices. + function will project sEEG and DBS sensors to nearby surrounding vertices. Then the activation at each volume vertex is given by the mode in the same way as ECoG surface projections. @@ -3280,8 +3300,8 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', _validate_type(src, (None, SourceSpaces), 'src') _check_option('mode', mode, ('sum', 'single', 'nearest')) - # create a copy of Evoked using ecog and seeg - evoked = evoked.copy().pick_types(ecog=True, seeg=True) + # create a copy of Evoked using ecog, seeg and dbs + evoked = evoked.copy().pick_types(ecog=True, seeg=True, dbs=True) # get channel positions that will be used to pinpoint where # in the Source space we will use the evoked data @@ -3290,7 +3310,8 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', # remove nan channels nan_inds = np.where(np.isnan(pos).any(axis=1))[0] nan_chs = [evoked.ch_names[idx] for idx in nan_inds] - evoked.drop_channels(nan_chs) + if len(nan_chs): + evoked.drop_channels(nan_chs) pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds] # coord_frame transformation from native mne "head" to MRI coord_frame @@ -3299,7 +3320,7 @@ def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum', # convert head positions -> coord_frame MRI pos = apply_trans(trans, pos) - subject = _check_subject(None, subject, False) + subject = _check_subject(None, subject, raise_error=False) subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) if src is None: # fake a full surface one rrs = [read_surface(op.join(subjects_dir, subject, diff --git a/mne/source_space.py b/mne/source_space.py index 293a883b7aa..0e1186e798b 100644 --- a/mne/source_space.py +++ b/mne/source_space.py @@ -13,10 +13,9 @@ import os.path as op import numpy as np -from scipy import sparse, linalg from .io.constants import FIFF -from .io.meas_info import create_info, Info +from .io.meas_info import create_info, Info, read_fiducials from .io.tree import dir_tree_find from .io.tag import find_tag, read_tag from .io.open import fiff_open @@ -322,16 +321,16 @@ def __deepcopy__(self, memodict): ss.append(deepcopy(s, memodict)) return SourceSpaces(ss, info) - def save(self, fname, overwrite=False): + @verbose + def save(self, fname, overwrite=False, *, verbose=None): """Save the source spaces to a fif file. Parameters ---------- fname : str File to write. - overwrite : bool - If True, the destination file (if it exists) will be overwritten. - If False (default), an error will be raised if the file exists. + %(overwrite)s + %(verbose_meth)s """ write_source_spaces(fname, self, overwrite) @@ -374,8 +373,7 @@ def export_volume(self, fname, include_surfaces=True, use_lut : bool If True, assigns a numeric value to each source space that corresponds to a color on the freesurfer lookup table. - overwrite : bool - If True, overwrite the file if it exists. + %(overwrite)s .. versionadded:: 0.19 %(verbose_meth)s @@ -1064,9 +1062,7 @@ def write_source_spaces(fname, src, overwrite=False, verbose=None): -src.fif.gz. src : SourceSpaces The source spaces (as returned by read_source_spaces). - overwrite : bool - If True, the destination file (if it exists) will be overwritten. - If False (default), an error will be raised if the file exists. 
+ %(overwrite)s %(verbose)s See Also @@ -1102,6 +1098,7 @@ def write_source_spaces(fname, src, overwrite=False, verbose=None): def _write_one_source_space(fid, this, verbose=None): """Write one source space.""" + from scipy import sparse if this['type'] == 'surf': src_type = FIFF.FIFFV_MNE_SPACE_SURFACE elif this['type'] == 'vol': @@ -2296,6 +2293,7 @@ def _src_vol_dims(s): def _add_interpolator(sp): """Compute a sparse matrix to interpolate the data into an MRI volume.""" # extract transformation information from mri + from scipy import sparse mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0]) # @@ -2348,6 +2346,7 @@ def _add_interpolator(sp): def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None): """Compute a grid-to-grid linear or nearest interpolation given.""" + from scipy import sparse from_shape = np.array(from_shape, int) to_shape = np.array(to_shape, int) trans = np.array(trans, np.float64) # to -> from @@ -2649,6 +2648,7 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None): the source space to disk, as the computed distances will automatically be stored along with the source space data for future use. """ + from scipy.sparse import csr_matrix from scipy.sparse.csgraph import dijkstra n_jobs = check_n_jobs(n_jobs) src = _ensure_src(src) @@ -2706,7 +2706,7 @@ def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None): i, j = np.meshgrid(s['vertno'], s['vertno']) i = i.ravel()[idx] j = j.ravel()[idx] - s['dist'] = sparse.csr_matrix( + s['dist'] = csr_matrix( (d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32) s['dist_limit'] = np.array([dist_limit], np.float32) @@ -3194,7 +3194,7 @@ def _get_src_nn(s, use_cps=True, vertices=None): # Project out the surface normal and compute SVD nn[vp] = np.sum( s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0) - nn /= linalg.norm(nn, axis=-1, keepdims=True) + nn /= np.linalg.norm(nn, axis=-1, keepdims=True) else: nn = s['nn'][vertices, :] return nn @@ -3260,3 +3260,51 @@ def compute_distance_to_sensors(src, info, picks=None, trans=None, depths = cdist(src_pos, sensor_pos) return depths + + +@verbose +def get_mni_fiducials(subject, subjects_dir=None, verbose=None): + """Estimate fiducials for a subject. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + fids_mri : list + List of estimated fiducials (each point in a dict), in the order + LPA, nasion, RPA. + + Notes + ----- + This takes the ``fsaverage-fiducials.fif`` file included with MNE—which + contain the LPA, nasion, and RPA for the ``fsaverage`` subject—and + transforms them to the given FreeSurfer subject's MRI space. + The MRI of ``fsaverage`` is already in MNI Talairach space, so applying + the inverse of the given subject's MNI Talairach affine transformation + (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is used + to estimate the subject's fiducial locations. + + For more details about the coordinate systems and transformations involved, + see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and + :ref:`plot_source_alignment`. + """ + # Eventually we might want to allow using the MNI Talairach with-skull + # transformation rather than the standard brain-based MNI Talaranch + # transformation, and/or project the points onto the head surface + # (if available). + fname_fids_fs = os.path.join(os.path.dirname(__file__), 'data', + 'fsaverage', 'fsaverage-fiducials.fif') + + # Read fsaverage fiducials file and subject Talairach. 
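The transform described in the ``get_mni_fiducials`` docstring boils down to one affine application; a toy with an invented matrix standing in for the subject's ``talairach.xfm`` (the real one is read and inverted just below) looks like this::

    import numpy as np

    mri_mni_t = np.array([[1.02, 0.00, 0.00,  0.001],
                          [0.00, 0.97, 0.00, -0.012],
                          [0.00, 0.00, 1.05,  0.009],
                          [0.00, 0.00, 0.00,  1.000]])   # invented MRI -> MNI affine
    mni_mri_t = np.linalg.inv(mri_mni_t)                 # MNI -> subject MRI

    nasion_mni = np.array([0.0, 0.0864, -0.0131])        # invented MNI point, metres
    nasion_mri = (mni_mri_t @ np.r_[nasion_mni, 1.0])[:3]
    assert nasion_mri.shape == (3,)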
+ fids, coord_frame = read_fiducials(fname_fids_fs) + assert coord_frame == FIFF.FIFFV_COORD_MRI + if subject == 'fsaverage': + return fids # special short-circuit for fsaverage + mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir)) + for f in fids: + f['r'] = apply_trans(mni_mri_t, f['r']) + return fids diff --git a/mne/stats/_adjacency.py b/mne/stats/_adjacency.py index 4929c2f9253..c81344a8aef 100644 --- a/mne/stats/_adjacency.py +++ b/mne/stats/_adjacency.py @@ -5,7 +5,6 @@ # License: Simplified BSD import numpy as np -from scipy import sparse from ..utils import _validate_type, _check_option from ..utils.check import int_like @@ -30,6 +29,7 @@ def combine_adjacency(*structure): adjacency : scipy.sparse.coo_matrix, shape (n_features, n_features) The adjacency matrix. """ + from scipy import sparse structure = list(structure) for di, dim in enumerate(structure): name = f'structure[{di}]' diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index fb0ee794657..78887d9edcb 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -11,7 +11,6 @@ # License: Simplified BSD import numpy as np -from scipy import sparse from .parametric import f_oneway, ttest_1samp_no_p from ..parallel import parallel_func, check_n_jobs @@ -283,6 +282,7 @@ def _get_clusters_st(x_in, neighbors, max_step=1): def _get_components(x_in, adjacency, return_list=True): """Get connected components from a mask and a adjacency matrix.""" + from scipy import sparse if adjacency is False: components = np.arange(len(x_in)) else: @@ -502,6 +502,7 @@ def _find_clusters_1dir_parts(x, x_in, adjacency, max_step, partitions, def _find_clusters_1dir(x, x_in, adjacency, max_step, t_power, ndimage): """Actually call the clustering algorithm.""" + from scipy import sparse if adjacency is None: labels, n_labels = ndimage.label(x_in) @@ -557,11 +558,20 @@ def _cluster_indices_to_mask(components, n_tot): return components -def _cluster_mask_to_indices(components): +def _cluster_mask_to_indices(components, shape): """Convert to the old format of clusters, which were bool arrays.""" for ci, c in enumerate(components): - if not isinstance(c, slice): - components[ci] = np.where(c)[0] + if isinstance(c, np.ndarray): # mask + components[ci] = np.where(c.reshape(shape)) + else: + assert isinstance(c, tuple), type(c) + c = list(c) # tuple->list + for ii, cc in enumerate(c): + if isinstance(cc, slice): + c[ii] = np.arange(cc.start, cc.stop) + else: + c[ii] = np.where(cc)[0] + components[ci] = tuple(c) return components @@ -583,6 +593,7 @@ def _pval_from_histogram(T, H0, tail): def _setup_adjacency(adjacency, n_tests, n_times): + from scipy import sparse if not sparse.issparse(adjacency): raise ValueError("If adjacency matrix is given, it must be a " "SciPy sparse matrix.") @@ -918,7 +929,7 @@ def _permutation_cluster_test(X, threshold, n_permutations, tail, stat_fun, else: # ndimage outputs slices or boolean masks by default if out_type == 'indices': - clusters = _cluster_mask_to_indices(clusters) + clusters = _cluster_mask_to_indices(clusters, t_obs.shape) # convert our seed to orders # check to see if we can do an exact test @@ -1087,7 +1098,7 @@ def permutation_cluster_test( no points are excluded. %(clust_stepdown)s %(clust_power_f)s - %(clust_out_none)s + %(clust_out)s %(clust_disjoint)s %(clust_buffer)s %(verbose)s @@ -1147,7 +1158,7 @@ def permutation_cluster_1samp_test( no points are excluded. 
%(clust_stepdown)s %(clust_power_t)s - %(clust_out_none)s + %(clust_out)s %(clust_disjoint)s %(clust_buffer)s %(verbose)s @@ -1370,6 +1381,7 @@ def _st_mask_from_s_inds(n_times, n_vertices, vertices, set_as=True): @verbose def _get_partitions_from_adjacency(adjacency, n_times, verbose=None): """Specify disjoint subsets (e.g., hemispheres) based on adjacency.""" + from scipy import sparse if isinstance(adjacency, list): test = np.ones(len(adjacency)) test_adj = np.zeros((len(adjacency), len(adjacency)), dtype='bool') diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py index 8243c37b131..27404e6a6a9 100644 --- a/mne/stats/multi_comp.py +++ b/mne/stats/multi_comp.py @@ -17,7 +17,7 @@ def _ecdf(x): def fdr_correction(pvals, alpha=0.05, method='indep'): """P-value correction with False Discovery Rate (FDR). - Correction for multiple comparison using FDR [1]_. + Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`. This covers Benjamini/Hochberg for independent or positively correlated and Benjamini/Yekutieli for general or negatively correlated tests. @@ -41,9 +41,7 @@ def fdr_correction(pvals, alpha=0.05, method='indep'): References ---------- - .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps - in functional neuroimaging using the false discovery rate. - Neuroimage. 2002 Apr;15(4):870-8. + .. footbibliography:: """ pvals = np.asarray(pvals) shape_init = pvals.shape diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py index fa2af42079b..eb2e864e5bf 100644 --- a/mne/stats/parametric.py +++ b/mne/stats/parametric.py @@ -122,7 +122,7 @@ def f_oneway(*args): The one-way ANOVA tests the null hypothesis that 2 or more groups have the same population mean. The test is applied to samples from two or - more groups, possibly with differing sizes [1]_. + more groups, possibly with differing sizes :footcite:`Lowry2014`. This is a modified version of :func:`scipy.stats.f_oneway` that avoids computing the associated p-value. @@ -151,13 +151,11 @@ def f_oneway(*args): possible to use the Kruskal-Wallis H-test (:func:`scipy.stats.kruskal`) although with some loss of power - The algorithm is from Heiman [2]_, pp.394-7. + The algorithm is from Heiman :footcite:`Heiman2002`, pp.394-7. References ---------- - .. [1] Lowry, Richard. "Concepts and Applications of Inferential - Statistics". Chapter 14. - .. [2] Heiman, G.W. Research Methods in Statistics. 2002. + .. footbibliography:: """ n_classes = len(args) n_samples_per_class = np.array([len(a) for a in args]) diff --git a/mne/stats/permutations.py b/mne/stats/permutations.py index d293a6f0bce..d912d232e93 100644 --- a/mne/stats/permutations.py +++ b/mne/stats/permutations.py @@ -32,7 +32,7 @@ def permutation_t_test(X, n_permutations=10000, tail=0, n_jobs=1, adjusts p-values in a way that controls the family-wise error rate. However, the permutation method will be more powerful than Bonferroni correction when different variables in the test - are correlated (see [1]_). + are correlated (see :footcite:`NicholsHolmes2002`). Parameters ---------- @@ -71,9 +71,7 @@ def permutation_t_test(X, n_permutations=10000, tail=0, n_jobs=1, References ---------- - .. [1] Nichols, T. E. & Holmes, A. P. (2002). Nonparametric permutation - tests for functional neuroimaging: a primer with examples. - Human Brain Mapping, 15, 1-25. + .. 
footbibliography:: """ from .cluster_level import _get_1samp_orders n_samples, n_tests = X.shape diff --git a/mne/stats/regression.py b/mne/stats/regression.py index 80f198b46af..0b977ede512 100644 --- a/mne/stats/regression.py +++ b/mne/stats/regression.py @@ -10,7 +10,6 @@ from collections import namedtuple import numpy as np -from scipy import linalg, sparse from ..source_estimate import SourceEstimate from ..epochs import BaseEpochs @@ -20,7 +19,7 @@ def linear_regression(inst, design_matrix, names=None): - """Fit Ordinary Least Squares regression (OLS). + """Fit Ordinary Least Squares (OLS) regression. Parameters ---------- @@ -33,28 +32,30 @@ def linear_regression(inst, design_matrix, names=None): the first dimension of the data. The first column of this matrix will typically consist of ones (intercept column). names : array-like | None - Optional parameter to name the regressors. If provided, the length must - correspond to the number of columns present in regressors - (including the intercept, if present). - Otherwise the default names are x0, x1, x2...xn for n regressors. + Optional parameter to name the regressors (i.e., the columns in the + design matrix). If provided, the length must correspond to the number + of columns present in design matrix (including the intercept, if + present). Otherwise, the default names are ``'x0'``, ``'x1'``, + ``'x2', …, 'x(n-1)'`` for ``n`` regressors. Returns ------- results : dict of namedtuple - For each regressor (key) a namedtuple is provided with the + For each regressor (key), a namedtuple is provided with the following attributes: - beta : regression coefficients - stderr : standard error of regression coefficients - t_val : t statistics (beta / stderr) - p_val : two-sided p-value of t statistic under the t distribution - mlog10_p_val : -log10 transformed p-value. + - ``beta`` : regression coefficients + - ``stderr`` : standard error of regression coefficients + - ``t_val`` : t statistics (``beta`` / ``stderr``) + - ``p_val`` : two-sided p-value of t statistic under the t + distribution + - ``mlog10_p_val`` : -log₁₀-transformed p-value. The tuple members are numpy arrays. The shape of each numpy array is the shape of the data minus the first dimension; e.g., if the shape of - the original data was (n_observations, n_channels, n_timepoints), + the original data was ``(n_observations, n_channels, n_timepoints)``, then the shape of each of the arrays will be - (n_channels, n_timepoints). + ``(n_channels, n_timepoints)``. """ if names is None: names = ['x%i' % i for i in range(design_matrix.shape[1])] @@ -100,7 +101,7 @@ def linear_regression(inst, design_matrix, names=None): def _fit_lm(data, design_matrix, names): """Aux function.""" - from scipy import stats + from scipy import stats, linalg n_samples = len(data) n_features = np.product(data.shape[1:]) if design_matrix.ndim != 2: @@ -159,7 +160,7 @@ def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1, Internally, this constructs a predictor matrix X of size n_samples * (n_conds * window length), solving the linear system ``Y = bX`` and returning ``b`` as evoked-like time series split by - condition. See [1]_. + condition. See :footcite:`SmithKutas2015`. Parameters ---------- @@ -238,10 +239,9 @@ def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1, References ---------- - .. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP - waveforms: II. Non-linear effects, overlap correction, and practical - considerations. 
Psychophysiology, 52(2), 169-189. + .. footbibliography:: """ + from scipy import linalg if isinstance(solver, str): if solver not in {"cholesky"}: raise ValueError("No such solver: {}".format(solver)) @@ -312,6 +312,7 @@ def _prepare_rerp_data(raw, events, picks=None, decim=1): def _prepare_rerp_preds(n_samples, sfreq, events, event_id=None, tmin=-.1, tmax=1, covariates=None): """Build predictor matrix and metadata (e.g. condition time windows).""" + from scipy import sparse conds = list(event_id) if covariates is not None: conds += list(covariates) diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py index 3968c7a943f..7cee96472d7 100644 --- a/mne/stats/tests/test_cluster_level.py +++ b/mne/stats/tests/test_cluster_level.py @@ -14,32 +14,14 @@ from mne import (SourceEstimate, VolSourceEstimate, MixedSourceEstimate, SourceSpaces) -from mne.fixes import has_numba from mne.parallel import _force_serial -from mne.stats import cluster_level, ttest_ind_no_p, combine_adjacency +from mne.stats import ttest_ind_no_p, combine_adjacency from mne.stats.cluster_level import (permutation_cluster_test, f_oneway, permutation_cluster_1samp_test, spatio_temporal_cluster_test, spatio_temporal_cluster_1samp_test, ttest_1samp_no_p, summarize_clusters_stc) -from mne.utils import (run_tests_if_main, catch_logging, check_version, - requires_sklearn) - - -@pytest.fixture(scope="function", params=('Numba', 'NumPy')) -def numba_conditional(monkeypatch, request): - """Test both code paths on machines that have Numba.""" - assert request.param in ('Numba', 'NumPy') - if request.param == 'NumPy' and has_numba: - monkeypatch.setattr( - cluster_level, '_get_buddies', cluster_level._get_buddies_fallback) - monkeypatch.setattr( - cluster_level, '_get_selves', cluster_level._get_selves_fallback) - monkeypatch.setattr( - cluster_level, '_where_first', cluster_level._where_first_fallback) - if request.param == 'Numba' and not has_numba: - pytest.skip('Numba not installed') - yield request.param +from mne.utils import catch_logging, check_version, requires_sklearn n_space = 50 @@ -688,4 +670,43 @@ def test_tfce_thresholds(numba_conditional): data, tail=1, out_type='mask', threshold=dict(start=1, step=-0.5)) -run_tests_if_main() +# 1D gives slices, 2D+ gives boolean masks +@pytest.mark.parametrize('shape', ((11,), (11, 3), (11, 1, 2))) +@pytest.mark.parametrize('out_type', ('mask', 'indices')) +@pytest.mark.parametrize('adjacency', (None, 'sparse')) +def test_output_equiv(shape, out_type, adjacency): + """Test equivalence of output types.""" + rng = np.random.RandomState(0) + n_subjects = 10 + data = rng.randn(n_subjects, *shape) + data -= data.mean(axis=0, keepdims=True) + data[:, 2:4] += 2 + data[:, 6:9] += 2 + want_mask = np.zeros(shape, int) + want_mask[2:4] = 1 + want_mask[6:9] = 2 + if adjacency is not None: + assert adjacency == 'sparse' + adjacency = combine_adjacency(*shape) + clusters = permutation_cluster_1samp_test( + X=data, n_permutations=1, adjacency=adjacency, out_type=out_type)[1] + got_mask = np.zeros_like(want_mask) + for n, clu in enumerate(clusters, 1): + if out_type == 'mask': + if len(shape) == 1 and adjacency is None: + assert isinstance(clu, tuple) + assert len(clu) == 1 + assert isinstance(clu[0], slice) + else: + assert isinstance(clu, np.ndarray) + assert clu.dtype == bool + assert clu.shape == shape + got_mask[clu] = n + else: + assert isinstance(clu, tuple) + for c in clu: + assert isinstance(c, np.ndarray) + assert c.dtype.kind == 'i' + assert out_type == 
'indices' + got_mask[np.ix_(*clu)] = n + assert_array_equal(got_mask, want_mask) diff --git a/mne/surface.py b/mne/surface.py index 2c1aef53b75..87405b9a982 100644 --- a/mne/surface.py +++ b/mne/surface.py @@ -18,7 +18,6 @@ from struct import pack import numpy as np -from scipy.sparse import coo_matrix, csr_matrix, eye as speye from .io.constants import FIFF from .io.open import fiff_open @@ -279,6 +278,7 @@ def _triangle_neighbors(tris, npts): # for ti, tri in enumerate(tris): # for t in tri: # neighbor_tri[t].append(ti) + from scipy.sparse import coo_matrix rows = tris.ravel() cols = np.repeat(np.arange(len(tris)), 3) data = np.ones(len(cols)) @@ -738,6 +738,13 @@ def read_surface(fname, read_metadata=False, return_dict=False, return ret +def _read_mri_surface(fname): + surf = read_surface(fname, return_dict=True)[2] + surf['rr'] /= 1000. + surf.update(coord_frame=FIFF.FIFFV_COORD_MRI) + return surf + + def _read_wavefront_obj(fname): """Read a surface form a Wavefront .obj file. @@ -1032,8 +1039,9 @@ def _decimate_surface_spacing(surf, spacing): return surf +@verbose def write_surface(fname, coords, faces, create_stamp='', volume_info=None, - file_format='auto', overwrite=False): + file_format='auto', overwrite=False, *, verbose=None): """Write a triangular Freesurfer surface mesh. Accepts the same data format as is returned by read_surface(). @@ -1072,8 +1080,8 @@ def write_surface(fname, coords, faces, create_stamp='', volume_info=None, file name. Defaults to 'auto'. .. versionadded:: 0.21.0 - overwrite : bool - If True, overwrite the file if it exists. + %(overwrite)s + %(verbose)s See Also -------- @@ -1483,6 +1491,7 @@ def _make_morph_map(subject_from, subject_to, subjects_dir, xhemi): def _make_morph_map_hemi(subject_from, subject_to, subjects_dir, reg_from, reg_to): """Construct morph map for one hemisphere.""" + from scipy.sparse import csr_matrix, eye as speye # add speedy short-circuit for self-maps if subject_from == subject_to and reg_from == reg_to: fname = op.join(subjects_dir, subject_from, 'surf', reg_from) @@ -1656,6 +1665,7 @@ def mesh_edges(tris): edges : sparse matrix The adjacency matrix. """ + from scipy.sparse import coo_matrix if np.max(tris) > len(np.unique(tris)): raise ValueError( 'Cannot compute adjacency on a selection of triangles.') @@ -1690,6 +1700,7 @@ def mesh_dist(tris, vert): dist_matrix : scipy.sparse.csr_matrix Sparse matrix with distances between adjacent vertices. 
""" + from scipy.sparse import csr_matrix edges = mesh_edges(tris).tocoo() # Euclidean distances between neighboring vertices diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index fe62ae8707b..eb210754ee8 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -1,4 +1,5 @@ # Authors: Jaakko Leppakangas +# Robert Luke # # License: BSD 3 clause @@ -20,10 +21,10 @@ from mne import (create_info, read_annotations, annotations_from_events, events_from_annotations) from mne import Epochs, Annotations -from mne.utils import (run_tests_if_main, _TempDir, requires_version, - catch_logging) +from mne.utils import (requires_version, + catch_logging, requires_pandas) from mne.utils import (assert_and_remove_boundary_annot, _raw_annot, - _dt_to_stamp, _stamp_to_dt) + _dt_to_stamp, _stamp_to_dt, check_version) from mne.io import read_raw_fif, RawArray, concatenate_raws from mne.annotations import (_sync_onset, _handle_meas_date, _read_annotations_txt_parse_header) @@ -36,6 +37,10 @@ first_samps = pytest.mark.parametrize('first_samp', (0, 10000)) +needs_pandas = pytest.mark.skipif( + not check_version('pandas'), reason='Needs pandas') + + # On Windows, datetime.fromtimestamp throws an error for negative times. # We mimic this behavior on non-Windows platforms for ease of testing. class _windows_datetime(datetime): @@ -110,7 +115,7 @@ def test_annot_sanitizing(tmpdir): _assert_annotations_equal(annot, annot_read) # make sure pytest raises error on char-sequence that is not allowed - with pytest.raises(ValueError, match='in descriptions not supported'): + with pytest.raises(ValueError, match='in description not supported'): Annotations([0], [1], ['a{COLON}b']) @@ -164,7 +169,7 @@ def test_raw_array_orig_times(): assert raw.annotations.orig_time == orig_time -def test_crop(): +def test_crop(tmpdir): """Test cropping with annotations.""" raw = read_raw_fif(fif_fname) events = mne.find_events(raw) @@ -216,7 +221,7 @@ def test_crop(): assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2) # Test IO - tempdir = _TempDir() + tempdir = str(tmpdir) fname = op.join(tempdir, 'test-annot.fif') raw.annotations.save(fname) annot_read = read_annotations(fname) @@ -226,7 +231,7 @@ def test_crop(): assert annot_read.orig_time == raw.annotations.orig_time assert_array_equal(annot_read.description, raw.annotations.description) annot = Annotations((), (), ()) - annot.save(fname) + annot.save(fname, overwrite=True) pytest.raises(IOError, read_annotations, fif_fname) # none in old raw annot = read_annotations(fname) assert isinstance(annot, Annotations) @@ -787,83 +792,124 @@ def _assert_annotations_equal(a, b, tol=0): assert_allclose(a.onset, b.onset, rtol=0, atol=tol) assert_allclose(a.duration, b.duration, rtol=0, atol=tol) assert_array_equal(a.description, b.description) + assert_array_equal(a.ch_names, b.ch_names) a_orig_time = a.orig_time b_orig_time = b.orig_time assert a_orig_time == b_orig_time -@pytest.fixture(scope='session') -def dummy_annotation_csv_file(tmpdir_factory): - """Create csv file for testing.""" - content = ("onset,duration,description\n" - "2002-12-03 19:01:11.720100,1.0,AA\n" - "2002-12-03 19:01:20.720100,2.425,BB") +_ORIG_TIME = datetime.fromtimestamp(1038942071.7201, timezone.utc) - fname = tmpdir_factory.mktemp('data').join('annotations.csv') - fname.write(content) - return fname - -@pytest.fixture(scope='session') -def dummy_broken_annotation_csv_file(tmpdir_factory): +@pytest.fixture(scope='function', params=('ch_names', 
'fmt')) +def dummy_annotation_file(tmpdir_factory, ch_names, fmt): """Create csv file for testing.""" - content = ("onset,duration,description\n" - "1.,1.0,AA\n" - "3.,2.425,BB") - - fname = tmpdir_factory.mktemp('data').join('annotations_broken.csv') - fname.write(content) + if fmt == 'csv': + content = ("onset,duration,description\n" + "2002-12-03 19:01:11.720100,1.0,AA\n" + "2002-12-03 19:01:20.720100,2.425,BB") + elif fmt == 'txt': + content = ("# MNE-Annotations\n" + "# orig_time : 2002-12-03 19:01:11.720100\n" + "# onset, duration, description\n" + "0, 1, AA \n" + "9, 2.425, BB") + else: + assert fmt == 'fif' + content = Annotations( + [0, 9], [1, 2.425], ['AA', 'BB'], orig_time=_ORIG_TIME) + + if ch_names: + if isinstance(content, Annotations): + # this is a bit of a hack but it works + content.ch_names[:] = ((), ('MEG0111', 'MEG2563')) + else: + content = content.splitlines() + content[-3] += ',ch_names' + content[-2] += ',' + content[-1] += ',MEG0111:MEG2563' + content = '\n'.join(content) + + fname = tmpdir_factory.mktemp('data').join(f'annotations-annot.{fmt}') + if isinstance(content, str): + fname.write(content) + else: + content.save(fname) return fname -@requires_version('pandas', '0.16') -def test_io_annotation_csv(dummy_annotation_csv_file, - dummy_broken_annotation_csv_file, - tmpdir_factory): - """Test CSV input/output.""" - annot = read_annotations(str(dummy_annotation_csv_file)) - assert annot.orig_time == _handle_meas_date(1038942071.7201) - assert_array_equal(annot.onset, np.array([0., 9.], dtype=np.float32)) - assert_array_almost_equal(annot.duration, [1., 2.425]) - assert_array_equal(annot.description, ['AA', 'BB']) +@pytest.mark.parametrize('ch_names', (False, True)) +@pytest.mark.parametrize('fmt', [ + pytest.param('csv', marks=needs_pandas), + 'txt', + 'fif' +]) +def test_io_annotation(dummy_annotation_file, tmpdir, fmt, ch_names): + """Test CSV, TXT, and FIF input/output (which support ch_names).""" + annot = read_annotations(dummy_annotation_file) + assert annot.orig_time == _ORIG_TIME + kwargs = dict(orig_time=_ORIG_TIME) + if ch_names: + kwargs['ch_names'] = ((), ('MEG0111', 'MEG2563')) + _assert_annotations_equal( + annot, Annotations([0., 9.], [1., 2.425], ['AA', 'BB'], **kwargs), + tol=1e-6) # Now test writing - fname = str(tmpdir_factory.mktemp('data').join('annotations.csv')) + fname = tmpdir.join(f'annotations-annot.{fmt}') annot.save(fname) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) # Now without an orig_time annot._orig_time = None - annot.save(fname) + annot.save(fname, overwrite=True) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) - # Test broken .csv that does not use timestamps + +@requires_version('pandas') +def test_broken_csv(tmpdir): + """Test broken .csv that does not use timestamps.""" + content = ("onset,duration,description\n" + "1.,1.0,AA\n" + "3.,2.425,BB") + + fname = tmpdir.join('annotations_broken.csv') + fname.write(content) with pytest.warns(RuntimeWarning, match='save your CSV as a TXT'): - annot2 = read_annotations(str(dummy_broken_annotation_csv_file)) + read_annotations(fname) # Test for IO with .txt files -@pytest.fixture(scope='session') -def dummy_annotation_txt_file(tmpdir_factory): +@pytest.fixture(scope='function', params=('ch_names',)) +def dummy_annotation_txt_file(tmpdir_factory, ch_names): """Create txt file for testing.""" content = ("3.14, 42, AA \n" "6.28, 48, BB") + if ch_names: + content = content.splitlines() + content[0] = content[0].strip() + ',' + 
content[1] = content[1].strip() + ', MEG0111:MEG2563' + content = '\n'.join(content) fname = tmpdir_factory.mktemp('data').join('annotations.txt') fname.write(content) return fname -def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory): - """Test TXT input/output.""" +@pytest.mark.parametrize('ch_names', (False, True)) +def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory, + ch_names): + """Test TXT input/output without meas_date.""" annot = read_annotations(str(dummy_annotation_txt_file)) assert annot.orig_time is None - assert_array_equal(annot.onset, [3.14, 6.28]) - assert_array_equal(annot.duration, [42., 48]) - assert_array_equal(annot.description, ['AA', 'BB']) + kwargs = dict() + if ch_names: + kwargs['ch_names'] = [(), ('MEG0111', 'MEG2563')] + _assert_annotations_equal( + annot, Annotations([3.14, 6.28], [42., 48], ['AA', 'BB'], **kwargs)) # Now test writing fname = str(tmpdir_factory.mktemp('data').join('annotations.txt')) @@ -875,26 +921,12 @@ def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory): assert annot.orig_time is None annot._orig_time = _handle_meas_date(1038942071.7201) assert annot.orig_time is not None - annot.save(fname) + annot.save(fname, overwrite=True) annot2 = read_annotations(fname) assert annot2.orig_time is not None _assert_annotations_equal(annot, annot2) -@pytest.fixture(scope='session') -def dummy_annotation_txt_header(tmpdir_factory): - """Create txt header.""" - content = ("# A something \n" - "# orig_time : 42\n" - "# orig_time : 2002-12-03 19:01:11.720100\n" - "# orig_time : 42\n" - "# C\n" - "Done") - fname = tmpdir_factory.mktemp('data').join('header.txt') - fname.write(content) - return str(fname) - - @pytest.mark.parametrize('meas_date, out', [ pytest.param('toto', None, id='invalid string'), pytest.param(None, None, id='None'), @@ -913,73 +945,40 @@ def test_handle_meas_date(meas_date, out): assert _handle_meas_date(meas_date) == out -def test_read_annotation_txt_header(dummy_annotation_txt_header): +def test_read_annotation_txt_header(tmpdir): """Test TXT orig_time recovery.""" - orig_time = _read_annotations_txt_parse_header(dummy_annotation_txt_header) - want = datetime.fromtimestamp(1038942071.7201, timezone.utc) - assert orig_time == want - - -@pytest.fixture(scope='session') -def dummy_annotation_txt_file_with_orig_time(tmpdir_factory): - """Create TXT annotations with header.""" - content = ("# MNE-Annotations\n" + content = ("# A something \n" + "# orig_time : 42\n" "# orig_time : 2002-12-03 19:01:11.720100\n" - "# onset, duration, description\n" - "3.14, 42, AA \n" - "6.28, 48, BB") - - fname = tmpdir_factory.mktemp('data').join('annotations.txt') + "# orig_time : 42\n" + "# C\n" + "Done") + fname = tmpdir.join('header.txt') fname.write(content) - return fname - - -def test_read_annotation_txt_orig_time( - dummy_annotation_txt_file_with_orig_time): - """Test TXT input/output.""" - annot = read_annotations(str(dummy_annotation_txt_file_with_orig_time)) - dt = datetime.fromtimestamp(1038942071.7201, timezone.utc) - want = Annotations([3.14, 6.28], [42., 48], ['AA', 'BB'], dt) - assert annot == want + orig_time = _read_annotations_txt_parse_header(fname) + want = datetime.fromtimestamp(1038942071.7201, timezone.utc) + assert orig_time == want -@pytest.fixture(scope='session') -def dummy_annotation_txt_one_segment(tmpdir_factory): - """Create empty TXT annotations.""" +def test_read_annotation_txt_one_segment(tmpdir): + """Test empty TXT input/output.""" content = ("# MNE-Annotations\n" "# 
onset, duration, description\n" "3.14, 42, AA") - fname = tmpdir_factory.mktemp('data').join('one-annotations.txt') + fname = tmpdir.join('one-annotations.txt') fname.write(content) - return fname + annot = read_annotations(fname) + _assert_annotations_equal(annot, Annotations(3.14, 42, ['AA'])) -def test_read_annotation_txt_one_segment( - dummy_annotation_txt_one_segment): +def test_read_annotation_txt_empty(tmpdir): """Test empty TXT input/output.""" - annot = read_annotations(str(dummy_annotation_txt_one_segment)) - assert_array_equal(annot.onset, 3.14) - assert_array_equal(annot.duration, 42) - assert_array_equal(annot.description, 'AA') - - -@pytest.fixture(scope='session') -def dummy_annotation_txt_file_empty(tmpdir_factory): - """Create empty TXT annotations.""" content = ("# MNE-Annotations\n" "# onset, duration, description\n") - fname = tmpdir_factory.mktemp('data').join('empty-annotations.txt') + fname = tmpdir.join('empty-annotations.txt') fname.write(content) - return fname - - -def test_read_annotation_txt_empty( - dummy_annotation_txt_file_empty): - """Test empty TXT input/output.""" - annot = read_annotations(str(dummy_annotation_txt_file_empty)) - assert_array_equal(annot.onset, np.array([], dtype=np.float64)) - assert_array_equal(annot.duration, np.array([], dtype=np.float64)) - assert_array_equal(annot.description, np.array([], dtype='' -run_tests_if_main() +@requires_pandas +def test_annotation_to_data_frame(): + """Test annotation class to data frame conversion.""" + onset = np.arange(1, 10) + durations = np.full_like(onset, [4, 5, 6, 4, 5, 6, 4, 5, 6]) + description = ["yy"] * onset.shape[0] + + a = Annotations(onset=onset, + duration=durations, + description=description, + orig_time=0) + + df = a.to_data_frame() + for col in ['onset', 'duration', 'description']: + assert col in df.columns + assert df.description[0] == 'yy' + assert (df.onset[1] - df.onset[0]).seconds == 1 + assert df.groupby('description').count().onset['yy'] == 9 + + +def test_annotation_ch_names(): + """Test annotation ch_names updating and pruning.""" + info = create_info(10, 1000., 'eeg') + raw = RawArray(np.zeros((10, 1000)), info) + onset = [0.1, 0.3, 0.6] + duration = [0.05, 0.1, 0.2] + description = ['first', 'second', 'third'] + ch_names = [[], raw.ch_names[4:6], raw.ch_names[5:7]] + annot = Annotations(onset, duration, description, ch_names=ch_names) + raw.set_annotations(annot) + # renaming + rename = {name: name + 'new' for name in raw.ch_names} + raw_2 = raw.copy().rename_channels(rename) + for ch_rename, ch in zip(raw_2.annotations.ch_names, annot.ch_names): + assert all(name in raw_2.ch_names for name in ch_rename) + assert all(name in raw.ch_names for name in ch) + assert not any(name in raw.ch_names for name in ch_rename) + assert not any(name in raw_2.ch_names for name in ch) + raw_2.rename_channels({val: key for key, val in rename.items()}) + _assert_annotations_equal(raw.annotations, raw_2.annotations) + # dropping + raw_2.drop_channels(raw.ch_names[5:]) + annot_pruned = raw_2.annotations + assert len(raw_2.annotations) == 2 # dropped the last one + assert raw_2.annotations.ch_names[1] == tuple(raw.ch_names[4:5]) + for ch_drop in raw_2.annotations.ch_names: + assert all(name in raw_2.ch_names for name in ch_drop) + with pytest.raises(ValueError, match='channel name in annotations missin'): + raw_2.set_annotations(annot) + with pytest.warns(RuntimeWarning, match='channel name in annotations mis'): + raw_2.set_annotations(annot, on_missing='warn') + assert raw_2.annotations is not 
annot_pruned + _assert_annotations_equal(raw_2.annotations, annot_pruned) diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py index f19feea1875..80824817a98 100644 --- a/mne/tests/test_bem.py +++ b/mne/tests/test_bem.py @@ -14,7 +14,8 @@ from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces, make_bem_solution, read_bem_solution, write_bem_solution, - make_sphere_model, Transform, Info, write_surface) + make_sphere_model, Transform, Info, write_surface, + write_head_bem) from mne.preprocessing.maxfilter import fit_sphere_to_headshape from mne.io.constants import FIFF from mne.transforms import translation @@ -37,6 +38,8 @@ 'sample-320-320-320-bem-sol.fif') fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem', 'sample-320-bem-sol.fif') +fname_dense_head = op.join(subjects_dir, 'sample', 'bem', + 'sample-head-dense.fif') def _compare_bem_surfaces(surfs_1, surfs_2): @@ -99,7 +102,7 @@ def test_io_bem(tmpdir, ext): sol_read = read_bem_solution(temp_sol) _compare_bem_solutions(sol, sol_read) sol = read_bem_solution(fname_bem_sol_1) - with pytest.raises(RuntimeError, match='BEM model does not have'): + with pytest.raises(RuntimeError, match='BEM does not have.*triangulation'): _bem_find_surface(sol, 3) @@ -387,4 +390,29 @@ def test_fit_sphere_to_headshape(): pytest.raises(TypeError, fit_sphere_to_headshape, 1, units='m') +@testing.requires_testing_data +def test_io_head_bem(tmpdir): + """Test reading and writing of defective head surfaces.""" + head = read_bem_surfaces(fname_dense_head)[0] + fname_defect = op.join(str(tmpdir), 'temp-head-defect.fif') + # create defects + head['rr'][0] = np.array([-0.01487014, -0.04563854, -0.12660208]) + head['tris'][0] = np.array([21919, 21918, 21907]) + + with pytest.raises(RuntimeError, match='topological defects:'): + write_head_bem(fname_defect, head['rr'], head['tris']) + with pytest.warns(RuntimeWarning, match='topological defects:'): + write_head_bem(fname_defect, head['rr'], head['tris'], + on_defects='warn') + # test on_defects in read_bem_surfaces + with pytest.raises(RuntimeError, match='topological defects:'): + read_bem_surfaces(fname_defect) + with pytest.warns(RuntimeWarning, match='topological defects:'): + head_defect = read_bem_surfaces(fname_defect, on_defects='warn')[0] + + assert head['id'] == head_defect['id'] == FIFF.FIFFV_BEM_SURF_ID_HEAD + assert np.allclose(head['rr'], head_defect['rr']) + assert np.allclose(head['tris'], head_defect['tris']) + + run_tests_if_main() diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index a412d9071c4..9d422c15a06 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -13,18 +13,20 @@ from mne import pick_types, pick_info from mne.forward._compute_forward import _MAG_FACTOR from mne.io import (read_raw_fif, read_raw_artemis123, read_raw_ctf, read_info, - RawArray) + RawArray, read_raw_kit) from mne.io.constants import FIFF from mne.chpi import (compute_chpi_amplitudes, compute_chpi_locs, compute_head_pos, _setup_ext_proj, _chpi_locs_to_times_dig, _compute_good_distances, extract_chpi_locs_ctf, head_pos_to_trans_rot_t, read_head_pos, write_head_pos, filter_chpi, - _get_hpi_info, _get_hpi_initial_fit) -from mne.transforms import rot_to_quat, _angle_between_quats -from mne.simulation import add_chpi -from mne.utils import run_tests_if_main, catch_logging, assert_meg_snr, verbose + _get_hpi_info, _get_hpi_initial_fit, + extract_chpi_locs_kit) from mne.datasets import testing +from mne.simulation import add_chpi +from mne.transforms import rot_to_quat, 
_angle_between_quats +from mne.utils import catch_logging, assert_meg_snr, verbose +from mne.viz import plot_head_positions base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') ctf_fname = op.join(base_dir, 'test_ctf_raw.fif') @@ -49,6 +51,12 @@ art_mc_fname = op.join(data_path, 'ARTEMIS123', 'Artemis_Data_2017-04-04' + '-15h-44m-22s_Motion_Translation-z_mc.pos') +con_fname = op.join(data_path, 'KIT', 'MQKIT_125_2sec.con') +mrk_fname = op.join(data_path, 'KIT', 'MQKIT_125.mrk') +elp_fname = op.join(data_path, 'KIT', 'MQKIT_125.elp') +hsp_fname = op.join(data_path, 'KIT', 'MQKIT_125.hsp') +berlin_fname = op.join(data_path, 'KIT', 'data_berlin.con') + @testing.requires_testing_data def test_chpi_adjust(): @@ -487,6 +495,7 @@ def test_calculate_chpi_coil_locs_artemis(): raw = read_raw_artemis123(art_fname, preload=True) times, cHPI_digs = _calculate_chpi_coil_locs(raw, verbose='debug') + assert len(np.setdiff1d(times, raw.times + raw.first_time)) == 0 assert_allclose(times[5], 1.5, atol=1e-3) assert_allclose(cHPI_digs[5][0]['gof'], 0.995, atol=5e-3) assert_allclose(cHPI_digs[5][0]['r'], @@ -494,13 +503,20 @@ def test_calculate_chpi_coil_locs_artemis(): _check_dists(raw.info, cHPI_digs[5]) coil_amplitudes = compute_chpi_amplitudes(raw) with pytest.raises(ValueError, match='too_close'): - compute_chpi_locs(raw, coil_amplitudes, too_close='foo') + compute_chpi_locs(raw.info, coil_amplitudes, too_close='foo') # ensure values are in a reasonable range amps = np.linalg.norm(coil_amplitudes['slopes'], axis=-1) amps /= coil_amplitudes['slopes'].shape[-1] assert amps.shape == (len(coil_amplitudes['times']), 3) assert_array_less(amps, 1e-11) assert_array_less(1e-13, amps) + # with nan amplitudes (i.e., cHPI off) it should return an empty array, + # but still one that is 3D + coil_amplitudes['slopes'].fill(np.nan) + chpi_locs = compute_chpi_locs(raw.info, coil_amplitudes) + assert chpi_locs['rrs'].shape == (0, 3, 3) + pos = compute_head_pos(raw.info, chpi_locs) + assert pos.shape == (0, 10) def assert_suppressed(new, old, suppressed, retained): @@ -592,25 +608,42 @@ def test_chpi_subtraction_filter_chpi(): assert '2 cHPI' in log.getvalue() -def calculate_head_pos_ctf(raw): - """Wrap to facilitate API change.""" - chpi_locs = extract_chpi_locs_ctf(raw) - return compute_head_pos(raw.info, chpi_locs) - - @testing.requires_testing_data def test_calculate_head_pos_ctf(): - """Test extracting of cHPI positions from ctf data.""" + """Test extracting of cHPI positions from CTF data.""" raw = read_raw_ctf(ctf_chpi_fname) - quats = calculate_head_pos_ctf(raw) + chpi_locs = extract_chpi_locs_ctf(raw) + quats = compute_head_pos(raw.info, chpi_locs) mc_quats = read_head_pos(ctf_chpi_pos_fname) mc_quats[:, 9] /= 10000 # had old factor in there twice somehow... _assert_quats(quats, mc_quats, dist_tol=0.004, angle_tol=2.5, err_rtol=1., vel_atol=7e-3) # 7 mm/s + plot_head_positions(quats, info=raw.info) raw = read_raw_fif(ctf_fname) with pytest.raises(RuntimeError, match='Could not find'): - calculate_head_pos_ctf(raw) + extract_chpi_locs_ctf(raw) -run_tests_if_main() +@testing.requires_testing_data +def test_calculate_head_pos_kit(): + """Test calculation of head position using KIT data.""" + raw = read_raw_kit(con_fname, mrk_fname, elp_fname, hsp_fname) + assert len(raw.info['hpi_results']) == 1 + chpi_locs = extract_chpi_locs_kit(raw) + assert chpi_locs['rrs'].shape == (2, 5, 3) + assert_array_less(chpi_locs['gofs'], 1.) 
+ assert_array_less(0.98, chpi_locs['gofs']) + quats = compute_head_pos(raw.info, chpi_locs) + assert quats.shape == (2, 10) + # plotting works + plot_head_positions(quats, info=raw.info) + raw_berlin = read_raw_kit(berlin_fname) + assert_allclose(raw_berlin.info['dev_head_t']['trans'], np.eye(4)) + assert len(raw_berlin.info['hpi_results']) == 0 + with pytest.raises(ValueError, match='Invalid value'): + extract_chpi_locs_kit(raw_berlin) + with pytest.raises(RuntimeError, match='not find appropriate'): + extract_chpi_locs_kit(raw_berlin, 'STI 014') + with pytest.raises(RuntimeError, match='no initial cHPI'): + compute_head_pos(raw_berlin.info, chpi_locs) diff --git a/mne/tests/test_coreg.py b/mne/tests/test_coreg.py index 30a8707785c..e62d579cf9d 100644 --- a/mne/tests/test_coreg.py +++ b/mne/tests/test_coreg.py @@ -25,7 +25,7 @@ data_path = testing.data_path(download=False) -@pytest.yield_fixture +@pytest.fixture def few_surfaces(): """Set the _MNE_FEW_SURFACES env var.""" with modified_env(_MNE_FEW_SURFACES='true'): diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index 8232f43be58..b04d334c549 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -21,16 +21,15 @@ find_events, compute_raw_covariance, compute_covariance, read_evokeds, compute_proj_raw, pick_channels_cov, pick_types, make_ad_hoc_cov, - make_fixed_length_events, create_info) + make_fixed_length_events, create_info, compute_rank) from mne.channels import equalize_channels from mne.datasets import testing from mne.fixes import _get_args -from mne.io import read_raw_fif, RawArray, read_raw_ctf -from mne.io.pick import _DATA_CH_TYPES_SPLIT +from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info +from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info from mne.preprocessing import maxwell_filter from mne.rank import _compute_rank_int -from mne.utils import (requires_sklearn, run_tests_if_main, - catch_logging, assert_snr) +from mne.utils import requires_sklearn, catch_logging, assert_snr base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') cov_fname = op.join(base_dir, 'test-cov.fif') @@ -96,8 +95,8 @@ def test_cov_mismatch(): compute_covariance([epochs, epochs_2], on_mismatch='ignore') with pytest.raises(RuntimeWarning, match='transform mismatch'): compute_covariance([epochs, epochs_2], on_mismatch='warn') - pytest.raises(ValueError, compute_covariance, epochs, - on_mismatch='x') + with pytest.raises(ValueError, match='Invalid value'): + compute_covariance(epochs, on_mismatch='x') # This should work epochs.info['dev_head_t'] = None epochs_2.info['dev_head_t'] = None @@ -784,4 +783,25 @@ def test_equalize_channels(): assert cov2.ch_names == ['CH1', 'CH2'] -run_tests_if_main() +def test_compute_whitener_rank(): + """Test risky rank options.""" + info = read_info(ave_fname) + info = pick_info(info, pick_types(info, meg=True)) + info['projs'] = [] + # need a square version because the diag one takes shortcuts in + # compute_whitener (users shouldn't even need this function so it's + # private) + cov = make_ad_hoc_cov(info)._as_square() + assert len(cov['names']) == 306 + _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True) + assert rank == 306 + assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank) + cov['data'][-1] *= 1e-14 # trivially rank-deficient + _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True) + assert rank == 305 + assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank) + # this should emit a warning + with 
pytest.warns(RuntimeWarning, match='exceeds the estimated'): + _, _, rank = compute_whitener(cov, info, rank=dict(meg=306), + return_rank=True) + assert rank == 306 diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index 55bc1883926..bd14a37e92c 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -1,8 +1,9 @@ from copy import deepcopy -import numpy as np +import pytest from numpy.testing import assert_allclose from mne.defaults import _handle_default +from mne.io.base import _get_scaling def test_handle_default(): @@ -31,54 +32,11 @@ def test_si_units(): scalings['csd_bad'] = 1e5 units['csd_bad'] = 'V/m²' assert set(scalings) == set(units) - known_prefixes = { - '': 1, - 'm': 1e-3, - 'c': 1e-2, - 'µ': 1e-6, - 'n': 1e-9, - 'f': 1e-15, - } - known_SI = {'V', 'T', 'Am', 'm', 'M', 'rad', - 'AU', 'GOF'} # not really SI but we tolerate them - powers = '²' - - def _split_si(x): - if x == 'nAm': - prefix, si = 'n', 'Am' - elif x == 'GOF': - prefix, si = '', 'GOF' - elif x == 'AU': - prefix, si = '', 'AU' - elif x == 'rad': - prefix, si = '', 'rad' - elif len(x) == 2: - if x[1] in powers: - prefix, si = '', x - else: - prefix, si = x - else: - assert len(x) in (0, 1), x - prefix, si = '', x - return prefix, si for key, scale in scalings.items(): - unit = units[key] - try: - num, denom = unit.split('/') - except ValueError: # not enough to unpack - num, denom = unit, '' - # check the numerator and denominator - num_prefix, num_SI = _split_si(num) - assert num_prefix in known_prefixes - assert num_SI in known_SI - den_prefix, den_SI = _split_si(denom) - assert den_prefix in known_prefixes - if not (den_SI == den_prefix == ''): - assert den_SI.strip(powers) in known_SI - # reconstruct the scale factor - want_scale = known_prefixes[den_prefix] / known_prefixes[num_prefix] if key == 'csd_bad': - assert not np.isclose(scale, want_scale, rtol=10) + with pytest.raises(KeyError, match='is not a channel type'): + want_scale = _get_scaling(key, units[key]) else: + want_scale = _get_scaling(key, units[key]) assert_allclose(scale, want_scale, rtol=1e-12) diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py index 90081eaf3eb..876caaa0d43 100644 --- a/mne/tests/test_dipole.py +++ b/mne/tests/test_dipole.py @@ -25,6 +25,7 @@ from mne.surface import _compute_nearest from mne.bem import _bem_find_surface, read_bem_solution from mne.transforms import apply_trans, _get_trans +from mne.source_space import head_to_mni data_path = testing.data_path(download=False) meg_path = op.join(data_path, 'MEG', 'sample') @@ -140,6 +141,13 @@ def test_dipole_fitting(tmpdir): rank='info') # just to test rank support assert isinstance(residual, Evoked) + # Test conversion of dip.pos to MNI coordinates. + dip_mni_pos = dip.to_mni('sample', fname_trans, + subjects_dir=subjects_dir) + head_to_mni_dip_pos = head_to_mni(dip.pos, 'sample', fwd['mri_head_t'], + subjects_dir=subjects_dir) + assert_allclose(dip_mni_pos, head_to_mni_dip_pos, rtol=1e-3, atol=0) + # Sanity check: do our residuals have less power than orig data? data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0)) resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0)) @@ -478,7 +486,11 @@ def test_bdip(fname_dip_, fname_bdip_, tmpdir): err_msg='%s: %s' % (kind, key)) # Not stored assert this_bdip.name is None - assert_allclose(this_bdip.nfree, 0.) 
+ assert this_bdip.nfree is None + + # Test whether indexing works + this_bdip0 = this_bdip[0] + _check_dipole(this_bdip0, 1) run_tests_if_main() diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py index 4743da113b8..875815638b4 100644 --- a/mne/tests/test_docstring_parameters.py +++ b/mne/tests/test_docstring_parameters.py @@ -251,7 +251,6 @@ def test_tabs(): plot_raw_psd_topo plot_source_spectrogram prepare_inverse_operator -read_bad_channels read_fiducials read_tag rescale @@ -275,19 +274,26 @@ def test_documented(): else: public_modules_.append('mne.gui') - doc_file = op.abspath(op.join(op.dirname(__file__), '..', '..', 'doc', - 'python_reference.rst')) + doc_dir = op.abspath(op.join(op.dirname(__file__), '..', '..', 'doc')) + doc_file = op.join(doc_dir, 'python_reference.rst') if not op.isfile(doc_file): raise SkipTest('Documentation file not found: %s' % doc_file) + api_files = ( + 'connectivity', 'covariance', 'creating_from_arrays', 'datasets', + 'decoding', 'events', 'file_io', 'forward', 'inverse', 'logging', + 'most_used_classes', 'mri', 'preprocessing', 'reading_raw_data', + 'realtime', 'report', 'sensor_space', 'simulation', 'source_space', + 'statistics', 'time_frequency', 'visualization') known_names = list() - with open(doc_file, 'rb') as fid: - for line in fid: - line = line.decode('utf-8') - if not line.startswith(' '): # at least two spaces - continue - line = line.split() - if len(line) == 1 and line[0] != ':': - known_names.append(line[0].split('.')[-1]) + for api_file in api_files: + with open(op.join(doc_dir, f'{api_file}.rst'), 'rb') as fid: + for line in fid: + line = line.decode('utf-8') + if not line.startswith(' '): # at least two spaces + continue + line = line.split() + if len(line) == 1 and line[0] != ':': + known_names.append(line[0].split('.')[-1]) known_names = set(known_names) missing = [] diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index bece65c669d..8d944d985c5 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -16,6 +16,7 @@ from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_allclose, assert_equal, assert_array_less) import numpy as np +from numpy.fft import rfft, rfftfreq import matplotlib.pyplot as plt import scipy.signal @@ -28,8 +29,7 @@ from mne.datasets import testing from mne.chpi import read_head_pos, head_pos_to_trans_rot_t from mne.event import merge_events -from mne.fixes import rfft, rfftfreq -from mne.io import RawArray, read_raw_fif +from mne.io import RawArray, read_raw_fif, read_epochs_eeglab from mne.io.constants import FIFF from mne.io.proj import _has_eeg_average_ref_proj from mne.io.write import write_int, INT32_MAX, _get_split_size @@ -37,10 +37,11 @@ from mne.epochs import ( bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs, EpochsArray, concatenate_epochs, BaseEpochs, average_movements, - _handle_event_repeated) + _handle_event_repeated, make_metadata) from mne.utils import (requires_pandas, object_diff, catch_logging, _FakeNoPandas, - assert_meg_snr, check_version, _dt_to_stamp) + assert_meg_snr, check_version, _dt_to_stamp, + _check_eeglabio_installed) data_path = testing.data_path(download=False) fname_raw_testing = op.join(data_path, 'MEG', 'sample', @@ -171,7 +172,7 @@ def test_handle_event_repeated(): def _get_data(preload=False): """Get data.""" - raw = read_raw_fif(raw_fname, preload=preload) + raw = read_raw_fif(raw_fname, preload=preload, verbose='warning') events = 
read_events(event_name) picks = pick_types(raw.info, meg=True, eeg=True, stim=True, ecg=True, eog=True, include=['STI 014'], @@ -310,9 +311,16 @@ def _assert_drop_log_types(drop_log): def test_reject(): """Test epochs rejection.""" - raw, events, picks = _get_data() + raw, events, _ = _get_data() + names = raw.ch_names[::5] + assert 'MEG 2443' in names + raw.pick(names).load_data() + assert 'eog' in raw + raw.info.normalize_proj() + picks = np.arange(len(raw.ch_names)) # cull the list just to contain the relevant event events = events[events[:, 2] == event_id, :] + assert len(events) == 7 selection = np.arange(3) drop_log = ((),) * 3 + (('MEG 2443',),) * 4 _assert_drop_log_types(drop_log) @@ -839,8 +847,10 @@ def test_read_epochs_bad_events(): epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]), event_id, tmin, tmax, picks=picks) assert (repr(epochs)) # test repr + assert (epochs._repr_html_()) # test _repr_html_ epochs.drop_bad() assert (repr(epochs)) + assert (epochs._repr_html_()) with pytest.warns(RuntimeWarning, match='empty'): evoked = epochs.average() @@ -976,7 +986,8 @@ def test_epochs_io_preload(tmpdir, preload): epochs_no_bl.save(temp_fname_no_bl, overwrite=True) epochs_read = read_epochs(temp_fname) epochs_no_bl_read = read_epochs(temp_fname_no_bl) - pytest.raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3]) + with pytest.raises(ValueError, match='invalid'): + epochs.apply_baseline(baseline=[1, 2, 3]) epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline) assert (isinstance(epochs_with_bl, BaseEpochs)) assert (epochs_with_bl.baseline == (epochs_no_bl_read.tmin, baseline[1])) @@ -1313,21 +1324,26 @@ def test_evoked_io_from_epochs(tmpdir): atol=1 / evoked.info['sfreq']) # now let's do one with negative time + baseline = (0.1, 0.2) epochs = Epochs(raw, events[:4], event_id, 0.1, tmax, - picks=picks, baseline=(0.1, 0.2), decim=5) + picks=picks, baseline=baseline, decim=5) evoked = epochs.average() + assert_allclose(evoked.baseline, baseline) evoked.save(fname_temp) evoked2 = read_evokeds(fname_temp)[0] assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) + assert_allclose(evoked.baseline, baseline) # should be equivalent to a cropped original + baseline = (0.1, 0.2) epochs = Epochs(raw, events[:4], event_id, -0.2, tmax, - picks=picks, baseline=(0.1, 0.2), decim=5) + picks=picks, baseline=baseline, decim=5) evoked = epochs.average() evoked.crop(0.099, None) assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) + assert_allclose(evoked.baseline, baseline) # should work when one channel type is changed to a non-data ch picks = pick_types(raw.info, meg=True, eeg=True) @@ -1377,8 +1393,11 @@ def test_evoked_standard_error(tmpdir): assert ave.first == ave2.first -def test_reject_epochs(): +def test_reject_epochs(tmpdir): """Test of epochs rejection.""" + tempdir = str(tmpdir) + temp_fname = op.join(tempdir, 'test-epo.fif') + raw, events, picks = _get_data() events1 = events[events[:, 2] == event_id] epochs = Epochs(raw, events1, event_id, tmin, tmax, @@ -1430,6 +1449,17 @@ def test_reject_epochs(): data = epochs[0].get_data()[0] assert epochs._is_good_epoch(data) == (True, None) + # Check that reject_tmin and reject_tmax are being adjusted for small time + # inaccuracies due to sfreq + epochs = Epochs(raw=raw, events=events1, event_id=event_id, + tmin=tmin, tmax=tmax, reject_tmin=tmin, 
reject_tmax=tmax) + assert epochs.tmin != tmin + assert epochs.tmax != tmax + assert np.isclose(epochs.tmin, epochs.reject_tmin) + assert np.isclose(epochs.tmax, epochs.reject_tmax) + epochs.save(temp_fname, overwrite=True) + read_epochs(temp_fname) + def test_preload_epochs(): """Test preload of epochs.""" @@ -1518,8 +1548,11 @@ def test_comparision_with_c(): assert_array_almost_equal(evoked.times, c_evoked.times, 12) -def test_crop(): +def test_crop(tmpdir): """Test of crop of epochs.""" + tempdir = str(tmpdir) + temp_fname = op.join(tempdir, 'test-epo.fif') + raw, events, picks = _get_data() epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks, preload=False, reject=reject, flat=flat) @@ -1577,6 +1610,24 @@ def test_crop(): pytest.raises(ValueError, epochs.crop, 1000, 2000) pytest.raises(ValueError, epochs.crop, 0.1, 0) + # Test that cropping adjusts reject_tmin and reject_tmax if need be. + epochs = Epochs(raw=raw, events=events[:5], event_id=event_id, + tmin=tmin, tmax=tmax, reject_tmin=tmin, reject_tmax=tmax) + epochs.load_data() + epochs_cropped = epochs.copy().crop(0, None) + assert np.isclose(epochs_cropped.tmin, epochs_cropped.reject_tmin) + + epochs_cropped = epochs.copy().crop(None, 0.1) + assert np.isclose(epochs_cropped.tmax, epochs_cropped.reject_tmax) + del epochs_cropped + + # Cropping & I/O roundtrip + epochs.crop(0, 0.1) + epochs.save(temp_fname) + epochs_read = mne.read_epochs(temp_fname) + assert np.isclose(epochs_read.tmin, epochs_read.reject_tmin) + assert np.isclose(epochs_read.tmax, epochs_read.reject_tmax) + def test_resample(): """Test of resample of epochs.""" @@ -1730,6 +1781,27 @@ def test_iter_evoked(): assert_array_equal(x, y) +@pytest.mark.parametrize('preload', (True, False)) +def test_iter_epochs(preload): + """Test iteration over epochs.""" + raw, events, picks = _get_data() + epochs = Epochs( + raw, events[:5], event_id, tmin, tmax, picks=picks, preload=preload) + assert not hasattr(epochs, '_current_detrend_picks') + epochs_data = epochs.get_data() + data = list() + for _ in range(10): + try: + data.append(next(epochs)) + except StopIteration: + break + else: + assert hasattr(epochs, '_current_detrend_picks') + assert not hasattr(epochs, '_current_detrend_picks') + data = np.array(data) + assert_allclose(data, epochs_data, atol=1e-20) + + def test_subtract_evoked(): """Test subtraction of Evoked from Epochs.""" raw, events, picks = _get_data() @@ -1864,6 +1936,20 @@ def test_epoch_eq(): assert_equal(len(epochs['a/x']), 0) assert_equal(len(epochs['a/y']), 0) + # test default behavior (event_ids=None) + epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4}, + tmin, tmax, picks=picks, reject=reject) + epochs_1, _ = epochs.copy().equalize_event_counts() + epochs_2, _ = epochs.copy().equalize_event_counts(list(epochs.event_id)) + assert_array_equal(epochs_1.events, epochs_2.events) + + # test invalid values of event_ids + with pytest.raises(TypeError, match='received a string'): + epochs.equalize_event_counts('hello!') + + with pytest.raises(TypeError, match='list-like or None'): + epochs.equalize_event_counts(1.5) + def test_access_by_name(tmpdir): """Test accessing epochs by event name and on_missing for rare events.""" @@ -2212,19 +2298,30 @@ def test_contains(): 'proj_name'): seeg.info[key] = raw.info[key] raw.add_channels([seeg]) - tests = [(('mag', False, False), ('grad', 'eeg', 'seeg')), - (('grad', False, False), ('mag', 'eeg', 'seeg')), - ((False, True, False), ('grad', 'mag', 'seeg')), - ((False, False, True), ('grad', 'mag', 
'eeg'))] - - for (meg, eeg, seeg), others in tests: - picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg) + # Add dbs channel + dbs = RawArray(np.zeros((1, len(raw.times))), + create_info(['DBS 001'], raw.info['sfreq'], 'dbs')) + for key in ('dev_head_t', 'highpass', 'lowpass', + 'dig', 'description', 'acq_pars', 'experimenter', + 'proj_name'): + dbs.info[key] = raw.info[key] + raw.add_channels([dbs]) + tests = [(('mag', False, False, False), ('grad', 'eeg', 'seeg', 'dbs')), + (('grad', False, False, False), ('mag', 'eeg', 'seeg', 'dbs')), + ((False, True, False, False), ('grad', 'mag', 'seeg', 'dbs')), + ((False, False, True, False), ('grad', 'mag', 'eeg', 'dbs'))] + + for (meg, eeg, seeg, dbs), others in tests: + picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg, + dbs=dbs) epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks_contains) if eeg: test = 'eeg' elif seeg: test = 'seeg' + elif dbs: + test = 'dbs' else: test = meg assert (test in epochs) @@ -2410,6 +2507,22 @@ def make_epochs(picks, proj): pytest.raises(NotImplementedError, add_channels_epochs, [epochs_meg2, epochs_eeg]) + # use delayed projection, add channel, ensure projectors match + epochs_meg2 = make_epochs(picks=picks_meg, proj='delayed') + assert len(epochs_meg2.info['projs']) == 3 + meg2_proj = epochs_meg2._projector + assert meg2_proj is not None + epochs_eeg = make_epochs(picks=picks_eeg, proj='delayed') + epochs_meg2.add_channels([epochs_eeg]) + del epochs_eeg + assert len(epochs_meg2.info['projs']) == 3 + new_proj = epochs_meg2._projector + n_meg, n_eeg = len(picks_meg), len(picks_eeg) + n_tot = n_meg + n_eeg + assert new_proj.shape == (n_tot,) * 2 + assert_allclose(new_proj[:n_meg, :n_meg], meg2_proj, atol=1e-12) + assert_allclose(new_proj[n_meg:, n_meg:], np.eye(n_eeg), atol=1e-12) + def test_array_epochs(tmpdir): """Test creating epochs from array.""" @@ -2612,12 +2725,12 @@ def test_add_channels(): def test_seeg_ecog(): - """Test the compatibility of the Epoch object with SEEG and ECoG data.""" + """Test compatibility of the Epoch object with SEEG, DBS and ECoG data.""" n_epochs, n_channels, n_times, sfreq = 5, 10, 20, 1000. 
data = np.ones((n_epochs, n_channels, n_times)) events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T pick_dict = dict(meg=False, exclude=[]) - for key in ('seeg', 'ecog'): + for key in ('seeg', 'dbs', 'ecog'): info = create_info(n_channels, sfreq, key) epochs = EpochsArray(data, info, events) pick_dict.update({key: True}) @@ -2836,6 +2949,80 @@ def assert_metadata_equal(got, exp): assert check.all().all() +@pytest.mark.parametrize( + ('all_event_id', 'row_events', 'keep_first', 'keep_last'), + [({'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4, 'c': 32}, # all events + None, None, None), + ({'a/1': 1, 'a/2': 2}, # subset of events + None, None, None), + (dict(), None, None, None), # empty set of events + ({'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4, 'c': 32}, + ('a/1', 'a/2', 'b/1', 'b/2'), ('a', 'b'), 'c')] +) +@requires_pandas +def test_make_metadata(all_event_id, row_events, keep_first, + keep_last): + """Test that make_metadata works.""" + raw, all_events, _ = _get_data() + tmin, tmax = -0.5, 1.5 + sfreq = raw.info['sfreq'] + kwargs = dict(events=all_events, event_id=all_event_id, + row_events=row_events, + keep_first=keep_first, keep_last=keep_last, + tmin=tmin, tmax=tmax, + sfreq=sfreq) + + if not kwargs['event_id']: + with pytest.raises(ValueError, match='must contain at least one'): + make_metadata(**kwargs) + return + + metadata, events, event_id = make_metadata(**kwargs) + + assert len(metadata) == len(events) + + if row_events: + assert set(metadata['event_name']) == set(row_events) + else: + assert set(metadata['event_name']) == set(event_id.keys()) + + # Check we have columns all events + keep_first = [] if keep_first is None else keep_first + keep_last = [] if keep_last is None else keep_last + event_names = sorted(set(event_id.keys()) | set(keep_first) | + set(keep_last)) + + for event_name in event_names: + assert event_name in metadata.columns + + # Check the time-locked event's metadata + for _, row in metadata.iterrows(): + event_name = row['event_name'] + assert np.isclose(row[event_name], 0) + + # Check non-time-locked events' metadata + for row_idx, row in metadata.iterrows(): + event_names = sorted(set(event_id.keys()) | set(keep_first) | + set(keep_last) - set(row['event_name'])) + for event_name in event_names: + if event_name in keep_first or event_name in keep_last: + assert isinstance(row[event_name], float) + if not ((event_name == 'a' and row_idx == 30) or + (event_name == 'b' and row_idx == 14) or + (event_name == 'c' and row_idx != 16)): + assert not np.isnan(row[event_name]) + + if event_name in keep_first and event_name not in all_event_id: + assert (row[f'first_{event_name}'] is None or + isinstance(row[f'first_{event_name}'], str)) + elif event_name in keep_last and event_name not in all_event_id: + assert (row[f'last_{event_name}'] is None or + isinstance(row[f'last_{event_name}'], str)) + + Epochs(raw, events=events, event_id=event_id, metadata=metadata, + verbose='warning') + + def test_events_list(): """Test that events can be a list.""" events = [[100, 0, 1], [200, 0, 1], [300, 0, 1]] @@ -2843,6 +3030,8 @@ def test_events_list(): mne.create_info(10, 1000.)), events=events) assert_array_equal(epochs.events, np.array(events)) + assert (repr(epochs)) # test repr + assert (epochs._repr_html_()) # test _repr_html_ def test_save_overwrite(tmpdir): @@ -2908,6 +3097,32 @@ def test_save_complex_data(tmpdir, preload, is_complex, fmt, rtol): assert_allclose(data_read, data, rtol=rtol) +@pytest.mark.skipif(not _check_eeglabio_installed(strict=False), + 
reason='eeglabio not installed') +@pytest.mark.parametrize('preload', (True, False)) +def test_export_eeglab(tmpdir, preload): + """Test saving an Epochs instance to EEGLAB's set format.""" + raw, events = _get_data()[:2] + raw.load_data() + epochs = Epochs(raw, events, preload=preload) + temp_fname = op.join(str(tmpdir), 'test.set') + epochs.export(temp_fname) + epochs.drop_channels([ch for ch in ['epoc', 'STI 014'] + if ch in epochs.ch_names]) + epochs_read = read_epochs_eeglab(temp_fname) + assert epochs.ch_names == epochs_read.ch_names + cart_coords = np.array([d['loc'][:3] + for d in epochs.info['chs']]) # just xyz + cart_coords_read = np.array([d['loc'][:3] + for d in epochs_read.info['chs']]) + assert_allclose(cart_coords, cart_coords_read) + assert_array_equal(epochs.events[:, 0], + epochs_read.events[:, 0]) # latency + assert epochs.event_id.keys() == epochs_read.event_id.keys() # just keys + assert_allclose(epochs.times, epochs_read.times) + assert_allclose(epochs.get_data(), epochs_read.get_data()) + + def test_no_epochs(tmpdir): """Test that having the first epoch bad does not break writing.""" # a regression noticed in #5564 @@ -3126,6 +3341,18 @@ def test_make_fixed_length_epochs(): assert len(epochs_annot) > 10 assert len(epochs) > len(epochs_annot) + # overlaps + epochs = make_fixed_length_epochs(raw, duration=1) + assert len(epochs.events) > 10 + epochs_ol = make_fixed_length_epochs(raw, duration=1, overlap=0.5) + assert len(epochs_ol.events) > 20 + epochs_ol_2 = make_fixed_length_epochs(raw, duration=1, overlap=0.9) + assert len(epochs_ol_2.events) > 100 + assert_array_equal(epochs_ol_2.events[:, 0], + np.unique(epochs_ol_2.events[:, 0])) + with pytest.raises(ValueError, match='overlap must be'): + make_fixed_length_epochs(raw, duration=1, overlap=1.1) + def test_epochs_huge_events(tmpdir): """Test epochs with event numbers that are too large.""" @@ -3221,3 +3448,60 @@ def test_epochs_baseline_after_cropping(tmpdir): assert_allclose(epochs_orig.baseline, epochs_cropped_read.baseline) assert 'baseline period was cropped' in str(epochs_cropped_read) assert_allclose(epochs_cropped.get_data(), epochs_cropped_read.get_data()) + + +def test_empty_constructor(): + """Test empty constructor for RtEpochs.""" + info = create_info(1, 1000., 'eeg') + event_id = 1 + tmin, tmax, baseline = -0.2, 0.5, None + BaseEpochs(info, None, None, event_id, tmin, tmax, baseline) + + +def test_apply_function(): + """Test apply function to epoch objects.""" + n_channels = 10 + data = np.arange(2 * n_channels * 1000).reshape(2, n_channels, 1000) + events = np.array([[0, 0, 1], [INT32_MAX, 0, 2]]) + info = mne.create_info(n_channels, 1000., 'eeg') + epochs = mne.EpochsArray(data, info, events) + data_epochs = epochs.get_data() + + # apply_function to all channels at once + def fun(data): + """Reverse channel order without changing values.""" + return np.eye(data.shape[1])[::-1] @ data + + want = data_epochs[:, ::-1] + got = epochs.apply_function(fun, channel_wise=False).get_data() + assert_array_equal(want, got) + + # apply_function channel-wise (to first 3 channels) by replacing with mean + picks = np.arange(3) + non_picks = np.arange(3, n_channels) + + def fun(data): + return np.full_like(data, data.mean()) + + out = epochs.apply_function(fun, picks=picks, channel_wise=True) + expected = epochs.get_data(picks).mean(axis=-1, keepdims=True) + assert np.all(out.get_data(picks) == expected) + assert_array_equal(out.get_data(non_picks), epochs.get_data(non_picks)) + + +@testing.requires_testing_data +def 
test_add_channels_picks(): + """Check that add_channels properly deals with picks.""" + raw = mne.io.read_raw_fif(raw_fname, verbose=False) + raw.pick([2, 3, 310]) # take some MEG and EEG + raw.info.normalize_proj() + + events = mne.make_fixed_length_events(raw, id=3000, start=0) + epochs = mne.Epochs(raw, events, event_id=3000, tmin=0, tmax=1, + proj=True, baseline=None, reject=None, preload=True, + decim=1) + + epochs_final = epochs.copy() + epochs_bis = epochs.copy().rename_channels(lambda ch: ch + '_bis') + epochs_final.add_channels([epochs_bis], force_update_info=True) + epochs_final.drop_channels(epochs.ch_names) diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index 1ba39dc25d2..4660c9e3729 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -219,7 +219,6 @@ def test_find_events(): # Reset some data for ease of comparison raw._first_samps[0] = 0 raw.info['sfreq'] = 1000 - raw._update_times() stim_channel = 'STI 014' stim_channel_idx = pick_channels(raw.info['ch_names'], diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py index ab3bbf3ffaa..dc0833ceda0 100644 --- a/mne/tests/test_evoked.py +++ b/mne/tests/test_evoked.py @@ -21,8 +21,7 @@ from mne.evoked import _get_peak, Evoked, EvokedArray from mne.io import read_raw_fif from mne.io.constants import FIFF -from mne.utils import (_TempDir, requires_pandas, - run_tests_if_main, grand_average) +from mne.utils import _TempDir, requires_pandas, grand_average base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data') fname = op.join(base_dir, 'test-ave.fif') @@ -586,8 +585,7 @@ def test_arithmetic(): ev20.comment = None ev = combine_evoked([ev20, -ev30], weights=[1, -1]) assert_equal(ev.comment.count('unknown'), 2) - assert ('-unknown' in ev.comment) - assert (' + ' in ev.comment) + assert ev.comment == 'unknown + unknown' ev20.comment = old_comment1 with pytest.raises(ValueError, match="Invalid value for the 'weights'"): @@ -681,7 +679,7 @@ def test_time_as_index_and_crop(): assert_allclose(evoked.times[[0, -1]], [tmin, tmax], atol=atol) assert_array_equal(evoked.time_as_index([-.1, .1], use_rounding=True), [0, len(evoked.times) - 1]) - evoked.crop(tmin, tmax, include_tmax=False) + evoked.crop(evoked.tmin, evoked.tmax, include_tmax=False) assert_allclose(evoked.times[[0, -1]], [tmin, tmax - delta], atol=atol) @@ -717,20 +715,62 @@ def test_add_channels(): pytest.raises(TypeError, evoked_meg.add_channels, evoked_badsf) -def test_evoked_baseline(): +def test_evoked_baseline(tmpdir): """Test evoked baseline.""" evoked = read_evokeds(fname, condition=0, baseline=None) # Here we create a data_set with constant data. evoked = EvokedArray(np.ones_like(evoked.data), evoked.info, evoked.times[0]) + assert evoked.baseline is None + + evoked_baselined = EvokedArray(np.ones_like(evoked.data), evoked.info, + evoked.times[0], baseline=(None, 0)) + assert_allclose(evoked_baselined.baseline, (evoked_baselined.tmin, 0)) + del evoked_baselined # Mean baseline correction is applied, since the data is equal to its mean # the resulting data should be a matrix of zeroes. - evoked.apply_baseline((None, None)) - + baseline = (None, None) + evoked.apply_baseline(baseline) + assert_allclose(evoked.baseline, (evoked.tmin, evoked.tmax)) assert_allclose(evoked.data, np.zeros_like(evoked.data)) + # Test that the .baseline attribute changes if we apply a different + # baseline now. 
+ baseline = (None, 0) + evoked.apply_baseline(baseline) + assert_allclose(evoked.baseline, (evoked.tmin, 0)) + + # By default for our test file, no baseline should be set upon reading + evoked = read_evokeds(fname, condition=0) + assert evoked.baseline is None + + # Test that the .baseline attribute is set when we call read_evokeds() + # with a `baseline` parameter. + baseline = (-0.2, -0.1) + evoked = read_evokeds(fname, condition=0, baseline=baseline) + assert_allclose(evoked.baseline, baseline) + + # Test that the .baseline attribute survives an I/O roundtrip. + evoked = read_evokeds(fname, condition=0) + baseline = (-0.2, -0.1) + evoked.apply_baseline(baseline) + assert_allclose(evoked.baseline, baseline) + + tmp_fname = tmpdir / 'test-ave.fif' + evoked.save(tmp_fname) + evoked_read = read_evokeds(tmp_fname, condition=0) + assert_allclose(evoked_read.baseline, evoked.baseline) + + # We shouldn't be able to remove a baseline correction after it has been + # applied. + evoked = read_evokeds(fname, condition=0) + baseline = (-0.2, -0.1) + evoked.apply_baseline(baseline) + with pytest.raises(ValueError, match='already been baseline-corrected'): + evoked.apply_baseline(None) + def test_hilbert(): """Test hilbert on raw, epochs, and evoked.""" @@ -758,4 +798,19 @@ def test_hilbert(): assert_allclose(evoked_hilb_env.data, np.abs(evoked_hilb.data)) -run_tests_if_main() +def test_apply_function_evk(): + """Check the apply_function method for evoked data.""" + # create fake evoked data to use for checking apply_function + data = np.random.rand(10, 1000) + info = create_info(10, 1000., 'eeg') + evoked = EvokedArray(data, info) + evoked_data = evoked.data.copy() + # check apply_function channel-wise + + def fun(data, multiplier): + return data * multiplier + + mult = -1 + applied = evoked.apply_function(fun, n_jobs=1, multiplier=mult) + assert np.shape(applied.data) == np.shape(evoked_data) + assert np.equal(applied.data, evoked_data * mult).all() diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index e3e9dc8148e..78d54732cd1 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -8,7 +8,7 @@ from scipy.signal import resample as sp_resample, butter, freqz, sosfreqz from mne import create_info -from mne.fixes import fft, fftfreq +from numpy.fft import fft, fftfreq from mne.io import RawArray, read_raw_fif from mne.io.pick import _DATA_CH_TYPES_SPLIT from mne.filter import (filter_data, resample, _resample_stim_channels, @@ -733,9 +733,9 @@ def test_filter_picks(): fs = 1000. kwargs = dict(l_freq=None, h_freq=40.) 
filt = filter_data(data, fs, **kwargs) - # don't include seeg or stim in this list because they are in the one below - # to ensure default cases are treated properly - for kind in ('eeg', 'grad', 'emg', 'misc'): + # don't include seeg, dbs or stim in this list because they are in the one + # below to ensure default cases are treated properly + for kind in ('eeg', 'grad', 'emg', 'misc', 'dbs'): for picks in (None, [-2], kind, 'k'): # With always at least one data channel info = create_info(['s', 'k', 't'], fs, ['seeg', kind, 'stim']) diff --git a/mne/tests/test_import_nesting.py b/mne/tests/test_import_nesting.py index 407ee55f4e4..18cce60ef81 100644 --- a/mne/tests/test_import_nesting.py +++ b/mne/tests/test_import_nesting.py @@ -12,10 +12,9 @@ out = set() -# check scipy +# check scipy (Numba imports it to check the version) ok_scipy_submodules = set(['scipy', 'numpy', # these appear in old scipy - 'fftpack', 'lib', 'linalg', 'fft', - 'misc', 'sparse', 'version']) + 'version']) scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys() if x.startswith('scipy.') and '__' not in x and not x.split('.')[1].startswith('_') @@ -29,7 +28,8 @@ for key in ('sklearn', 'pandas', 'mayavi', 'pyvista', 'matplotlib', 'dipy', 'nibabel', 'cupy', 'picard', 'pyvistaqt'): if x.startswith(key): - out |= {key} + x = '.'.join(x.split('.')[:2]) + out |= {x} if len(out) > 0: print('\\nFound un-nested import(s) for %s' % (sorted(out),), end='') exit(len(out)) diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py index cd50c57cac1..9bc66d2afb7 100644 --- a/mne/tests/test_label.py +++ b/mne/tests/test_label.py @@ -845,6 +845,10 @@ def test_grow_labels(): l1 = l11 + l12 assert_array_equal(l1.vertices, l0.vertices) + # non-overlapping (gh-8848) + for overlap in (False, True): + grow_labels('fsaverage', [0], 1, 1, subjects_dir, overlap=overlap) + @testing.requires_testing_data def test_random_parcellation(): diff --git a/mne/tests/test_line_endings.py b/mne/tests/test_line_endings.py index aba3225f35b..18dae700679 100644 --- a/mne/tests/test_line_endings.py +++ b/mne/tests/test_line_endings.py @@ -23,6 +23,7 @@ # the line endings and coding schemes used there 'test_old_layout_latin1_software_filter.vhdr', 'test_old_layout_latin1_software_filter.vmrk', + 'test_old_layout_latin1_software_filter_longname.vhdr', 'searchindex.dat', ) diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index c082ad102b6..05abc9ebf52 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -778,7 +778,7 @@ def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig): n_got_real = np.in1d( aseg_img.ravel(), [lut[name] for name in use_label_names]).sum() assert n_got_real == n_real - # - This was 291 on `master` before gh-5590 + # - This was 291 on `main` before gh-5590 # - Refactoring transforms it became 279 with a < 1e-8 change in vox_mri_t # - Dropped to 123 once nearest-voxel was used in gh-7653 # - Jumped back up to 330 with morphing fixes actually correctly @@ -791,7 +791,7 @@ def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig): src[0]['interpolator'] = None img = stc.as_volume(src, mri_resolution=False) n_on = np.array(img.dataobj).astype(bool).sum() - # was 20 on `master` before gh-5590 + # was 20 on `main` before gh-5590 # then 44 before gh-7653, which took it back to 20 assert n_on == n_orig # without the interpolator, this should fail diff --git a/mne/tests/test_selection.py b/mne/tests/test_read_vectorview_selection.py similarity index 63% rename from 
mne/tests/test_selection.py rename to mne/tests/test_read_vectorview_selection.py index fef9a8e0bce..b122285bac8 100644 --- a/mne/tests/test_selection.py +++ b/mne/tests/test_read_vectorview_selection.py @@ -2,7 +2,7 @@ import pytest -from mne import read_selection +from mne import read_vectorview_selection from mne.io import read_raw_fif from mne.utils import run_tests_if_main @@ -11,8 +11,8 @@ raw_new_fname = op.join(test_path, 'test_chpi_raw_sss.fif') -def test_read_selection(): - """Test reading of selections.""" +def test_read_vectorview_selection(): + """Test reading of Neuromag Vector View channel selections.""" # test one channel for each selection ch_names = ['MEG 2211', 'MEG 0223', 'MEG 1312', 'MEG 0412', 'MEG 1043', 'MEG 2042', 'MEG 2032', 'MEG 0522', 'MEG 1031'] @@ -22,30 +22,31 @@ def test_read_selection(): raw = read_raw_fif(raw_fname) for i, name in enumerate(sel_names): - sel = read_selection(name) + sel = read_vectorview_selection(name) assert ch_names[i] in sel - sel_info = read_selection(name, info=raw.info) + sel_info = read_vectorview_selection(name, info=raw.info) assert sel == sel_info # test some combinations - all_ch = read_selection(['L', 'R']) - left = read_selection('L') - right = read_selection('R') + all_ch = read_vectorview_selection(['L', 'R']) + left = read_vectorview_selection('L') + right = read_vectorview_selection('R') assert len(all_ch) == len(left) + len(right) assert len(set(left).intersection(set(right))) == 0 - frontal = read_selection('frontal') - occipital = read_selection('Right-occipital') + frontal = read_vectorview_selection('frontal') + occipital = read_vectorview_selection('Right-occipital') assert len(set(frontal).intersection(set(occipital))) == 0 ch_names_new = [ch.replace(' ', '') for ch in ch_names] raw_new = read_raw_fif(raw_new_fname) for i, name in enumerate(sel_names): - sel = read_selection(name, info=raw_new.info) + sel = read_vectorview_selection(name, info=raw_new.info) assert ch_names_new[i] in sel - pytest.raises(TypeError, read_selection, name, info='foo') + with pytest.raises(TypeError, match='must be an instance of Info or None'): + read_vectorview_selection(name, info='foo') run_tests_if_main() diff --git a/mne/tests/test_report.py b/mne/tests/test_report.py index 0f678d5bf9a..01afda8f6a9 100644 --- a/mne/tests/test_report.py +++ b/mne/tests/test_report.py @@ -4,19 +4,22 @@ # # License: BSD (3-clause) +import base64 import copy import glob +import pickle +from io import BytesIO import os import os.path as op +import re import shutil import pathlib import numpy as np -from numpy.testing import assert_equal import pytest from matplotlib import pyplot as plt -from mne import Epochs, read_events, read_evokeds +from mne import Epochs, read_events, read_evokeds, report as report_mod from mne.io import read_raw_fif from mne.datasets import testing from mne.report import Report, open_report, _ReportScraper @@ -38,6 +41,10 @@ inv_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') +bdf_fname = op.realpath(op.join(op.dirname(__file__), '..', 'io', + 'edf', 'tests', 'data', 'test.bdf')) +edf_fname = op.realpath(op.join(op.dirname(__file__), '..', 'io', + 'edf', 'tests', 'data', 'test.edf')) base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')) @@ -54,7 +61,7 @@ def _get_example_figures(): @pytest.mark.slowtest @testing.requires_testing_data def test_render_report(renderer, tmpdir): - """Test rendering -*.fif files for 
mne report.""" + """Test rendering *.fif files for mne report.""" tempdir = str(tmpdir) raw_fname_new = op.join(tempdir, 'temp_raw.fif') raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif') @@ -84,9 +91,11 @@ def test_render_report(renderer, tmpdir): raw.set_eeg_reference(projection=True) epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2) epochs.save(epochs_fname, overwrite=True) - # This can take forever (stall Travis), so let's make it fast + # This can take forever, so let's make it fast # Also, make sure crop range is wide enough to avoid rendering bug - evoked = epochs.average().crop(0.1, 0.2) + evoked = epochs.average() + with pytest.warns(RuntimeWarning, match='tmax is not in Evoked'): + evoked.crop(0.1, 0.2) evoked.save(evoked_fname) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, @@ -102,9 +111,9 @@ def test_render_report(renderer, tmpdir): [op.basename(x) for x in report.fnames]) assert (''.join(report.html).find(op.basename(fname)) != -1) - assert_equal(len(report.fnames), len(fnames)) - assert_equal(len(report.html), len(report.fnames)) - assert_equal(len(report.fnames), len(report)) + assert len(report.fnames) == len(fnames) + assert len(report.html) == len(report.fnames) + assert len(report.fnames) == len(report) # Check saving functionality report.data_path = tempdir @@ -123,8 +132,8 @@ def test_render_report(renderer, tmpdir): assert 'Topomap (ch_type =' in html assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html - assert_equal(len(report.html), len(fnames)) - assert_equal(len(report.html), len(report.fnames)) + assert len(report.html) == len(fnames) + assert len(report.html) == len(report.fnames) # Check saving same report to new filename report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False) @@ -167,6 +176,80 @@ def test_render_report(renderer, tmpdir): report.add_figs_to_section(['foo'], 'caption', 'section') +def test_add_custom_css(tmpdir): + """Test adding custom CSS rules to the report.""" + tempdir = str(tmpdir) + fname = op.join(tempdir, 'report.html') + fig = plt.figure() # Empty figure + + report = Report() + report.add_figs_to_section(figs=fig, captions='Test section') + custom_css = '.report_custom { color: red; }' + report.add_custom_css(css=custom_css) + + assert custom_css in report.include + report.save(fname, open_browser=False) + with open(fname, 'rb') as fid: + html = fid.read().decode('utf-8') + assert custom_css in html + + +def test_add_custom_js(tmpdir): + """Test adding custom JavaScript to the report.""" + tempdir = str(tmpdir) + fname = op.join(tempdir, 'report.html') + fig = plt.figure() # Empty figure + + report = Report() + report.add_figs_to_section(figs=fig, captions='Test section') + custom_js = ('function hello() {\n' + ' alert("Hello, report!");\n' + '}') + report.add_custom_js(js=custom_js) + + assert custom_js in report.include + report.save(fname, open_browser=False) + with open(fname, 'rb') as fid: + html = fid.read().decode('utf-8') + assert custom_js in html + + +@testing.requires_testing_data +def test_render_non_fiff(tmpdir): + """Test rendering non-FIFF files for mne report.""" + tempdir = str(tmpdir) + fnames_in = [bdf_fname, edf_fname] + fnames_out = [] + for fname in fnames_in: + basename = op.basename(fname) + basename, ext = op.splitext(basename) + fname_out = f'{basename}_raw{ext}' + outpath = op.join(tempdir, fname_out) + shutil.copyfile(fname, outpath) + fnames_out.append(fname_out) + + report = Report() + report.parse_folder(data_path=tempdir, render_bem=False, 
on_error='raise') + + # Check correct paths and filenames + for fname in fnames_out: + assert (op.basename(fname) in + [op.basename(x) for x in report.fnames]) + + assert len(report.fnames) == len(fnames_out) + assert len(report.html) == len(report.fnames) + assert len(report.fnames) == len(report) + + report.data_path = tempdir + fname = op.join(tempdir, 'report.html') + report.save(fname=fname, open_browser=False) + with open(fname, 'rb') as fid: + html = fid.read().decode('utf-8') + + assert '
Raw: test_raw.bdf' in html + assert 'Raw: test_raw.edf
    ' in html + + @testing.requires_testing_data def test_report_raw_psd_and_date(tmpdir): """Test report raw PSD and DATE_NONE functionality.""" @@ -286,6 +369,40 @@ def test_render_mri(renderer, tmpdir): assert html.count('
  • ', + report.html[0])]) + assert imgs.ndim == 4 # images, h, w, rgba + assert len(imgs) == 6 + imgs.shape = (len(imgs), -1) + norms = np.linalg.norm(imgs, axis=-1) + # should have down-up-down shape + corr = np.corrcoef(norms, np.hanning(len(imgs)))[0, 1] + assert 0.78 < corr < 0.80 + + @testing.requires_testing_data @requires_nibabel() def test_render_mri_without_bem(tmpdir): @@ -359,7 +476,7 @@ def test_validate_input(): comments=comments[:-1]) values = report._validate_input(items, captions, section, comments=None) items_new, captions_new, comments_new = values - assert_equal(len(comments_new), len(items)) + assert len(comments_new) == len(items) @requires_h5py @@ -488,7 +605,6 @@ def test_scraper(tmpdir): rst = scraper(block, block_vars, gallery_conf) out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html') assert not op.isfile(out_html) - os.makedirs(op.join(app.builder.outdir, 'auto_examples')) scraper.copyfiles() assert op.isfile(out_html) assert rst.count('"') == 6 @@ -511,4 +627,21 @@ def test_split_files(tmpdir, split_naming): assert len(report.fnames) == 1 +def test_survive_pickle(tmpdir): + """Testing functionality of Report-Object after pickling.""" + tempdir = str(tmpdir) + raw_fname_new = op.join(tempdir, 'temp_raw.fif') + shutil.copyfile(raw_fname, raw_fname_new) + + # Pickle report object to simulate multiprocessing with joblib + report = Report(info_fname=raw_fname_new) + pickled_report = pickle.dumps(report) + report = pickle.loads(pickled_report) + + # Just test if no errors occur + report.parse_folder(tempdir, render_bem=False) + save_name = op.join(tempdir, 'report.html') + report.save(fname=save_name, open_browser=False) + + run_tests_if_main() diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index ceb364f3ead..f8ac323e8ca 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -9,6 +9,7 @@ import re import numpy as np +from numpy.fft import fft from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal, assert_array_less) import pytest @@ -32,7 +33,7 @@ write_source_spaces) from mne.datasets import testing from mne.externals.h5io import write_hdf5 -from mne.fixes import fft, _get_img_fdata, nullcontext +from mne.fixes import _get_img_fdata, nullcontext from mne.io import read_info from mne.io.constants import FIFF from mne.source_estimate import grade_to_tris, _get_vol_mask @@ -732,7 +733,6 @@ def test_extract_label_time_course_volume( n_tot = 46 assert n_tot == len(src_labels) inv = read_inverse_operator(fname_inv_vol) - trans = inv['mri_head_t'] if cf == 'head': src = inv['src'] assert src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD @@ -808,9 +808,8 @@ def eltc(*args, **kwargs): # actually do the testing if cf == 'head' and not mri_res: # some missing - with pytest.deprecated_call(match='do not pass'): - eltc(labels, src, trans=trans, allow_empty=True, - mri_resolution=mri_res) + with pytest.warns(RuntimeWarning, match='any vertices'): + eltc(labels, src, allow_empty=True, mri_resolution=mri_res) for mode in ('mean', 'max'): with catch_logging() as log: label_tc = eltc(labels, src, mode=mode, allow_empty='ignore', @@ -1323,7 +1322,7 @@ def objective(x): stc_max, directions = stc.project('pca') flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True)) directions *= flips - assert_allclose(directions, want_nn, atol=1e-6) + assert_allclose(directions, want_nn, atol=2e-6) @testing.requires_testing_data @@ -1792,3 +1791,54 @@ def 
test_scale_morph_labels(kind, scale, monkeypatch, tmpdir): else: corr = np.corrcoef(label_tc.ravel(), label_tc_to.ravel())[0, 1] assert 0.93 < corr < 0.96, scale + + +@testing.requires_testing_data +@pytest.mark.parametrize('kind', [ + 'surface', + pytest.param('volume', marks=[pytest.mark.slowtest, + requires_version('nibabel')]), +]) +def test_label_extraction_subject(kind): + """Test that label extraction subject is treated properly.""" + if kind == 'surface': + inv = read_inverse_operator(fname_inv) + labels = read_labels_from_annot( + 'sample', subjects_dir=subjects_dir) + labels_fs = read_labels_from_annot( + 'fsaverage', subjects_dir=subjects_dir) + labels_fs = [label for label in labels_fs + if not label.name.startswith('unknown')] + assert all(label.subject == 'sample' for label in labels) + assert all(label.subject == 'fsaverage' for label in labels_fs) + assert len(labels) == len(labels_fs) == 68 + n_labels = 68 + else: + assert kind == 'volume' + inv = read_inverse_operator(fname_inv_vol) + inv['src'][0]['subject_his_id'] = 'sample' # modernize + labels = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') + labels_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz') + n_labels = 46 + src = inv['src'] + assert src.kind == kind + assert src._subject == 'sample' + ave = read_evokeds(fname_evoked)[0].apply_baseline((None, 0)).crop(0, 0.01) + assert len(ave.times) == 4 + stc = apply_inverse(ave, inv) + assert stc.subject == 'sample' + ltc = extract_label_time_course(stc, labels, src) + stc.subject = 'fsaverage' + with pytest.raises(ValueError, match=r'source spac.*not match.* stc\.sub'): + extract_label_time_course(stc, labels, src) + stc.subject = 'sample' + assert ltc.shape == (n_labels, 4) + if kind == 'volume': + with pytest.raises(RuntimeError, match='atlas.*not match.*source spa'): + extract_label_time_course(stc, labels_fs, src) + else: + with pytest.raises(ValueError, match=r'label\.sub.*not match.* stc\.'): + extract_label_time_course(stc, labels_fs, src) + stc.subject = None + with pytest.raises(ValueError, match=r'label\.sub.*not match.* sourc'): + extract_label_time_course(stc, labels_fs, src) diff --git a/mne/time_frequency/_stft.py b/mne/time_frequency/_stft.py index a414afae6a3..6d3a6808451 100644 --- a/mne/time_frequency/_stft.py +++ b/mne/time_frequency/_stft.py @@ -1,7 +1,7 @@ from math import ceil import numpy as np -from ..fixes import fft, ifft, fftfreq +from ..fixes import _import_fft from ..utils import logger, verbose @@ -34,6 +34,7 @@ def stft(x, wsize, tstep=None, verbose=None): istft stftfreq """ + rfft = _import_fft('rfft') if not np.isrealobj(x): raise ValueError("x is not a real valued array") @@ -90,8 +91,7 @@ def stft(x, wsize, tstep=None, verbose=None): wwin = win / swin[t * tstep: t * tstep + wsize] frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :] # FFT - fframe = fft(frame) - X[:, :, t] = fframe[:, :n_freq] + X[:, :, t] = rfft(frame) return X @@ -101,7 +101,7 @@ def istft(X, tstep=None, Tx=None): Parameters ---------- - X : array, shape (n_signals, wsize / 2 + 1, n_step) + X : array, shape (..., wsize / 2 + 1, n_step) The STFT coefficients for positive frequencies. 
tstep : int Step between successive windows in samples (must be a multiple of 2, @@ -119,9 +119,14 @@ def istft(X, tstep=None, Tx=None): stft """ # Errors and warnings - n_signals, n_win, n_step = X.shape - if (n_win % 2 == 0): - ValueError('The number of rows of the STFT matrix must be odd.') + irfft = _import_fft('irfft') + X = np.asarray(X) + if X.ndim < 2: + raise ValueError(f'X must have ndim >= 2, got {X.ndim}') + n_win, n_step = X.shape[-2:] + signal_shape = X.shape[:-2] + if n_win % 2 == 0: + raise ValueError('The number of rows of the STFT matrix must be odd.') wsize = 2 * (n_win - 1) if tstep is None: @@ -143,10 +148,10 @@ def istft(X, tstep=None, Tx=None): T = n_step * tstep - x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float64) + x = np.zeros(signal_shape + (T + wsize - tstep,), dtype=np.float64) - if n_signals == 0: - return x[:, :Tx] + if np.prod(signal_shape) == 0: + return x[..., :Tx] # Defining sine window win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi) @@ -158,18 +163,16 @@ def istft(X, tstep=None, Tx=None): swin[t * tstep:t * tstep + wsize] += win ** 2 swin = np.sqrt(swin / wsize) - fframe = np.empty((n_signals, n_win + wsize // 2 - 1), dtype=X.dtype) for t in range(n_step): # IFFT - fframe[:, :n_win] = X[:, :, t] - fframe[:, n_win:] = np.conj(X[:, wsize // 2 - 1: 0: -1, t]) - frame = ifft(fframe) - wwin = win / swin[t * tstep:t * tstep + wsize] + frame = irfft(X[..., t], wsize) # Overlap-add - x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin) + frame *= win / swin[t * tstep:t * tstep + wsize] + x[..., t * tstep: t * tstep + wsize] += frame # Truncation - x = x[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1][:, :Tx].copy() + x = x[..., (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1] + x = x[..., :Tx].copy() return x @@ -194,9 +197,8 @@ def stftfreq(wsize, sfreq=None): # noqa: D401 stft istft """ - n_freq = wsize // 2 + 1 - freqs = fftfreq(wsize) - freqs = np.abs(freqs[:n_freq]) + rfftfreq = _import_fft('rfftfreq') + freqs = rfftfreq(wsize) if sfreq is not None: freqs *= float(sfreq) return freqs diff --git a/mne/time_frequency/_stockwell.py b/mne/time_frequency/_stockwell.py index 2017998d556..c39d895cda5 100644 --- a/mne/time_frequency/_stockwell.py +++ b/mne/time_frequency/_stockwell.py @@ -4,11 +4,11 @@ # License : BSD 3-clause from copy import deepcopy -import math + import numpy as np -from scipy import fftpack # XXX explore cuda optimization at some point. +from ..fixes import _import_fft from ..io.pick import _pick_data_channels, pick_info from ..utils import verbose, warn, fill_doc, _validate_type from ..parallel import parallel_func, check_n_jobs @@ -25,7 +25,7 @@ def _is_power_of_two(n): if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft): # Compute next power of 2 - n_fft = 2 ** int(math.ceil(math.log(n_times, 2))) + n_fft = 2 ** int(np.ceil(np.log2(n_times))) elif n_fft < n_times: raise ValueError("n_fft cannot be smaller than signal size. " "Got %s < %s." % (n_fft, n_times)) @@ -42,7 +42,8 @@ def _is_power_of_two(n): def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width): """Precompute stockwell Gaussian windows (in the freq domain).""" - tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp + fft, fftfreq = _import_fft(('fft', 'fftfreq')) + tw = fftfreq(n_samp, 1. / sfreq) / n_samp tw = np.r_[tw[:1], tw[1:][::-1]] k = width # 1 for classical stowckwell transform @@ -55,35 +56,37 @@ def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width): window = ((f / (np.sqrt(2. 
* np.pi) * k)) * np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.)) window /= window.sum() # normalisation - windows[i_f] = fftpack.fft(window) + windows[i_f] = fft(window) return windows def _st(x, start_f, windows): """Compute ST based on Ali Moukadem MATLAB code (used in tests).""" + fft, ifft = _import_fft(('fft', 'ifft')) n_samp = x.shape[-1] ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128) # do the work - Fx = fftpack.fft(x) + Fx = fft(x) XF = np.concatenate([Fx, Fx], axis=-1) for i_f, window in enumerate(windows): f = start_f + i_f - ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window) + ST[..., i_f, :] = ifft(XF[..., f:f + n_samp] * window) return ST def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W): """Aux function.""" + fft, ifft = _import_fft(('fft', 'ifft')) n_samp = x.shape[-1] n_out = (n_samp - zero_pad) n_out = n_out // decim + bool(n_out % decim) psd = np.empty((len(W), n_out)) itc = np.empty_like(psd) if compute_itc else None - X = fftpack.fft(x) + X = fft(x) XX = np.concatenate([X, X], axis=-1) for i_f, window in enumerate(W): f = start_f + i_f - ST = fftpack.ifft(XX[:, f:f + n_samp] * window) + ST = ifft(XX[:, f:f + n_samp] * window) if zero_pad > 0: TFR = ST[:, :-zero_pad:decim] else: @@ -155,6 +158,7 @@ def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None, ---------- .. footbibliography:: """ + fftfreq = _import_fft('fftfreq') _validate_type(data, np.ndarray, 'data') if data.ndim != 3: raise ValueError( @@ -164,7 +168,7 @@ def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None, n_out = data.shape[2] // decim + bool(data.shape[-1] % decim) data, n_fft_, zero_pad = _check_input_st(data, n_fft) - freqs = fftpack.fftfreq(n_fft_, 1. / sfreq) + freqs = fftfreq(n_fft_, 1. / sfreq) if fmin is None: fmin = freqs[freqs > 0][0] if fmax is None: diff --git a/mne/time_frequency/ar.py b/mne/time_frequency/ar.py index 649e8d1d08b..1f4cc9a3a85 100644 --- a/mne/time_frequency/ar.py +++ b/mne/time_frequency/ar.py @@ -4,7 +4,6 @@ # License: BSD (3-clause) import numpy as np -from scipy import linalg from ..defaults import _handle_default from ..io.pick import _picks_to_idx, _picks_by_type, pick_info @@ -16,6 +15,7 @@ def _yule_walker(X, order=1): Operates in-place. 
""" + from scipy import linalg assert X.ndim == 2 denom = X.shape[-1] - np.arange(order + 1) r = np.zeros(order + 1, np.float64) diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py index c0de757ab89..da10a6a6c7c 100644 --- a/mne/time_frequency/csd.py +++ b/mne/time_frequency/csd.py @@ -9,10 +9,11 @@ import numbers import numpy as np -from .tfr import cwt, morlet -from ..fixes import rfftfreq +from .tfr import _cwt_array, morlet, _get_nfft +from ..fixes import _import_fft from ..io.pick import pick_channels, _picks_to_idx -from ..utils import logger, verbose, warn, copy_function_doc_to_method_doc +from ..utils import (logger, verbose, warn, copy_function_doc_to_method_doc, + ProgressBar) from ..viz.misc import plot_csd from ..time_frequency.multitaper import (_compute_mt_params, _mt_spectra, _csd_from_mt, _psd_from_mt_adaptive) @@ -698,6 +699,7 @@ def csd_array_fourier(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None, csd_morlet csd_multitaper """ + rfftfreq = _import_fft('rfftfreq') X, times, tmin, tmax, fmin, fmax = _prepare_csd_array( X, sfreq, t0, tmin, tmax, fmin, fmax) @@ -845,6 +847,7 @@ def csd_array_multitaper(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None, csd_morlet csd_multitaper """ + rfftfreq = _import_fft('rfftfreq') X, times, tmin, tmax, fmin, fmax = _prepare_csd_array( X, sfreq, t0, tmin, tmax, fmin, fmax) @@ -1021,9 +1024,10 @@ def csd_array_morlet(X, sfreq, frequencies, t0=0, tmin=None, tmax=None, times = times[csd_tslice] # Compute the CSD + nfft = _get_nfft(wavelets, X, use_fft) return _execute_csd_function(X, times, frequencies, _csd_morlet, - params=[sfreq, wavelets, csd_tslice, use_fft, - decim], + params=[sfreq, wavelets, nfft, csd_tslice, + use_fft, decim], n_fft=1, ch_names=ch_names, projs=projs, n_jobs=n_jobs, verbose=verbose) @@ -1140,14 +1144,8 @@ def _execute_csd_function(X, times, frequencies, csd_function, params, n_fft, # Compute CSD for each trial n_blocks = int(np.ceil(n_epochs / float(n_jobs))) - for i in range(n_blocks): + for i in ProgressBar(range(n_blocks), mesg='CSD epoch blocks'): epoch_block = X[i * n_jobs:(i + 1) * n_jobs] - if n_jobs > 1: - logger.info(' Computing CSD matrices for epochs %d..%d' - % (i * n_jobs + 1, (i + 1) * n_jobs)) - else: - logger.info(' Computing CSD matrix for epoch %d' % (i + 1)) - csds = parallel(my_csd(this_epoch, *params) for this_epoch in epoch_block) @@ -1274,7 +1272,8 @@ def _csd_multitaper(X, sfreq, n_times, window_fun, eigvals, freq_mask, n_fft, return csds -def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): +def _csd_morlet(data, sfreq, wavelets, nfft, tslice=None, use_fft=True, + decim=1): """Compute cross spectral density (CSD) using the given Morlet wavelets. Computes the CSD for a single epoch of data. @@ -1289,6 +1288,8 @@ def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): wavelets : list of ndarray The Morlet wavelets for which to compute the CSD's. These have been created by the `mne.time_frequency.tfr.morlet` function. + nfft : int + The number of FFT points. tslice : slice | None The desired time samples to compute the CSD over. If None, defaults to including all time samples. @@ -1314,7 +1315,8 @@ def _csd_morlet(data, sfreq, wavelets, tslice=None, use_fft=True, decim=1): _vector_to_sym_mat : For converting the CSD to a full matrix. 
""" # Compute PSD - psds = cwt(data, wavelets, use_fft=use_fft, decim=decim) + psds = _cwt_array(data, wavelets, nfft, mode='same', use_fft=use_fft, + decim=decim) if tslice is not None: tstart = None if tslice.start is None else tslice.start // decim diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index f818bddadd9..b05bb59dea5 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -6,7 +6,7 @@ import operator import numpy as np -from ..fixes import rfft, irfft, rfftfreq +from ..fixes import _import_fft from ..parallel import parallel_func from ..utils import sum_squared, warn, verbose, logger, _check_option @@ -62,6 +62,7 @@ def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None, from scipy import interpolate from scipy.signal.windows import dpss as sp_dpss from ..filter import next_fast_len + rfft, irfft = _import_fft(('rfft', 'irfft')) # This np.int32 business works around a weird Windows bug, see # gh-5039 and https://github.com/scipy/scipy/pull/8608 Kmax = np.int32(operator.index(Kmax)) @@ -299,6 +300,7 @@ def _mt_spectra(x, dpss, sfreq, n_fft=None): freqs : array The frequency points in Hz of the spectra """ + rfft, rfftfreq = _import_fft(('rfft', 'rfftfreq')) if n_fft is None: n_fft = x.shape[-1] @@ -410,6 +412,7 @@ def psd_array_multitaper(x, sfreq, fmin=0, fmax=np.inf, bandwidth=None, ----- .. versionadded:: 0.14.0 """ + rfftfreq = _import_fft('rfftfreq') _check_option('normalization', normalization, ['length', 'full']) # Reshape data so its 2-D for parallelization diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index 6adaa310876..7e1dac881d0 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -84,7 +84,8 @@ def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False): @verbose def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, - n_per_seg=None, n_jobs=1, average='mean', verbose=None): + n_per_seg=None, n_jobs=1, average='mean', window='hamming', + verbose=None): """Compute power spectral density (PSD) using Welch's method. Parameters @@ -107,13 +108,12 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, Length of each Welch segment (windowed with a Hamming window). Defaults to None, which sets n_per_seg equal to n_fft. %(n_jobs)s - average : str | None - How to average the segments. If ``mean`` (default), calculate the - arithmetic mean. If ``median``, calculate the median, corrected for - its bias relative to the mean. If ``None``, returns the unaggregated - segments. + %(average-psd)s .. versionadded:: 0.19.0 + %(window-psd)s + + .. 
versionadded:: 0.22.0 %(verbose)s Returns @@ -154,11 +154,14 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, # Parallelize across first N-1 dimensions x_splits = np.array_split(x, n_jobs) + logger.debug( + f'Spectogram using {n_fft}-point FFT on {n_per_seg} samples with ' + f'{n_overlap} overlap and {window} window') from scipy.signal import spectrogram parallel, my_spect_func, n_jobs = parallel_func(_spect_func, n_jobs=n_jobs) func = partial(spectrogram, noverlap=n_overlap, nperseg=n_per_seg, - nfft=n_fft, fs=sfreq) + nfft=n_fft, fs=sfreq, window=window) f_spect = parallel(my_spect_func(d, func=func, freq_sl=freq_sl, average=average) for d in x_splits) @@ -173,7 +176,8 @@ def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0, @verbose def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1, - reject_by_annotation=True, average='mean', verbose=None): + reject_by_annotation=True, average='mean', window='hamming', + verbose=None): """Compute the power spectral density (PSD) using Welch's method. Calculates periodograms for a sliding window over the time dimension, then @@ -209,13 +213,12 @@ def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, %(reject_by_annotation_raw)s .. versionadded:: 0.15.0 - average : str | None - How to average the segments. If ``mean`` (default), calculate the - arithmetic mean. If ``median``, calculate the median, corrected for - its bias relative to the mean. If ``None``, returns the unaggregated - segments. + %(average-psd)s .. versionadded:: 0.19.0 + %(window-psd)s + + .. versionadded:: 0.22.0 %(verbose)s Returns @@ -246,7 +249,8 @@ def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256, reject_by_annotation=reject_by_annotation) return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft, n_overlap=n_overlap, n_per_seg=n_per_seg, - average=average, n_jobs=n_jobs, verbose=verbose) + average=average, n_jobs=n_jobs, window=window, + verbose=verbose) @verbose diff --git a/mne/time_frequency/tests/test_psd.py b/mne/time_frequency/tests/test_psd.py index ce4618ac0a7..61e9145206e 100644 --- a/mne/time_frequency/tests/test_psd.py +++ b/mne/time_frequency/tests/test_psd.py @@ -6,7 +6,7 @@ from mne import pick_types, Epochs, read_events from mne.io import RawArray, read_raw_fif -from mne.utils import run_tests_if_main +from mne.utils import catch_logging from mne.time_frequency import psd_welch, psd_multitaper, psd_array_welch base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -30,6 +30,12 @@ def test_psd_nan(): x[0], float(n_fft), n_fft=n_fft, n_overlap=n_overlap) assert_allclose(freqs, freqs_2) assert_allclose(psds[0], psds_2) + # defaults + with catch_logging() as log: + psd_array_welch(x, float(n_fft), verbose='debug') + log = log.getvalue() + assert 'using 256-point FFT on 256 samples with 0 overlap' in log + assert 'hamming window' in log def test_psd(): @@ -61,7 +67,15 @@ def test_psd(): for func, kws in funcs: kws = kws.copy() kws.update(kws_psd) - psds, freqs = func(raw, proj=False, **kws) + kws.update(verbose='debug') + if func is psd_welch: + kws.update(window='hann') + with catch_logging() as log: + psds, freqs = func(raw, proj=False, **kws) + log = log.getvalue() + if func is psd_welch: + assert f'{n_fft}-point FFT on {n_fft} samples with 0 overl' in log + assert 'hann window' in log psds_proj, freqs_proj = func(raw, proj=True, **kws) assert psds.shape == 
(len(kws['picks']), len(freqs)) @@ -264,6 +278,3 @@ def test_compares_psd(): assert (np.sum(psds_welch < 0) == 0) assert (np.sum(psds_mpl < 0) == 0) - - -run_tests_if_main() diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py index 0784ace90b8..68297422883 100644 --- a/mne/time_frequency/tests/test_stft.py +++ b/mne/time_frequency/tests/test_stft.py @@ -1,3 +1,9 @@ +# Authors : Alexandre Gramfort +# Eric Larson +# +# License : BSD 3-clause + +import pytest import numpy as np from scipy import linalg from numpy.testing import assert_almost_equal, assert_array_almost_equal @@ -6,21 +12,22 @@ from mne.time_frequency._stft import stft_norm2 -def test_stft(): +@pytest.mark.parametrize('T', (127, 128, 255, 256, 1337)) +@pytest.mark.parametrize('wsize', (128, 256)) +@pytest.mark.parametrize('tstep', (4, 64)) +@pytest.mark.parametrize('f', (7., 23.)) # should be close to fftfreqs +def test_stft(T, wsize, tstep, f): """Test stft and istft tight frame property.""" sfreq = 1000. # Hz - f = 7. # Hz - for T in [127, 128]: # try with even and odd numbers + if True: # just to minimize diff # Test with low frequency signal t = np.arange(T).astype(np.float64) x = np.sin(2 * np.pi * f * t / sfreq) x = np.array([x, x + 1.]) - wsize = 128 - tstep = 4 X = stft(x, wsize, tstep) xp = istft(X, tstep, Tx=T) - freqs = stftfreq(wsize, sfreq=1000) + freqs = stftfreq(wsize, sfreq=sfreq) max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))] diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py index 01fd66b386c..0c2aafbddf9 100644 --- a/mne/time_frequency/tests/test_tfr.py +++ b/mne/time_frequency/tests/test_tfr.py @@ -78,6 +78,7 @@ def test_time_frequency(): # Test first with a single epoch power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) + # Now compute evoked evoked = epochs.average() pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True) @@ -256,7 +257,7 @@ def test_time_frequency(): # When convolving in time, wavelets must not be longer than the data pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws, use_fft=False) - with pytest.warns(UserWarning, match='one of the wavelets is longer'): + with pytest.warns(UserWarning, match='one of the wavelets.*is longer'): cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True) # Check for off-by-one errors when using wavelets with an even number of @@ -470,16 +471,53 @@ def test_io(): events[:, 0] = np.arange(n_events) events[:, 2] = np.ones(n_events) event_id = {'a/b': 1} + # fake selection + n_dropped_epochs = 3 + selection = np.arange(n_events + n_dropped_epochs)[n_dropped_epochs:] + drop_log = tuple([('IGNORED',) for i in range(n_dropped_epochs)] + + [() for i in range(n_events)]) tfr = EpochsTFR(info, data=data, times=times, freqs=freqs, comment='test', method='crazy-tfr', events=events, - event_id=event_id, metadata=meta) - tfr.save(fname, True) - read_tfr = read_tfrs(fname)[0] - assert_array_equal(tfr.data, read_tfr.data) - assert_metadata_equal(tfr.metadata, read_tfr.metadata) - assert_array_equal(tfr.events, read_tfr.events) - assert tfr.event_id == read_tfr.event_id + event_id=event_id, selection=selection, drop_log=drop_log, + metadata=meta) + fname_save = fname + tfr.save(fname_save, True) + fname_write = op.join(tempdir, 'test3-tfr.h5') + write_tfrs(fname_write, tfr, overwrite=True) + for fname in [fname_save, fname_write]: + read_tfr = read_tfrs(fname)[0] + assert_array_equal(tfr.data, read_tfr.data) + 
assert_metadata_equal(tfr.metadata, read_tfr.metadata) + assert_array_equal(tfr.events, read_tfr.events) + assert tfr.event_id == read_tfr.event_id + assert_array_equal(tfr.selection, read_tfr.selection) + assert tfr.drop_log == read_tfr.drop_log + with pytest.raises(NotImplementedError, match='condition not supported'): + tfr = read_tfrs(fname, condition='a') + + +def test_init_EpochsTFR(): + """Test __init__ for EpochsTFR.""" + # Create fake data: + data = np.zeros((3, 3, 3, 3)) + times = np.array([.1, .2, .3]) + freqs = np.array([.10, .20, .30]) + info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000., + ['mag', 'mag', 'mag']) + data_x = data[:, :, :, 0] + with pytest.raises(ValueError, match='data should be 4d. Got 3'): + tfr = EpochsTFR(info, data=data_x, times=times, freqs=freqs) + data_x = data[:, :-1, :, :] + with pytest.raises(ValueError, match="channels and data size don't"): + tfr = EpochsTFR(info, data=data_x, times=times, freqs=freqs) + times_x = times[:-1] + with pytest.raises(ValueError, match="times and data size don't match"): + tfr = EpochsTFR(info, data=data, times=times_x, freqs=freqs) + freqs_x = freqs[:-1] + with pytest.raises(ValueError, match="frequencies and data size don't"): + tfr = EpochsTFR(info, data=data, times=times_x, freqs=freqs_x) + del(tfr) def test_plot(): @@ -735,6 +773,47 @@ def test_compute_tfr_correct(method, decim): assert freqs[np.argmax(np.abs(tfr).mean(-1))] == f +def test_averaging_epochsTFR(): + """Test that EpochsTFR averaging methods work.""" + # Setup for reading the raw data + event_id = 1 + tmin = -0.2 + tmax = 0.498 # Allows exhaustive decimation testing + + freqs = np.arange(6, 20, 5) # define frequencies of interest + n_cycles = freqs / 4. + + raw = read_raw_fif(raw_fname) + # only pick a few events for speed + events = read_events(event_fname)[:4] + + include = [] + exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more + + # picks MEG gradiometers + picks = pick_types(raw.info, meg='grad', eeg=False, + stim=False, include=include, exclude=exclude) + picks = picks[:2] + + epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks) + + # Obtain EpochsTFR + power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, + average=False, use_fft=True, + return_itc=False) + + # Test average methods + for func, method in zip( + [np.mean, np.median, np.mean], + ['mean', 'median', lambda x: np.mean(x, axis=0)]): + avgpower = power.average(method=method) + np.testing.assert_array_equal(func(power.data, axis=0), + avgpower.data) + with pytest.raises(RuntimeError, match='You passed a function that ' + 'resulted in data'): + power.average(method=np.mean) + + @requires_pandas def test_getitem_epochsTFR(): """Test GetEpochsMixin in the context of EpochsTFR.""" @@ -742,69 +821,219 @@ def test_getitem_epochsTFR(): # Setup for reading the raw data and select a few trials raw = read_raw_fif(raw_fname) events = read_events(event_fname) - n_events = 10 - - # create fake metadata - rng = np.random.RandomState(42) - rt = rng.uniform(size=(n_events,)) - trialtypes = np.array(['face', 'place']) - trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)] - meta = DataFrame(dict(RT=rt, Trial=trial)) - event_id = dict(a=1, b=2, c=3, d=4) - epochs = Epochs(raw, events[:n_events], event_id=event_id, metadata=meta, - decim=1) - - freqs = np.arange(12., 17., 2.) # define frequencies of interest - n_cycles = freqs / 2. 
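# Worked arithmetic for the n_cycles choice above (illustrative only): the
# wavelet length in seconds is n_cycles / freq, so n_cycles = freqs / 2. gives
# (freqs / 2.) / freqs = 0.5 s at every frequency, e.g. 12 Hz -> 6 / 12 = 0.5 s.
import numpy as np
_freqs = np.arange(12., 17., 2.)
assert np.allclose((_freqs / 2.) / _freqs, 0.5)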
# 0.5 second time windows for all frequencies - - # Choose time x (full) bandwidth product - time_bandwidth = 4.0 # With 0.5 s time windows, this gives 8 Hz smoothing - kwargs = dict(freqs=freqs, n_cycles=n_cycles, use_fft=True, - time_bandwidth=time_bandwidth, return_itc=False, - average=False, n_jobs=1) - power = tfr_multitaper(epochs, **kwargs) + # create fake data, test with and without dropping epochs + for n_drop_epochs in [0, 2]: + n_events = 12 + # create fake metadata + rng = np.random.RandomState(42) + rt = rng.uniform(size=(n_events,)) + trialtypes = np.array(['face', 'place']) + trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)] + meta = DataFrame(dict(RT=rt, Trial=trial)) + event_id = dict(a=1, b=2, c=3, d=4) + epochs = Epochs(raw, events[:n_events], event_id=event_id, + metadata=meta, decim=1) + epochs.drop(np.arange(n_drop_epochs)) + n_events -= n_drop_epochs + + freqs = np.arange(12., 17., 2.) # define frequencies of interest + n_cycles = freqs / 2. # 0.5 second time windows for all frequencies + + # Choose time x (full) bandwidth product + time_bandwidth = 4.0 + # With 0.5 s time windows, this gives 8 Hz smoothing + kwargs = dict(freqs=freqs, n_cycles=n_cycles, use_fft=True, + time_bandwidth=time_bandwidth, return_itc=False, + average=False, n_jobs=1) + power = tfr_multitaper(epochs, **kwargs) + + # Check that power and epochs metadata is the same + assert_metadata_equal(epochs.metadata, power.metadata) + assert_metadata_equal(epochs[::2].metadata, power[::2].metadata) + assert_metadata_equal(epochs['RT < .5'].metadata, + power['RT < .5'].metadata) + assert_array_equal(epochs.selection, power.selection) + assert epochs.drop_log == power.drop_log + + # Check that get power is functioning + assert_array_equal(power[3:6].data, power.data[3:6]) + assert_array_equal(power[3:6].events, power.events[3:6]) + assert_array_equal(epochs.selection[3:6], power.selection[3:6]) + + indx_check = (power.metadata['Trial'] == 'face') + try: + indx_check = indx_check.to_numpy() + except Exception: + pass # older Pandas + indx_check = indx_check.nonzero() + assert_array_equal(power['Trial == "face"'].events, + power.events[indx_check]) + assert_array_equal(power['Trial == "face"'].data, + power.data[indx_check]) + + # Check that the wrong Key generates a Key Error for Metadata search + with pytest.raises(KeyError): + power['Trialz == "place"'] + + # Test length function + assert len(power) == n_events + assert len(power[3:6]) == 3 + + # Test iteration function + for ind, power_ep in enumerate(power): + assert_array_equal(power_ep, power.data[ind]) + if ind == 5: + break + + # Test that current state is maintained + assert_array_equal(power.next(), power.data[ind + 1]) # Check decim affects sfreq power_decim = tfr_multitaper(epochs, decim=2, **kwargs) assert power.info['sfreq'] / 2. 
== power_decim.info['sfreq'] - # Check that power and epochs metadata is the same - assert_metadata_equal(epochs.metadata, power.metadata) - assert_metadata_equal(epochs[::2].metadata, power[::2].metadata) - assert_metadata_equal(epochs['RT < .5'].metadata, - power['RT < .5'].metadata) - - # Check that get power is functioning - assert_array_equal(power[3:6].data, power.data[3:6]) - assert_array_equal(power[3:6].events, power.events[3:6]) - - indx_check = (power.metadata['Trial'] == 'face') - try: - indx_check = indx_check.to_numpy() - except Exception: - pass # older Pandas - indx_check = indx_check.nonzero() - assert_array_equal(power['Trial == "face"'].events, - power.events[indx_check]) - assert_array_equal(power['Trial == "face"'].data, - power.data[indx_check]) - - # Check that the wrong Key generates a Key Error for Metadata search - with pytest.raises(KeyError): - power['Trialz == "place"'] - - # Test length function - assert len(power) == n_events - assert len(power[3:6]) == 3 - - # Test iteration function - for ind, power_ep in enumerate(power): - assert_array_equal(power_ep, power.data[ind]) - if ind == 5: - break - - # Test that current state is maintained - assert_array_equal(power.next(), power.data[ind + 1]) + +@requires_pandas +def test_to_data_frame(): + """Test EpochsTFR Pandas exporter.""" + # Create fake EpochsTFR data: + n_epos = 3 + ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'] + n_picks = len(ch_names) + ch_types = ['eeg'] * n_picks + n_freqs = 5 + n_times = 6 + data = np.random.rand(n_epos, n_picks, n_freqs, n_times) + times = np.arange(6) + srate = 1000. + freqs = np.arange(5) + events = np.zeros((n_epos, 3), dtype=int) + events[:, 0] = np.arange(n_epos) + events[:, 2] = np.arange(5, 5 + n_epos) + event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])} + info = mne.create_info(ch_names, srate, ch_types) + tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs, + events=events, event_id=event_id) + # test index checking + with pytest.raises(ValueError, match='options. Valid index options are'): + tfr.to_data_frame(index=['foo', 'bar']) + with pytest.raises(ValueError, match='"qux" is not a valid option'): + tfr.to_data_frame(index='qux') + with pytest.raises(TypeError, match='index must be `None` or a string '): + tfr.to_data_frame(index=np.arange(400)) + # test wide format + df_wide = tfr.to_data_frame() + assert all(np.in1d(tfr.ch_names, df_wide.columns)) + assert all(np.in1d(['time', 'condition', 'freq', 'epoch'], + df_wide.columns)) + # test long format + df_long = tfr.to_data_frame(long_format=True) + expected = ('condition', 'epoch', 'freq', 'time', 'channel', 'ch_type', + 'value') + assert set(expected) == set(df_long.columns) + assert set(tfr.ch_names) == set(df_long['channel']) + assert(len(df_long) == tfr.data.size) + # test long format w/ index + df_long = tfr.to_data_frame(long_format=True, index=['freq']) + del df_wide, df_long + # test whether data is in correct shape + df = tfr.to_data_frame(index=['condition', 'epoch', 'freq', 'time']) + data = tfr.data + assert_array_equal(df.values[:, 0], + data[:, 0, :, :].reshape(1, -1).squeeze()) + # compare arbitrary observation: + assert df.loc[('he', slice(None), freqs[1], times[2] * srate), + ch_names[3]].iloc[0] == data[1, 3, 1, 2] + + # Check also for AverageTFR: + tfr = tfr.average() + with pytest.raises(ValueError, match='options. 
Valid index options are'): + tfr.to_data_frame(index=['epoch', 'condition']) + with pytest.raises(ValueError, match='"epoch" is not a valid option'): + tfr.to_data_frame(index='epoch') + with pytest.raises(TypeError, match='index must be `None` or a string '): + tfr.to_data_frame(index=np.arange(400)) + # test wide format + df_wide = tfr.to_data_frame() + assert all(np.in1d(tfr.ch_names, df_wide.columns)) + assert all(np.in1d(['time', 'freq'], df_wide.columns)) + # test long format + df_long = tfr.to_data_frame(long_format=True) + expected = ('freq', 'time', 'channel', 'ch_type', 'value') + assert set(expected) == set(df_long.columns) + assert set(tfr.ch_names) == set(df_long['channel']) + assert(len(df_long) == tfr.data.size) + # test long format w/ index + df_long = tfr.to_data_frame(long_format=True, index=['freq']) + del df_wide, df_long + # test whether data is in correct shape + df = tfr.to_data_frame(index=['freq', 'time']) + data = tfr.data + assert_array_equal(df.values[:, 0], + data[0, :, :].reshape(1, -1).squeeze()) + # compare arbitrary observation: + assert df.loc[(freqs[1], times[2] * srate), ch_names[3]] == \ + data[3, 1, 2] + + +@requires_pandas +@pytest.mark.parametrize('index', ('time', ['condition', 'time', 'freq'], + ['freq', 'time'], ['time', 'freq'], None)) +def test_to_data_frame_index(index): + """Test index creation in epochs Pandas exporter.""" + # Create fake EpochsTFR data: + n_epos = 3 + ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'] + n_picks = len(ch_names) + ch_types = ['eeg'] * n_picks + n_freqs = 5 + n_times = 6 + data = np.random.rand(n_epos, n_picks, n_freqs, n_times) + times = np.arange(6) + freqs = np.arange(5) + events = np.zeros((n_epos, 3), dtype=int) + events[:, 0] = np.arange(n_epos) + events[:, 2] = np.arange(5, 8) + event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])} + info = mne.create_info(ch_names, 1000., ch_types) + tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs, + events=events, event_id=event_id) + df = tfr.to_data_frame(picks=[0, 2, 3], index=index) + # test index order/hierarchy preservation + if not isinstance(index, list): + index = [index] + assert (df.index.names == index) + # test that non-indexed data were present as columns + non_index = list(set(['condition', 'time', 'freq', 'epoch']) - set(index)) + if len(non_index): + assert all(np.in1d(non_index, df.columns)) + + +@requires_pandas +@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta')) +def test_to_data_frame_time_format(time_format): + """Test time conversion in epochs Pandas exporter.""" + from pandas import Timedelta + n_epos = 3 + ch_names = ['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'] + n_picks = len(ch_names) + ch_types = ['eeg'] * n_picks + n_freqs = 5 + n_times = 6 + data = np.random.rand(n_epos, n_picks, n_freqs, n_times) + times = np.arange(6) + freqs = np.arange(5) + events = np.zeros((n_epos, 3), dtype=int) + events[:, 0] = np.arange(n_epos) + events[:, 2] = np.arange(5, 8) + event_id = {k: v for v, k in zip(events[:, 2], ['ha', 'he', 'hu'])} + info = mne.create_info(ch_names, 1000., ch_types) + tfr = mne.time_frequency.EpochsTFR(info, data, times, freqs, + events=events, event_id=event_id) + # test time_format + df = tfr.to_data_frame(time_format=time_format) + dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta} + assert isinstance(df['time'].iloc[0], dtypes[time_format]) run_tests_if_main() diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 74077a48a4b..eaecc035eda 100644 --- 
a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -11,21 +11,22 @@ from copy import deepcopy from functools import partial -from math import sqrt import numpy as np -from scipy import linalg from .multitaper import dpss_windows from ..baseline import rescale -from ..fixes import fft, ifft +from ..fixes import _import_fft +from ..filter import next_fast_len from ..parallel import parallel_func from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname, sizeof_fmt, GetEpochsMixin, _prepare_read_metadata, fill_doc, _prepare_write_metadata, _check_event_id, _gen_events, SizeMixin, _is_numeric, _check_option, - _validate_type) + _validate_type, _check_combine, _check_pandas_installed, + _check_pandas_index_arguments, _check_time_format, + _convert_times, _build_data_frame) from ..channels.channels import ContainsMixin, UpdateChannelsMixin from ..channels.layout import _merge_ch_data, _pair_grad_sensors from ..io.pick import (pick_info, _picks_to_idx, channel_type, _pick_inst, @@ -95,7 +96,7 @@ def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False): real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2) oscillation -= real_offset W = oscillation * gaussian_enveloppe - W /= sqrt(0.5) * linalg.norm(W.ravel()) + W /= np.sqrt(0.5) * np.linalg.norm(W.ravel()) Ws.append(W) return Ws @@ -161,7 +162,7 @@ def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False): if zero_mean: # to make it zero mean real_offset = Wk.mean() Wk -= real_offset - Wk /= sqrt(0.5) * linalg.norm(Wk.ravel()) + Wk /= np.sqrt(0.5) * np.linalg.norm(Wk.ravel()) Wm.append(Wk) @@ -172,7 +173,24 @@ def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False): # Low level convolution -def _cwt(X, Ws, mode="same", decim=1, use_fft=True): +def _get_nfft(wavelets, X, use_fft=True, check=True): + n_times = X.shape[-1] + max_size = max(w.size for w in wavelets) + if max_size > n_times: + msg = (f'At least one of the wavelets ({max_size}) is longer than the ' + f'signal ({n_times}). Consider using a longer signal or ' + 'shorter wavelets.') + if check: + if use_fft: + warn(msg, UserWarning) + else: + raise ValueError(msg) + nfft = n_times + max_size - 1 + nfft = next_fast_len(nfft) # 2 ** int(np.ceil(np.log2(nfft))) + return nfft + + +def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True): """Compute cwt with fft based convolutions or temporal convolutions. Parameters @@ -181,6 +199,8 @@ def _cwt(X, Ws, mode="same", decim=1, use_fft=True): The data. Ws : list of array Wavelets time series. + fsize : int + FFT length. mode : {'full', 'valid', 'same'} See numpy.convolve. decim : int | slice, default 1 @@ -199,36 +219,21 @@ def _cwt(X, Ws, mode="same", decim=1, use_fft=True): out : array, shape (n_signals, n_freqs, n_time_decim) The time-frequency transform of the signals. 
""" + fft, ifft = _import_fft(('fft', 'ifft')) _check_option('mode', mode, ['same', 'valid', 'full']) decim = _check_decim(decim) X = np.asarray(X) # Precompute wavelets for given frequency range to save time - n_signals, n_times = X.shape + _, n_times = X.shape n_times_out = X[:, decim].shape[1] n_freqs = len(Ws) - Ws_max_size = max(W.size for W in Ws) - size = n_times + Ws_max_size - 1 - # Always use 2**n-sized FFT - fsize = 2 ** int(np.ceil(np.log2(size))) - # precompute FFTs of Ws if use_fft: fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128) - - warn_me = True - for i, W in enumerate(Ws): - if use_fft: + for i, W in enumerate(Ws): fft_Ws[i] = fft(W, fsize) - if len(W) > n_times and warn_me: - msg = ('At least one of the wavelets is longer than the signal. ' - 'Consider padding the signal or using shorter wavelets.') - if use_fft: - warn(msg, UserWarning) - warn_me = False # Suppress further warnings - else: - raise ValueError(msg) # Make generator looping across signals tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128) @@ -380,6 +385,8 @@ def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet', out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype) # Parallel computation + all_Ws = sum([list(W) for W in Ws], list()) + _get_nfft(all_Ws, epoch_data, use_fft) parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs) # Parallelization is applied across channels. @@ -510,7 +517,10 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim): # Loops across tapers. for W in Ws: - coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft) + # No need to check here, it's done earlier (outside parallel part) + nfft = _get_nfft(W, X, use_fft, check=False) + coefs = _cwt_gen( + X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft) # Inter-trial phase locking is apparently computed per taper... if 'itc' in output: @@ -586,11 +596,16 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1): mne.time_frequency.tfr_morlet : Compute time-frequency decomposition with Morlet wavelets. """ - decim = _check_decim(decim) - n_signals, n_times = X[:, decim].shape + nfft = _get_nfft(Ws, X, use_fft) + return _cwt_array(X, Ws, nfft, mode, decim, use_fft) - coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft) +def _cwt_array(X, Ws, nfft, mode, decim, use_fft): + decim = _check_decim(decim) + coefs = _cwt_gen( + X, Ws, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft) + + n_signals, n_times = X[:, decim].shape tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128) for k, tfr in enumerate(coefs): tfrs[k] = tfr @@ -644,12 +659,15 @@ def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average, meta = deepcopy(inst._metadata) evs = deepcopy(inst.events) ev_id = deepcopy(inst.event_id) + selection = deepcopy(inst.selection) + drop_log = deepcopy(inst.drop_log) else: # if the input is of class Evoked - meta = evs = ev_id = None + meta = evs = ev_id = selection = drop_log = None out = EpochsTFR(info, power, times, freqs, method='%s-power' % method, - events=evs, event_id=ev_id, metadata=meta) + events=evs, event_id=ev_id, selection=selection, + drop_log=drop_log, metadata=meta) return out @@ -982,15 +1000,16 @@ def apply_baseline(self, baseline, mode='mean', verbose=None): rescale(self.data, self.times, baseline, mode, copy=False) return self - def save(self, fname, overwrite=False): + @verbose + def save(self, fname, overwrite=False, *, verbose=None): """Save TFR object to hdf5 file. 
Parameters ---------- fname : str The file name, which should end with ``-tfr.h5``. - overwrite : bool - If True, overwrite file (if it exists). Defaults to False. + %(overwrite)s + %(verbose)s See Also -------- @@ -998,6 +1017,80 @@ def save(self, fname, overwrite=False): """ write_tfrs(fname, self, overwrite=overwrite) + @fill_doc + def to_data_frame(self, picks=None, index=None, long_format=False, + time_format='ms'): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, + additional columns ``'time'``, ``'freq'``, ``'epoch'``, and + ``'condition'`` (epoch event description) are added, unless ``index`` + is not ``None`` (in which case the columns specified in ``index`` will + be used to form the DataFrame's index instead). ``'epoch'``, and + ``'condition'`` are not supported for ``AverageTFR``. + + Parameters + ---------- + %(picks_all)s + %(df_index_epo)s + Valid string values are ``'time'``, ``'freq'``, ``'epoch'``, and + ``'condition'`` for ``EpochsTFR`` and ``'time'`` and ``'freq'`` + for ``AverageTFR``. + Defaults to ``None``. + %(df_longform_epo)s + %(df_time_format)s + + .. versionadded:: 0.23 + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ['time', 'freq'] + if isinstance(self, EpochsTFR): + valid_index_args.extend(['epoch', 'condition']) + valid_time_formats = ['ms', 'timedelta'] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats) + # get data + times = self.times + picks = _picks_to_idx(self.info, picks, 'all', exclude=()) + if isinstance(self, EpochsTFR): + data = self.data[:, picks, :, :] + else: + data = self.data[np.newaxis, picks] # add singleton "epochs" axis + n_epochs, n_picks, n_freqs, n_times = data.shape + # reshape to (epochs*freqs*times) x signals + data = np.moveaxis(data, 1, -1) + data = data.reshape(n_epochs * n_freqs * n_times, n_picks) + # prepare extra columns / multiindex + mindex = list() + times = np.tile(times, n_epochs * n_freqs) + times = _convert_times(self, times, time_format) + mindex.append(('time', times)) + freqs = self.freqs + freqs = np.tile(np.repeat(freqs, n_times), n_epochs) + mindex.append(('freq', freqs)) + if isinstance(self, EpochsTFR): + mindex.append(('epoch', np.repeat(self.selection, + n_times * n_freqs))) + rev_event_id = {v: k for k, v in self.event_id.items()} + conditions = [rev_event_id[k] for k in self.events[:, 2]] + mindex.append(('condition', np.repeat(conditions, + n_times * n_freqs))) + assert all(len(mdx) == len(mindex[0]) for mdx in mindex) + # build DataFrame + if isinstance(self, EpochsTFR): + default_index = ['condition', 'epoch', 'freq', 'time'] + else: + default_index = ['freq', 'time'] + df = _build_data_frame(self, data, picks, long_format, mindex, index, + default_index=default_index) + return df + @fill_doc class AverageTFR(_BaseTFR): @@ -1998,6 +2091,16 @@ class EpochsTFR(_BaseTFR, GetEpochsMixin): associated events. If None, all events will be used and a dict is created with string integer names corresponding to the event id integers. + selection : iterable | None + Iterable of indices of selected epochs. If ``None``, will be + automatically generated, corresponding to all non-zero events. + + .. 
versionadded:: 0.23 + drop_log : tuple | None + Tuple of tuple of strings indicating which epochs have been marked to + be ignored. + + .. versionadded:: 0.23 metadata : instance of pandas.DataFrame | None A :class:`pandas.DataFrame` containing pertinent information for each trial. See :class:`mne.Epochs` for further details. @@ -2023,6 +2126,26 @@ class EpochsTFR(_BaseTFR, GetEpochsMixin): Array containing sample information as event_id event_id : dict | None Names of conditions correspond to event_ids + selection : array + List of indices of selected events (not dropped or ignored etc.). For + example, if the original event array had 4 events and the second event + has been dropped, this attribute would be np.array([0, 2, 3]). + drop_log : tuple of tuple + A tuple of the same length as the event array used to initialize the + ``EpochsTFR`` object. If the i-th original event is still part of the + selection, drop_log[i] will be an empty tuple; otherwise it will be + a tuple of the reasons the event is not longer in the selection, e.g.: + + - ``'IGNORED'`` + If it isn't part of the current subset defined by the user + - ``'NO_DATA'`` or ``'TOO_SHORT'`` + If epoch didn't contain enough data names of channels that + exceeded the amplitude threshold + - ``'EQUALIZED_COUNTS'`` + See :meth:`~mne.Epochs.equalize_event_counts` + - ``'USER'`` + For user-defined reasons (see :meth:`~mne.Epochs.drop`). + metadata : pandas.DataFrame, shape (n_events, n_cols) | None DataFrame containing pertinent information for each trial Notes @@ -2032,7 +2155,8 @@ class EpochsTFR(_BaseTFR, GetEpochsMixin): @verbose def __init__(self, info, data, times, freqs, comment=None, method=None, - events=None, event_id=None, metadata=None, verbose=None): + events=None, event_id=None, selection=None, + drop_log=None, metadata=None, verbose=None): # noqa: D102 self.info = info if data.ndim != 4: @@ -2050,17 +2174,38 @@ def __init__(self, info, data, times, freqs, comment=None, method=None, if events is None: n_epochs = len(data) events = _gen_events(n_epochs) + if selection is None: + n_epochs = len(data) + selection = np.arange(n_epochs) + if drop_log is None: + n_epochs_prerejection = max(len(events), max(selection) + 1) + drop_log = tuple( + () if k in selection else ('IGNORED',) + for k in range(n_epochs_prerejection)) + else: + drop_log = drop_log + # check consistency: + assert len(selection) == len(events) + assert len(drop_log) >= len(events) + assert len(selection) == sum( + (len(dl) == 0 for dl in drop_log)) event_id = _check_event_id(event_id, events) self.data = data self.times = np.array(times, dtype=float) self.freqs = np.array(freqs, dtype=float) self.events = events self.event_id = event_id + self.selection = selection + self.drop_log = drop_log self.comment = comment self.method = method self.preload = True self.metadata = metadata + @property + def _detrend_picks(self): + return list() + def __repr__(self): # noqa: D105 s = "time : [%f, %f]" % (self.times[0], self.times[-1]) s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1]) @@ -2075,15 +2220,46 @@ def __abs__(self): epochs.data = np.abs(self.data) return epochs - def average(self): + def average(self, method='mean'): """Average the data across epochs. + Parameters + ---------- + method : str | callable + How to combine the data. If "mean"/"median", the mean/median + are returned. Otherwise, must be a callable which, when passed + an array of shape (n_epochs, n_channels, n_freqs, n_time) + returns an array of shape (n_channels, n_freqs, n_time). 
+ Note that due to file type limitations, the kind for all + these will be "average". + Returns ------- ave : instance of AverageTFR The averaged data. + + Notes + ----- + Passing in ``np.median`` is considered unsafe when there is complex + data because NumPy doesn't compute the marginal median. Numpy currently + sorts the complex values by real part and return whatever value is + computed. Use with caution. We use the marginal median in the + complex case (i.e. the median of each component separately) if + one passes in ``median``. See a discussion in scipy: + + https://github.com/scipy/scipy/pull/12676#issuecomment-783370228 """ - data = np.mean(self.data, axis=0) + # return a lambda function for computing a combination metric + # over epochs + func = _check_combine(mode=method) + data = func(self.data) + + if data.shape != self._data.shape[1:]: + raise RuntimeError( + 'You passed a function that resulted in data of shape {}, ' + 'but it should be {}.'.format( + data.shape, self._data.shape[1:])) + return AverageTFR(info=self.info.copy(), data=data, times=self.times.copy(), freqs=self.freqs.copy(), nave=self.data.shape[0], method=self.method, @@ -2241,19 +2417,20 @@ def _check_decim(decim): # i/o -def write_tfrs(fname, tfr, overwrite=False): +@verbose +def write_tfrs(fname, tfr, overwrite=False, *, verbose=None): """Write a TFR dataset to hdf5. Parameters ---------- fname : str The file name, which should end with ``-tfr.h5``. - tfr : AverageTFR instance, or list of AverageTFR instances + tfr : AverageTFR | list of AverageTFR | EpochsTFR The TFR dataset, or list of TFR datasets, to save in one file. Note. If .comment is not None, a name will be generated on the fly, based on the order in which the TFR objects are passed. - overwrite : bool - If True, overwrite file (if it exists). Defaults to False. + %(overwrite)s + %(verbose)s See Also -------- @@ -2282,6 +2459,8 @@ def _prepare_write_tfr(tfr, condition): elif hasattr(tfr, 'events'): # if EpochsTFR attributes['events'] = tfr.events attributes['event_id'] = tfr.event_id + attributes['selection'] = tfr.selection + attributes['drop_log'] = tfr.drop_log attributes['metadata'] = _prepare_write_metadata(tfr.metadata) return condition, attributes @@ -2299,7 +2478,7 @@ def read_tfrs(fname, condition=None): Returns ------- - tfrs : list of instances of AverageTFR | instance of AverageTFR + tfr : AverageTFR | list of AverageTFR | EpochsTFR Depending on ``condition`` either the TFR object or a list of multiple TFR objects. diff --git a/mne/transforms.py b/mne/transforms.py index 07e128157be..0e4453ca534 100644 --- a/mne/transforms.py +++ b/mne/transforms.py @@ -12,7 +12,6 @@ import numpy as np from copy import deepcopy -from scipy import linalg from .fixes import einsum, jit, mean from .io.constants import FIFF @@ -330,9 +329,9 @@ def rotation3d_align_z_axis(target_z_axis): # assert that r is a rotation matrix r^t * r = I and det(r) = 1 assert(np.any((r.dot(r.T) - np.identity(3)) < 1E-12)) - assert((linalg.det(r) - 1.0) < 1E-12) + assert((np.linalg.det(r) - 1.0) < 1E-12) # assert that r maps [0 0 1] on the device z axis (target_z_axis) - assert(linalg.norm(target_z_axis - r.dot([0, 0, 1])) < 1e-12) + assert(np.linalg.norm(target_z_axis - r.dot([0, 0, 1])) < 1e-12) return r @@ -587,7 +586,7 @@ def invert_transform(trans): inv_trans : dict Inverse transform. 
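As a quick standalone check of why plain ``numpy.linalg`` suffices for these calls (the 4x4 matrix below is made up), mirroring the determinant and inverse round-trip used in this hunk::

    import numpy as np

    theta = np.deg2rad(30)
    rot = np.array([[np.cos(theta), -np.sin(theta), 0.],
                    [np.sin(theta), np.cos(theta), 0.],
                    [0., 0., 1.]])
    trans = np.eye(4)
    trans[:3, :3] = rot
    trans[:3, 3] = [0.01, -0.02, 0.03]  # arbitrary translation (m)

    assert np.allclose(rot @ rot.T, np.eye(3))  # proper rotation
    assert np.isclose(np.linalg.det(rot), 1.)
    # inverting the homogeneous transform round-trips to the identity
    assert np.allclose(trans @ np.linalg.inv(trans), np.eye(4))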
""" - return Transform(trans['to'], trans['from'], linalg.inv(trans['trans'])) + return Transform(trans['to'], trans['from'], np.linalg.inv(trans['trans'])) def transform_surface_to(surf, dest, trans, copy=False): @@ -660,12 +659,12 @@ def get_ras_to_neuromag_trans(nasion, lpa, rpa): "arrays of length 3.") right = rpa - lpa - right_unit = right / linalg.norm(right) + right_unit = right / np.linalg.norm(right) origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit anterior = nasion - origin - anterior_unit = anterior / linalg.norm(anterior) + anterior_unit = anterior / np.linalg.norm(anterior) superior_unit = np.cross(right_unit, anterior_unit) @@ -734,7 +733,7 @@ def _sph_to_cart(sph_pts): def _get_n_moments(order): """Compute the number of multipolar moments (spherical harmonics). - Equivalent to [1]_ Eq. 32. + Equivalent to :footcite:`DarvasEtAl2006` Eq. 32. .. note:: This count excludes ``degree=0`` (for ``order=0``). @@ -902,16 +901,16 @@ class _TPSWarp(object): Notes ----- - Adapted from code by `Wang Lin `_. + Based on the method by :footcite:`Bookstein1989` and + adapted from code by Wang Lin (wanglin193@hotmail.com>). References ---------- - .. [1] Bookstein, F. L. "Principal Warps: Thin Plate Splines and the - Decomposition of Deformations." IEEE Trans. Pattern Anal. Mach. - Intell. 11, 567-585, 1989. + .. footbibliography:: """ def fit(self, source, destination, reg=1e-3): + from scipy import linalg from scipy.spatial.distance import cdist assert source.shape[1] == destination.shape[1] == 3 assert source.shape[0] == destination.shape[0] @@ -980,7 +979,9 @@ class _SphericalSurfaceWarp(object): Notes ----- This class can be used to warp data from a source subject to - a destination subject, as described in [1]_. The procedure is: + a destination subject, as described in :footcite:`DarvasEtAl2006`. + + The procedure is: 1. Perform a spherical harmonic approximation to the source and destination surfaces, which smooths them and allows arbitrary @@ -995,9 +996,7 @@ class _SphericalSurfaceWarp(object): References ---------- - .. [1] Darvas F, Ermer JJ, Mosher JC, Leahy RM (2006). "Generic head - models for atlas-based EEG source analysis." - Human Brain Mapping 27:129-143 + .. footbibliography:: """ def __repr__(self): @@ -1040,6 +1039,7 @@ def fit(self, source, destination, order=4, reg=1e-5, center=True, inst : instance of SphericalSurfaceWarp The warping object (for chaining). 
""" + from scipy import linalg from .bem import _fit_sphere from .source_space import _check_spacing match_rr = _check_spacing(match, verbose=False)[2]['rr'] @@ -1383,6 +1383,7 @@ def _fit_matched_points(p, x, weights=None, scale=False): def _average_quats(quats, weights=None): """Average unit quaternions properly.""" + from scipy import linalg assert quats.ndim == 2 and quats.shape[1] in (3, 4) if weights is None: weights = np.ones(quats.shape[0]) diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index b79679586e7..8ff04cc1cb5 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -17,7 +17,9 @@ _check_path_like, _check_src_normal, _check_stc_units, _check_pyqt5_version, _check_sphere, _check_time_format, _check_freesurfer_home, _suggest, _require_version, - _on_missing, int_like, _safe_input) + _on_missing, _check_on_missing, int_like, _safe_input, + _check_all_same_channel_names, path_like, _ensure_events, + _check_eeglabio_installed, _infer_check_export_fmt) from .config import (set_config, get_config, get_config_path, set_cache_dir, set_memmap_min_size, get_subjects_dir, _get_stim_channel, sys_info, _get_extra_data_path, _get_root_dir, @@ -60,10 +62,10 @@ _mask_to_onsets_offsets, _array_equal_nan, _julian_to_cal, _cal_to_julian, _dt_to_julian, _julian_to_dt, _dt_to_stamp, _stamp_to_dt, - _check_dt, _ReuseCycle) + _check_dt, _ReuseCycle, _arange_div) from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata, _prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin) -from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym, - dgesdd, dgemm, zgemm, dgemv, ddot, LinAlgError, eigh) +from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym, eigh, + _get_blas_funcs) from .dataframe import (_set_pandas_dtype, _scale_dataframe_data, _convert_times, _build_data_frame) diff --git a/mne/utils/_logging.py b/mne/utils/_logging.py index a02be5ad063..46afa18ddfd 100644 --- a/mne/utils/_logging.py +++ b/mne/utils/_logging.py @@ -12,7 +12,9 @@ import logging import os.path as op import warnings +from typing import Any, Callable, TypeVar +from .docs import fill_doc from ..externals.decorator import FunctionMaker @@ -48,7 +50,12 @@ def filter(self, record): logger.addFilter(_filter) -def verbose(function): +# Provide help for static type checkers: +# https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators +_FuncT = TypeVar('_FuncT', bound=Callable[..., Any]) + + +def verbose(function: _FuncT) -> _FuncT: """Verbose decorator to allow functions to override log-level. Parameters @@ -79,6 +86,7 @@ def verbose(function): Examples -------- You can use the ``verbose`` argument to set the verbose level on the fly:: + >>> import mne >>> cov = mne.compute_raw_covariance(raw, verbose='WARNING') # doctest: +SKIP >>> cov = mne.compute_raw_covariance(raw, verbose='INFO') # doctest: +SKIP @@ -88,7 +96,6 @@ def verbose(function): """ # noqa: E501 # See https://decorator.readthedocs.io/en/latest/tests.documentation.html # #dealing-with-third-party-decorators - from .docs import fill_doc try: fill_doc(function) except TypeError: # nothing to add @@ -353,9 +360,12 @@ def warn(message, category=RuntimeWarning, module='mne'): globals().get('__warningregistry__', {})) # To avoid a duplicate warning print, we only emit the logger.warning if # one of the handlers is a FileHandler. See gh-5592 + # But it's also nice to be able to do: + # with mne.utils.use_log_level('warning', add_frames=3): + # so also check our add_frames attribute. 
if any(isinstance(h, logging.FileHandler) or getattr(h, '_mne_file_like', False) - for h in logger.handlers): + for h in logger.handlers) or _filter.add_frames: logger.warning(message) diff --git a/mne/utils/_testing.py b/mne/utils/_testing.py index 60cbbbfd619..e25eece7bc5 100644 --- a/mne/utils/_testing.py +++ b/mne/utils/_testing.py @@ -19,7 +19,6 @@ import numpy as np from numpy.testing import assert_array_equal, assert_allclose -from scipy import linalg from ._logging import warn, ClosingStringIO from .numerics import object_diff @@ -449,6 +448,7 @@ def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500., def assert_snr(actual, desired, tol): """Assert actual and desired arrays are within some SNR tolerance.""" + from scipy import linalg with np.errstate(divide='ignore'): # allow infinite snr = (linalg.norm(desired, ord='fro') / linalg.norm(desired - actual, ord='fro')) @@ -539,10 +539,12 @@ def _click_ch_name(fig, ch_index=0, button=1): """Click on a channel name in a raw/epochs/ICA browse-style plot.""" from ..viz.utils import _fake_click fig.canvas.draw() - x, y = fig.mne.ax_main.get_yticklabels()[ch_index].get_position() - xrange = np.diff(fig.mne.ax_main.get_xlim())[0] - _fake_click(fig, fig.mne.ax_main, (x - xrange / 50, y), - xform='data', button=button) + text = fig.mne.ax_main.get_yticklabels()[ch_index] + bbox = text.get_window_extent() + x = bbox.intervalx.mean() + y = bbox.intervaly.mean() + _fake_click(fig, fig.mne.ax_main, (x, y), xform='pix', + button=button) def _close_event(fig): diff --git a/mne/utils/check.py b/mne/utils/check.py index fee4b31b929..3a2ab8b1bfe 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py @@ -10,11 +10,13 @@ import operator import os import os.path as op -import sys from pathlib import Path +import sys +import warnings import numpy as np +from ..fixes import _median_complex from ._logging import warn, logger @@ -149,38 +151,52 @@ def _check_event_id(event_id, events): def _check_fname(fname, overwrite=False, must_exist=False, name='File', - allow_dir=False): + need_dir=False): """Check for file existence.""" - _validate_type(fname, 'path-like', 'fname') - if op.isfile(fname) or (allow_dir and op.isdir(fname)): + _validate_type(fname, 'path-like', name) + if op.exists(fname): if not overwrite: raise FileExistsError('Destination file exists. 
Please use option ' '"overwrite=True" to force overwriting.') elif overwrite != 'read': logger.info('Overwriting existing file.') - if must_exist and not os.access(fname, os.R_OK): - raise PermissionError( - '%s does not have read permissions: %s' % (name, fname)) + if must_exist: + if need_dir: + if not op.isdir(fname): + raise IOError( + f'Need a directory for {name} but found a file ' + f'at {fname}') + else: + if not op.isfile(fname): + raise IOError( + f'Need a file for {name} but found a directory ' + f'at {fname}') + if not os.access(fname, os.R_OK): + raise PermissionError( + f'{name} does not have read permissions: {fname}') elif must_exist: - raise FileNotFoundError('%s "%s" does not exist' % (name, fname)) - return str(fname) + raise FileNotFoundError(f'{name} does not exist: {fname}') + return str(op.abspath(fname)) -def _check_subject(class_subject, input_subject, raise_error=True, - kind='class subject attribute'): +def _check_subject(first, second, *, raise_error=True, + first_kind='class subject attribute', + second_kind='input subject'): """Get subject name from class.""" - if input_subject is not None: - _validate_type(input_subject, 'str', "subject input") - if class_subject is not None and input_subject != class_subject: - raise ValueError('%s (%r) did not match input subject (%r)' - % (kind, class_subject, input_subject)) - return input_subject - elif class_subject is not None: - _validate_type(class_subject, 'str', - "Either subject input or %s" % (kind,)) - return class_subject + if second is not None: + _validate_type(second, 'str', "subject input") + if first is not None and first != second: + raise ValueError( + f'{first_kind} ({repr(first)}) did not match ' + f'{second_kind} ({second})') + return second + elif first is not None: + _validate_type( + first, 'str', f"Either {second_kind} subject or {first_kind}") + return first elif raise_error is True: - raise ValueError('Neither subject input nor %s was a string' % (kind,)) + raise ValueError(f'Neither {second_kind} subject nor {first_kind} ' + 'was a string') return None @@ -260,6 +276,19 @@ def _check_pandas_installed(strict=True): return False +def _check_eeglabio_installed(strict=True): + """Aux function.""" + try: + import eeglabio + return eeglabio + except ImportError: + if strict is True: + raise RuntimeError('For this functionality to work, the eeglabio ' + 'library is required.') + else: + return False + + def _check_pandas_index_arguments(index, valid): """Check pandas index arguments.""" if index is None: @@ -323,6 +352,7 @@ def __instancecheck__(cls, other): int_like = _IntLike() +path_like = (str, Path) class _Callable(object): @@ -334,7 +364,7 @@ def __instancecheck__(cls, other): _multi = { 'str': (str,), 'numeric': (np.floating, float, int_like), - 'path-like': (str, Path), + 'path-like': path_like, 'int-like': (int_like,), 'callable': (_Callable(),), } @@ -359,7 +389,13 @@ def _validate_type(item, types=None, item_name=None, type_name=None): The thing to be checked. types : type | str | tuple of types | tuple of str The types to be checked against. - If str, must be one of {'int', 'str', 'numeric', 'info', 'path-like'}. + If str, must be one of {'int', 'str', 'numeric', 'info', 'path-like', + 'callable'}. + item_name : str | None + Name of the item to show inside the error message. + type_name : str | None + Possible types to show inside the error message that the checked item + can be. 
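A minimal standalone sketch of the alias-expansion pattern this helper relies on (the ``ALIASES`` table and ``validate`` function below are illustrative, not the real implementation)::

    from pathlib import Path

    ALIASES = {  # string aliases expand to tuples of classes
        'str': (str,),
        'path-like': (str, Path),
        'numeric': (int, float),
    }

    def validate(item, types, item_name='item'):
        if isinstance(types, str):  # expand a string alias
            types = ALIASES[types]
        if not isinstance(item, types):
            raise TypeError(f'{item_name} must be an instance of {types}, '
                            f'got {type(item)} instead')

    validate('sub-01_raw.fif', 'path-like', item_name='fname')  # passes
    validate(3.5, 'numeric')                                    # passes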
""" if types == "int": _ensure_int(item, name=item_name) @@ -385,8 +421,9 @@ def _validate_type(item, types=None, item_name=None, type_name=None): else: type_name[-1] = 'or ' + type_name[-1] type_name = ', '.join(type_name) - raise TypeError('%s must be an instance of %s, got %s instead' - % (item_name, type_name, type(item),)) + _item_name = 'Item' if item_name is None else item_name + raise TypeError(f"{_item_name} must be an instance of {type_name}, " + f"got {type(item)} instead") def _check_path_like(item): @@ -596,9 +633,9 @@ def fun(data): elif mode == "std": def fun(data): return np.std(data, axis=0) - elif mode == "median": + elif mode == "median" or mode == np.median: def fun(data): - return np.median(data, axis=0) + return _median_complex(data, axis=0) elif callable(mode): fun = mode else: @@ -706,30 +743,18 @@ def _suggest(val, options, cutoff=0.66): return ' Did you mean one of %r?' % (options,) -def _on_missing(on_missing, msg, name='on_missing'): - """Raise error or print warning with a message. +def _check_on_missing(on_missing, name='on_missing'): + _validate_type(on_missing, str, name) + _check_option(name, on_missing, ['raise', 'warn', 'ignore']) - Parameters - ---------- - on_missing : 'raise' | 'warn' | 'ignore' - Whether to raise an error, print a warning or ignore. Valid keys are - 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing is - 'warn' it will proceed but warn, if 'ignore' it will proceed silently. - msg : str - Message to print along with the error or the warning. Ignore if - on_missing is 'ignore'. - Raises - ------ - ValueError - When on_missing is 'raise'. - """ - _validate_type(on_missing, str, name) +def _on_missing(on_missing, msg, name='on_missing', error_klass=None): + _check_on_missing(on_missing, name) + error_klass = ValueError if error_klass is None else error_klass on_missing = 'raise' if on_missing == 'error' else on_missing on_missing = 'warn' if on_missing == 'warning' else on_missing - _check_option(name, on_missing, ['raise', 'warn', 'ignore']) if on_missing == 'raise': - raise ValueError(msg) + raise error_klass(msg) elif on_missing == 'warn': warn(msg) else: # Ignore @@ -745,3 +770,60 @@ def _safe_input(msg, *, alt=None, use=None): raise RuntimeError( f'Could not use input() to get a response to:\n{msg}\n' f'You can {alt} to avoid this error.') + + +def _ensure_events(events): + events_type = type(events) + with warnings.catch_warnings(record=True): + warnings.simplefilter('ignore') # deprecation for object array + events = np.asarray(events) + if not np.issubdtype(events.dtype, np.integer): + raise TypeError('events should be a NumPy array of integers, ' + f'got {events_type}') + if events.ndim != 2 or events.shape[1] != 3: + raise ValueError( + f'events must be of shape (N, 3), got {events.shape}') + return events + + +def _infer_check_export_fmt(fmt, fname, supported_formats): + """Infer export format from filename extension if auto. + + Raises error if fmt is auto and no file extension found, + then checks format against supported formats, raises error if format is not + supported. + + Parameters + ---------- + fmt : str + Format of the export, will only infer the format from filename if fmt + is auto. + fname : str + Name of the target export file, only used when fmt is auto. + supported_formats : dict of str : tuple/list + Dictionary containing supported formats (as keys) and each format's + corresponding file extensions in a tuple/list (e.g. 
'eeglab': ('set',)) + """ + _validate_type(fmt, str, 'fmt') + fmt = fmt.lower() + if fmt == "auto": + fmt = op.splitext(fname)[1] + if fmt: + fmt = fmt[1:].lower() + # find fmt in supported formats dict's tuples + fmt = next((k for k, v in supported_formats.items() if fmt in v), + fmt) # default to original fmt for raising error later + else: + raise ValueError(f"Couldn't infer format from filename {fname}" + " (no extension found)") + + if fmt not in supported_formats: + supported = [] + for format, extensions in supported_formats.items(): + ext_str = ', '.join(f'*.{ext}' for ext in extensions) + supported.append(f'{format} ({ext_str})') + + supported_str = ', '.join(supported) + raise ValueError(f"Format '{fmt}' is not supported. " + f"Supported formats are {supported_str}.") + return fmt diff --git a/mne/utils/config.py b/mne/utils/config.py index 6a537ab668e..133642709df 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -73,6 +73,7 @@ def set_memmap_min_size(memmap_min_size): 'MNE_COREG_GUESS_MRI_SUBJECT', 'MNE_COREG_HEAD_HIGH_RES', 'MNE_COREG_HEAD_OPACITY', + 'MNE_COREG_HEAD_INSIDE', 'MNE_COREG_INTERACTION', 'MNE_COREG_MARK_INSIDE', 'MNE_COREG_PREPARE_BEM', @@ -89,6 +90,7 @@ def set_memmap_min_size(memmap_min_size): 'MNE_DATA', 'MNE_DATASETS_BRAINSTORM_PATH', 'MNE_DATASETS_EEGBCI_PATH', + 'MNE_DATASETS_EPILEPSY_ECOG_PATH', 'MNE_DATASETS_HF_SEF_PATH', 'MNE_DATASETS_MEGSIM_PATH', 'MNE_DATASETS_MISC_PATH', @@ -107,6 +109,9 @@ def set_memmap_min_size(memmap_min_size): 'MNE_DATASETS_PHANTOM_4DBTI_PATH', 'MNE_DATASETS_LIMO_PATH', 'MNE_DATASETS_REFMEG_NOISE_PATH', + 'MNE_DATASETS_SSVEP_PATH', + 'MNE_DATASETS_ERP_CORE_PATH', + 'MNE_DATASETS_EPILEPSY_ECOG_PATH', 'MNE_FORCE_SERIAL', 'MNE_KIT2FIFF_STIM_CHANNELS', 'MNE_KIT2FIFF_STIM_CHANNEL_CODING', @@ -478,9 +483,10 @@ def sys_info(fid=None, show_paths=False): sklearn: 0.23.1 numba: 0.50.1 nibabel: 3.1.1 + nilearn: 0.7.0 + dipy: 1.1.1 cupy: Not found pandas: 1.0.5 - dipy: 1.1.1 mayavi: Not found pyvista: 0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)} vtk: 9.0.1 @@ -521,7 +527,7 @@ def sys_info(fid=None, show_paths=False): libs = _get_numpy_libs() has_3d = False for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn', - 'numba', 'nibabel', 'cupy', 'pandas', 'dipy', + 'numba', 'nibabel', 'nilearn', 'dipy', 'cupy', 'pandas', 'mayavi', 'pyvista', 'vtk', 'PyQt5'): if mod_name == '': out += '\n' @@ -562,7 +568,12 @@ def sys_info(fid=None, show_paths=False): elif mod_name in ('mayavi', 'vtk'): has_3d = True if mod_name == 'vtk': - version = getattr(mod, 'VTK_VERSION', 'VTK_VERSION missing') + version = mod.vtkVersion() + # 9.0 dev has VersionFull but 9.0 doesn't + for attr in ('GetVTKVersionFull', 'GetVTKVersion'): + if hasattr(version, attr): + version = getattr(version, attr)() + break elif mod_name == 'PyQt5': version = _check_pyqt5_version() else: diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 1e065b844ad..435fcc3149e 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -12,10 +12,8 @@ import warnings import webbrowser -from .config import get_config from ..defaults import HEAD_SIZE_DEFAULT -from ..externals.doccer import filldoc, unindent_dict -from .check import _check_option +from ..externals.doccer import indentcount_lines ############################################################################## @@ -56,13 +54,10 @@ warning, or ``'ignore'`` to ignore when""" docdict['on_split_missing'] = """ on_split_missing : str - Can be ``'raise'`` to raise an 
error, ``'warn'`` (default) to emit a - warning, or ``'ignore'`` to ignore when a split file is missing. - The default will change from ``'warn'`` to ``'raise'`` in 0.23, set the - value explicitly to avoid deprecation warnings. + %s split file is missing. .. versionadded:: 0.22 -""" # after deprecation period, this can use _on_missing_base +""" % (_on_missing_base,) # Cropping docdict['include_tmax'] = """ @@ -117,6 +112,58 @@ modes are ignored when ``order`` is not ``None``. Defaults to ``'type'``. """ +# raw/epochs/evoked apply_function method +# apply_function method summary +applyfun_summary = """\ +The function ``fun`` is applied to the channels defined in ``picks``. +The {} object's data is modified in-place. If the function returns a different +data type (e.g. :py:obj:`numpy.complex128`) it must be specified +using the ``dtype`` parameter, which causes the data type of **all** the data +to change (even if the function is only applied to channels in ``picks``).{} + +.. note:: If ``n_jobs`` > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporarily stored in memory. +.. note:: If the data type changes (``dtype != None``), more memory is + required since the original and the converted data needs + to be stored in memory. +""" +applyfun_preload = (' The object has to have the data loaded e.g. with ' + '``preload=True`` or ``self.load_data()``.') +docdict['applyfun_summary_raw'] = \ + applyfun_summary.format('raw', applyfun_preload) +docdict['applyfun_summary_epochs'] = \ + applyfun_summary.format('epochs', applyfun_preload) +docdict['applyfun_summary_evoked'] = \ + applyfun_summary.format('evoked', '') +# apply_function params: fun +applyfun_fun = """ +fun : callable + A function to be applied to the channels. The first argument of + fun has to be a timeseries (:class:`numpy.ndarray`). The function must + operate on an array of shape ``(n_times,)`` {}. + The function must return an :class:`~numpy.ndarray` shaped like its input. +""" +docdict['applyfun_fun'] = applyfun_fun.format( + ' if ``channel_wise=True`` and ``(len(picks), n_times)`` otherwise') +docdict['applyfun_fun_evoked'] = applyfun_fun.format( + ' because it will apply channel-wise') +docdict['applyfun_dtype'] = """ +dtype : numpy.dtype + Data type to use after applying the function. If None + (default) the data type is not modified. +""" +chwise = """ +channel_wise : bool + Whether to apply the function to each channel {}individually. If ``False``, + the function will be applied to all {}channels at once. Default ``True``. +""" +docdict['applyfun_chwise'] = chwise.format('', '') +docdict['applyfun_chwise_epo'] = chwise.format('in each epoch ', 'epochs and ') +docdict['kwarg_fun'] = """ +**kwargs : dict + Additional keyword arguments to pass to ``fun``. +""" # Epochs docdict['proj_epochs'] = """ @@ -149,6 +196,19 @@ docdict['reject_by_annotation_raw'] = docdict['reject_by_annotation_all'] + """ Has no effect if ``inst`` is not a :class:`mne.io.Raw` object. """ +docdict['annot_ch_names'] = """ +ch_names : list | None + List of lists of channel names associated with the annotations. + Empty entries are assumed to be associated with no specific channel, + i.e., with all channels or with the time slice itself. None (default) is + the same as passing all empty lists. 
For example, this creates three + annotations, associating the first with the time interval itself, the + second with two channels, and the third with a single channel:: + + Annotations(onset=[0, 3, 10], duration=[1, 0.25, 0.5], + description=['Start', 'BAD_flux', 'BAD_noise'], + ch_names=[[], ['MEG0111', 'MEG2563'], ['MEG1443']]) +""" # General plotting docdict["show"] = """ @@ -578,6 +638,17 @@ Frequency-domain window to use in resampling. See :func:`scipy.signal.resample`. """ +docdict['average-psd'] = """ +average : str | None + How to average the segments. If ``mean`` (default), calculate the + arithmetic mean. If ``median``, calculate the median, corrected for + its bias relative to the mean. If ``None``, returns the unaggregated + segments. +""" +docdict['window-psd'] = """ +window : str | float | tuple + Windowing function to use. See :func:`scipy.signal.get_window`. +""" docdict['decim'] = """ decim : int Factor by which to subsample the data. @@ -671,9 +742,9 @@ must be set to ``False`` (the default in this case). """ docdict['set_eeg_reference_ch_type'] = """ -ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg' +ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg' | 'dbs' The name of the channel type to apply the reference to. If 'auto', - the first channel type of eeg, ecog or seeg that is found (in that + the first channel type of eeg, ecog, seeg or dbs that is found (in that order) will be selected. .. versionadded:: 0.19 @@ -864,12 +935,51 @@ # Rank docdict['rank'] = """ -rank : None | dict | 'info' | 'full' +rank : None | 'info' | 'full' | dict This controls the rank computation that can be read from the - measurement info or estimated from the data. See ``Notes`` - of :func:`mne.compute_rank` for details.""" -docdict['rank_None'] = docdict['rank'] + 'The default is None.' -docdict['rank_info'] = docdict['rank'] + 'The default is "info".' + measurement info or estimated from the data. When a noise covariance + is used for whitening, this should reflect the rank of that covariance, + otherwise amplification of noise components can occur in whitening (e.g., + often during source localization). + + :data:`python:None` + The rank will be estimated from the data after proper scaling of + different channel types. + ``'info'`` + The rank is inferred from ``info``. If data have been processed + with Maxwell filtering, the Maxwell filtering header is used. + Otherwise, the channel counts themselves are used. + In both cases, the number of projectors is subtracted from + the (effective) number of channels in the data. + For example, if Maxwell filtering reduces the rank to 68, with + two projectors the returned value will be 66. + ``'full'`` + The rank is assumed to be full, i.e. equal to the + number of good channels. If a `~mne.Covariance` is passed, this can + make sense if it has been (possibly improperly) regularized without + taking into account the true data rank. + :class:`dict` + Calculate the rank only for a subset of channel types, and explicitly + specify the rank for the remaining channel types. This can be + extremely useful if you already **know** the rank of (part of) your + data, for instance in case you have calculated it earlier. + + This parameter must be a dictionary whose **keys** correspond to + channel types in the data (e.g. ``'meg'``, ``'mag'``, ``'grad'``, + ``'eeg'``), and whose **values** are integers representing the + respective ranks. For example, ``{'mag': 90, 'eeg': 45}`` will assume + a rank of ``90`` and ``45`` for magnetometer data and EEG data, + respectively. 
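A hedged usage sketch of the dictionary form described above (the file name and the rank values are placeholders)::

    import mne

    epochs = mne.read_epochs('sample-epo.fif')  # placeholder file name
    # magnetometer and EEG ranks are specified explicitly; ranks for any
    # other channel types present are estimated from the data
    rank = dict(mag=90, eeg=45)
    print(mne.compute_rank(epochs, rank=rank))
    cov = mne.compute_covariance(epochs, rank=rank)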
+ + The ranks for all channel types present in the data, but + **not** specified in the dictionary will be estimated empirically. + That is, if you passed a dataset containing magnetometer, gradiometer, + and EEG data together with the dictionary from the previous example, + only the gradiometer rank would be determined, while the specified + magnetometer and EEG ranks would be taken for granted. +""" +docdict['rank_None'] = docdict['rank'] + "\n The default is ``None``." +docdict['rank_info'] = docdict['rank'] + "\n The default is ``'info'``." docdict['rank_tol'] = """ tol : float | 'auto' Tolerance for singular values to consider non-zero in @@ -902,9 +1012,8 @@ depth : None | float | dict How to weight (or normalize) the forward using a depth prior. If float (default 0.8), it acts as the depth weighting exponent (``exp``) - to use, which must be between 0 and 1. None is equivalent to 0, meaning - no depth weighting is performed. It can also be a :class:`dict` - containing keyword arguments to pass to + to use None is equivalent to 0, meaning no depth weighting is performed. + It can also be a :class:`dict` containing keyword arguments to pass to :func:`mne.forward.compute_depth_prior` (see docstring for details and defaults). This is effectively ignored when ``method='eLORETA'``. @@ -959,6 +1068,15 @@ Support for reducing rank in all modes (previously only supported ``pick='max_power'`` with weight normalization). """ +docdict['on_rank_mismatch'] = """ +on_rank_mismatch : str + If an explicit MEG value is passed, what to do when it does not match + an empirically computed rank (only used for covariances). + Can be 'raise' to raise an error, 'warn' (default) to emit a warning, or + 'ignore' to ignore. + + .. versionadded:: 0.23 +""" docdict['weight_norm'] = """ weight_norm : str | None Can be: @@ -1114,10 +1232,6 @@ trans : str | dict | instance of Transform %s """ % (_trans_base,) -docdict['trans_deprecated'] = """ -trans : str | dict | instance of Transform - Deprecated and will be removed in 0.23, do not pass this argument. -""" docdict['trans'] = """ trans : str | dict | instance of Transform | None %s @@ -1318,8 +1432,9 @@ montage : None | str | DigMontage A montage containing channel positions. If str or DigMontage is specified, the channel info will be updated with the channel - positions. Default is None. See also the documentation of - :class:`mne.channels.DigMontage` for more information. + positions. Default is None. For valid :class:`str` values see documentation + of :func:`mne.channels.make_standard_montage`. See also the documentation + of :class:`mne.channels.DigMontage` for more information. """ docdict["match_case"] = """ match_case : bool @@ -1327,6 +1442,16 @@ .. versionadded:: 0.20 """ +docdict["match_alias"] = """ +match_alias : bool | dict + Whether to use a lookup table to match unrecognized channel location names + to their known aliases. If True, uses the mapping in + ``mne.io.constants.CHANNEL_LOC_ALIASES``. If a :class:`dict` is passed, it + will be used instead, and should map from non-standard channel names to + names in the specified ``montage``. Default is ``False``. + + .. versionadded:: 0.23 +""" docdict['on_header_missing'] = """ on_header_missing : str %s the FastSCAN header is missing. @@ -1348,7 +1473,13 @@ .. versionadded:: 0.20.1 """ % (_on_missing_base,) -docdict['rename_channels_mapping'] = """ +docdict['on_missing_ch_names'] = """ +on_missing : str + %s entries in ch_names are not present in the raw instance. + + .. 
versionadded:: 0.23.0 +""" % (_on_missing_base,) +docdict['rename_channels_mapping_duplicates'] = """ mapping : dict | callable A dictionary mapping the old channel to a new channel name e.g. {'EEG061' : 'EEG161'}. Can also be a callable function @@ -1356,6 +1487,11 @@ .. versionchanged:: 0.10.0 Support for a callable function. +allow_duplicates : bool + If True (default False), allow duplicates, which will automatically + be renamed with ``-N`` at the end. + + .. versionadded:: 0.22.0 """ # Brain plotting @@ -1493,6 +1629,11 @@ Additional arguments to brain.add_data (e.g., ``dict(time_label_size=10)``). """ +docdict['brain_kwargs'] = """ +brain_kwargs : dict | None + Additional arguments to the :class:`mne.viz.Brain` constructor (e.g., + ``dict(silhouette=True)``). +""" docdict['views'] = """ views : str | list View to use. Can be any of:: @@ -1697,22 +1838,15 @@ docdict['clust_power_f'] = docdict['clust_power'].format('F') docdict['clust_out'] = """ out_type : 'mask' | 'indices' - Output format of clusters. If ``'mask'``, returns boolean arrays the same - shape as the input data, with ``True`` values indicating locations that are - part of a cluster. If ``'indices'``, returns a list of lists, where each - sublist contains the indices of locations that together form a cluster. - Note that for large datasets, ``'indices'`` may use far less memory than - ``'mask'``. Default is ``'indices'``. -""" -docdict['clust_out_none'] = """ -out_type : 'mask' | 'indices' - Output format of clusters. If ``'mask'``, returns boolean arrays the same - shape as the input data, with ``True`` values indicating locations that are - part of a cluster. If ``'indices'``, returns a list of lists, where each - sublist contains the indices of locations that together form a cluster. - Note that for large datasets, ``'indices'`` may use far less memory than - ``'mask'``. The default translates to ``'mask'`` in version 0.21 but will - change to ``'indices'`` in version 0.22. + Output format of clusters within a list. + If ``'mask'``, returns a list of boolean arrays, + each with the same shape as the input data (or slices if the shape is 1D + and adjacency is None), with ``True`` values indicating locations that are + part of a cluster. If ``'indices'``, returns a list of tuple of ndarray, + where each ndarray contains the indices of locations that together form the + given cluster along the given dimension. Note that for large datasets, + ``'indices'`` may use far less memory than ``'mask'``. + Default is ``'indices'``. """ docdict['clust_disjoint'] = """ check_disjoint : bool @@ -1910,6 +2044,81 @@ """ % docdict # Epochs +docdict['epochs_tmin_tmax'] = """ +tmin, tmax : float + Start and end time of the epochs in seconds, relative to the time-locked + event. Defaults to -0.2 and 0.5, respectively. +""" +docdict['epochs_reject_tmin_tmax'] = """ +reject_tmin, reject_tmax : float | None + Start and end of the time window used to reject epochs. The default + ``None`` corresponds to the first and last time points of the epochs, + respectively. +""" +docdict['epochs_events_event_id'] = """ +events : array of int, shape (n_events, 3) + The events typically returned by the read_events function. + If some events don't match the events of interest as specified + by event_id, they will be marked as 'IGNORED' in the drop log. +event_id : int | list of int | dict | None + The id of the event to consider. If dict, + the keys can later be used to access associated events. Example: + dict(auditory=1, visual=3). 
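A short usage sketch of the ``events``/``event_id`` pair described here (the file name is a placeholder)::

    import mne

    raw = mne.io.read_raw_fif('sample_raw.fif')  # placeholder file name
    events = mne.find_events(raw)
    event_id = dict(auditory=1, visual=3)
    epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=0.5)
    auditory = epochs['auditory']  # dict keys select the matching epochs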
If int, a dict will be created with + the id as string. If a list, all events with the IDs specified + in the list are used. If None, all events will be used with + and a dict is created with string integer names corresponding + to the event id integers. +""" +docdict['epochs_preload'] = """ + Load all epochs from disk when creating the object + or wait before accessing each epoch (more memory + efficient but can be slower). +""" +docdict['epochs_detrend'] = """ +detrend : int | None + If 0 or 1, the data channels (MEG and EEG) will be detrended when + loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None + is no detrending. Note that detrending is performed before baseline + correction. If no DC offset is preferred (zeroth order detrending), + either turn off baseline correction, as this may introduce a DC + shift, or set baseline correction to use the entire time interval + (will yield equivalent results but be slower). +""" +docdict['epochs_metadata'] = """ +metadata : instance of pandas.DataFrame | None + A :class:`pandas.DataFrame` specifying metadata about each epoch. + If given, ``len(metadata)`` must equal ``len(events)``. The DataFrame + may only contain values of type (str | int | float | bool). + If metadata is given, then pandas-style queries may be used to select + subsets of data, see :meth:`mne.Epochs.__getitem__`. + When a subset of the epochs is created in this (or any other + supported) manner, the metadata object is subsetted accordingly, and + the row indices will be modified to match ``epochs.selection``. + + .. versionadded:: 0.16 +""" +docdict['epochs_event_repeated'] = """ +event_repeated : str + How to handle duplicates in ``events[:, 0]``. Can be ``'error'`` + (default), to raise an error, 'drop' to only retain the row occurring + first in the ``events``, or ``'merge'`` to combine the coinciding + events (=duplicates) into a new event (see Notes for details). + + .. versionadded:: 0.19 +""" +docdict['epochs_raw'] = """ +raw : Raw object + An instance of `~mne.io.Raw`. +""" +docdict['epochs_on_missing'] = """ +on_missing : str + What to do if one or several event ids are not found in the recording. + Valid keys are 'raise' | 'warn' | 'ignore' + Default is 'raise'. If on_missing is 'warn' it will proceed but + warn, if 'ignore' it will proceed silently. Note. + If none of the event ids are found in the data, an error will be + automatically generated irrespective of this parameter. +""" reject_common = """ Reject epochs based on peak-to-peak signal amplitude (PTP), i.e. the absolute difference between the lowest and the highest signal value. In @@ -1944,16 +2153,203 @@ If ``reject`` is ``None``, no rejection is performed. If ``'existing'`` (default), then the rejection parameters set at instantiation are used. """ +flat_common = """ + Rejection parameters based on flatness of signal. + Valid **keys** are ``'grad'``, ``'mag'``, ``'eeg'``, ``'eog'``, ``'ecg'``. + The **values** are floats that set the minimum acceptable peak-to-peak + amplitude (PTP). If the PTP is smaller than this threshold, the epoch will + be dropped. If ``None`` then no rejection is performed based on flatness + of the signal.""" +docdict['flat'] = f""" +flat : dict | None +{flat_common} +""" +docdict['flat_drop_bad'] = f""" +flat : dict | str | None +{flat_common} + If ``'existing'``, then the flat parameters set during epoch creation are + used. +""" + +# ECG detection +docdict['ecg_event_id'] = """ +event_id : int + The index to assign to found ECG events. 
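The ``reject`` and ``flat`` dictionaries documented earlier in this hunk are plain dicts keyed by channel type; a sketch with illustrative (not recommended) thresholds::

    # peak-to-peak limits: T/m for grad, T for mag, V for eeg/eog
    reject = dict(grad=4000e-13, mag=4e-12, eeg=40e-6, eog=250e-6)
    # minimum acceptable peak-to-peak amplitude per channel type
    flat = dict(grad=1e-13, mag=1e-15, eeg=1e-6)
    # epochs = mne.Epochs(raw, events, reject=reject, flat=flat)
    # epochs.drop_bad(reject='existing', flat='existing')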
+""" +docdict['ecg_ch_name'] = """ +ch_name : None | str + The name of the channel to use for ECG peak detection. + If ``None`` (default), ECG channel is used if present. If ``None`` and + **no** ECG channel is present, a synthetic ECG channel is created from + the cross-channel average. This synthetic channel can only be created from + MEG channels. +""" +docdict['ecg_filter_freqs'] = """ +l_freq : float + Low pass frequency to apply to the ECG channel while finding events. +h_freq : float + High pass frequency to apply to the ECG channel while finding events. +""" +docdict['ecg_filter_length'] = """ +filter_length : str | int | None + Number of taps to use for filtering. +""" +docdict['ecg_tstart'] = """ +tstart : float + Start ECG detection after ``tstart`` seconds. Useful when the beginning + of the run is noisy. +""" +docdict['create_ecg_epochs'] = """This function will: + +#. Filter the ECG data channel. + +#. Find ECG R wave peaks using :func:`mne.preprocessing.find_ecg_events`. + +#. Filter the raw data. + +#. Create `~mne.Epochs` around the R wave peaks, capturing the heartbeats. +""" + +# EOG detection +docdict['create_eog_epochs'] = """This function will: + +#. Filter the EOG data channel. + +#. Find the peaks of eyeblinks in the EOG data using + :func:`mne.preprocessing.find_eog_events`. + +#. Filter the raw data. + +#. Create `~mne.Epochs` around the eyeblinks. +""" +docdict['eog_ch_name'] = """ +ch_name : str | list of str | None + The name of the channel(s) to use for EOG peak detection. If a string, + can be an arbitrary channel. This doesn't have to be a channel of + ``eog`` type; it could, for example, also be an ordinary EEG channel + that was placed close to the eyes, like ``Fp1`` or ``Fp2``. + + Multiple channel names can be passed as a list of strings. + + If ``None`` (default), use the channel(s) in ``raw`` with type ``eog``. +""" + +# SSP +docdict['compute_ssp'] = """This function aims to find those SSP vectors that +will project out the ``n`` most prominent signals from the data for each +specified sensor type. Consequently, if the provided input data contains high +levels of noise, the produced SSP vectors can then be used to eliminate that +noise from the data. +""" +compute_proj_common = """ +#. Optionally average the `~mne.Epochs` to produce an `~mne.Evoked` if + ``average=True`` was passed (default). + +#. Calculate SSP projection vectors on that data to capture the artifacts.""" +docdict['compute_proj_ecg'] = f"""%(create_ecg_epochs)s {compute_proj_common} +""" % docdict +docdict['compute_proj_eog'] = f"""%(create_eog_epochs)s {compute_proj_common} +""" % docdict + +# BEM +docdict['on_defects'] = """ +on_defects : str + What to do if the surface is found to have topological defects. Can be + ``'raise'`` (default) to raise an error, or ``'warn'`` to emit a warning. + Note that a lot of computations in MNE-Python assume the surfaces to be + topologically correct, topological defects may still make other + computations (e.g., ``mne.make_bem_model`` and ``mne.make_bem_solution``) + fail irrespective of this parameter. +""" + +# Export +docdict['export_warning'] = """ +.. warning:: + Since we are exporting to external formats, there's no guarantee that all + the info will be preserved in the external format. To save in native MNE + format (``.fif``) without information loss, use :func:`save` instead. +""" +docdict['export_params_fname'] = """ +fname : str + Name of the output file. 
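A hedged sketch of the ECG SSP workflow summarized above (the file name and the number of components are placeholders)::

    import mne
    from mne.preprocessing import create_ecg_epochs, compute_proj_ecg

    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # placeholder
    ecg_epochs = create_ecg_epochs(raw)  # epochs around the R wave peaks
    # SSP vectors that capture the heartbeat artifact
    projs, ecg_events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0,
                                         average=True)
    raw.add_proj(projs)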
+""" +docdict['export_params_fmt'] = """ +fmt : 'auto' | 'eeglab' + Format of the export. Defaults to ``'auto'``, which will infer the format + from the filename extension. See supported formats above for more + information. +""" +docdict['export_eeglab_note'] = """ +For EEGLAB exports, channel locations are expanded to full EEGLAB format. +For more details see :func:`eeglabio.utils.cart_to_eeglab`. +""" # Other docdict['accept'] = """ accept : bool If True (default False), accept the license terms of this dataset. """ +docdict['overwrite'] = """ +overwrite : bool + If True (default False), overwrite the destination file if it + exists. +""" + +docdict['ref_channels'] = """ +ref_channels : str | list of str + Name of the electrode(s) which served as the reference in the + recording. If a name is provided, a corresponding channel is added + and its data is set to 0. This is useful for later re-referencing. +""" + +docdict_indented = {} + + +def fill_doc(f): + """Fill a docstring with docdict entries. -# Finalize -docdict = unindent_dict(docdict) -fill_doc = filldoc(docdict, unindent_params=False) + Parameters + ---------- + f : callable + The function to fill the docstring of. Will be modified in place. + + Returns + ------- + f : callable + The function, potentially with an updated ``__doc__``. + """ + docstring = f.__doc__ + if not docstring: + return f + lines = docstring.splitlines() + # Find the minimum indent of the main docstring, after first line + if len(lines) < 2: + icount = 0 + else: + icount = indentcount_lines(lines[1:]) + # Insert this indent to dictionary docstrings + try: + indented = docdict_indented[icount] + except KeyError: + indent = ' ' * icount + docdict_indented[icount] = indented = {} + for name, dstr in docdict.items(): + lines = dstr.splitlines() + try: + newlines = [lines[0]] + for line in lines[1:]: + newlines.append(indent + line) + indented[name] = '\n'.join(newlines) + except IndexError: + indented[name] = dstr + try: + f.__doc__ = docstring % indented + except (TypeError, ValueError, KeyError) as exp: + funcname = f.__name__ + funcname = docstring.split('\n')[0] if funcname is None else funcname + raise RuntimeError('Error documenting %s:\n%s' + % (funcname, str(exp))) + return f ############################################################################## @@ -2239,7 +2635,7 @@ def linkcode_resolve(domain, info): linespec = "" if 'dev' in mne.__version__: - kind = 'master' + kind = 'main' else: kind = 'maint/%s' % ('.'.join(mne.__version__.split('.')[:2])) return "http://github.com/mne-tools/mne-python/blob/%s/mne/%s%s" % ( @@ -2260,6 +2656,8 @@ def open_docs(kind=None, version=None): The default can be changed by setting the configuration value MNE_DOCS_VERSION. """ + from .check import _check_option + from .config import get_config if kind is None: kind = get_config('MNE_DOCS_KIND', 'api') help_dict = dict(api='python_reference.html', tutorials='tutorials.html', @@ -2382,5 +2780,5 @@ def deprecated_alias(dep_name, func, removed_in=None): # Inject a deprecated version into the namespace inspect.currentframe().f_back.f_globals[dep_name] = deprecated( f'{dep_name} has been deprecated in favor of {func.__name__} and will ' - f'be removed in {removed_in}' + f'be removed in {removed_in}.' 
)(deepcopy(func)) diff --git a/mne/utils/fetching.py b/mne/utils/fetching.py index 836abc2bce1..e1f7b9b86b1 100644 --- a/mne/utils/fetching.py +++ b/mne/utils/fetching.py @@ -7,8 +7,6 @@ import os import shutil import time -from urllib import parse, request -from urllib.error import HTTPError, URLError from .progressbar import ProgressBar from .numerics import hashfunc @@ -21,6 +19,8 @@ def _get_http(url, temp_file_name, initial_size, timeout, verbose_bool): """Safely (resume a) download to a file from http(s).""" + from urllib import request + from urllib.error import HTTPError, URLError # Actually do the reading response = None extra = '' @@ -94,6 +94,7 @@ def _fetch_file(url, file_name, print_destination=True, resume=True, """ # Adapted from NISL: # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py + from urllib import parse if hash_ is not None and (not isinstance(hash_, str) or len(hash_) != 32) and hash_type == 'md5': raise ValueError('Bad hash value given, should be a 32-character ' @@ -135,6 +136,7 @@ def _fetch_file(url, file_name, print_destination=True, resume=True, def _url_to_local_path(url, path): """Mirror a url path in a local destination (keeping folder structure).""" + from urllib import parse, request destination = parse.urlparse(url).path # First char should be '/', and it needs to be discarded if len(destination) < 2 or destination[0] != '/': diff --git a/mne/utils/linalg.py b/mne/utils/linalg.py index 18447cd2c3a..8cce6a22c6a 100644 --- a/mne/utils/linalg.py +++ b/mne/utils/linalg.py @@ -22,49 +22,47 @@ # # License: BSD (3-clause) +import functools + import numpy as np -from scipy import linalg -from scipy.linalg import LinAlgError -from scipy._lib._util import _asarray_validated -_d = np.empty(0, np.float64) -_z = np.empty(0, np.complex128) -dgemm = linalg.get_blas_funcs('gemm', (_d,)) -zgemm = linalg.get_blas_funcs('gemm', (_z,)) -dgemv = linalg.get_blas_funcs('gemv', (_d,)) -ddot = linalg.get_blas_funcs('dot', (_d,)) -_I = np.cast['F'](1j) + +# For efficiency, names should be str or tuple of str, dtype a builtin +# NumPy dtype + +@functools.lru_cache(None) +def _get_blas_funcs(dtype, names): + from scipy import linalg + return linalg.get_blas_funcs(names, (np.empty(0, dtype),)) + + +@functools.lru_cache(None) +def _get_lapack_funcs(dtype, names): + from scipy import linalg + assert dtype in (np.float64, np.complex128) + x = np.empty(0, dtype) + return linalg.get_lapack_funcs(names, (x,)) ############################################################################### # linalg.svd and linalg.pinv2 -dgesdd, dgesdd_lwork = linalg.get_lapack_funcs(('gesdd', 'gesdd_lwork'), (_d,)) -zgesdd, zgesdd_lwork = linalg.get_lapack_funcs(('gesdd', 'gesdd_lwork'), (_z,)) -dgesvd, dgesvd_lwork = linalg.get_lapack_funcs(('gesvd', 'gesvd_lwork'), (_d,)) -zgesvd, zgesvd_lwork = linalg.get_lapack_funcs(('gesvd', 'gesvd_lwork'), (_z,)) - def _svd_lwork(shape, dtype=np.float64): """Set up SVD calculations on identical-shape float64/complex128 arrays.""" - if dtype == np.float64: - gesdd_lwork, gesvd_lwork = dgesdd_lwork, dgesvd_lwork - else: - assert dtype == np.complex128 - gesdd_lwork, gesvd_lwork = zgesdd_lwork, zgesvd_lwork + from scipy import linalg + gesdd_lwork, gesvd_lwork = _get_lapack_funcs( + dtype, ('gesdd_lwork', 'gesvd_lwork')) sdd_lwork = linalg.decomp_svd._compute_lwork( gesdd_lwork, *shape, compute_uv=True, full_matrices=False) svd_lwork = linalg.decomp_svd._compute_lwork( gesvd_lwork, *shape, compute_uv=True, full_matrices=False) - return (sdd_lwork, 
svd_lwork) + return sdd_lwork, svd_lwork def _repeated_svd(x, lwork, overwrite_a=False): """Mimic scipy.linalg.svd, avoid lwork and get_lapack_funcs overhead.""" - if x.dtype == np.float64: - gesdd, gesvd = dgesdd, zgesdd - else: - assert x.dtype == np.complex128 - gesdd, gesvd = zgesdd, zgesvd + gesdd, gesvd = _get_lapack_funcs( + x.dtype, ('gesdd', 'gesvd')) # this has to use overwrite_a=False in case we need to fall back to gesvd u, s, v, info = gesdd(x, compute_uv=True, lwork=lwork[0], full_matrices=False, overwrite_a=False) @@ -73,7 +71,7 @@ def _repeated_svd(x, lwork, overwrite_a=False): u, s, v, info = gesvd(x, compute_uv=True, lwork=lwork[1], full_matrices=False, overwrite_a=overwrite_a) if info > 0: - raise LinAlgError("SVD did not converge") + raise np.linalg.LinAlgError("SVD did not converge") if info < 0: raise ValueError('illegal value in %d-th argument of internal gesdd' % -info) @@ -83,8 +81,17 @@ def _repeated_svd(x, lwork, overwrite_a=False): ############################################################################### # linalg.eigh -dsyevd, = linalg.get_lapack_funcs(('syevd',), (_d,)) -zheevd, = linalg.get_lapack_funcs(('heevd',), (_z,)) +@functools.lru_cache(None) +def _get_evd(dtype): + from scipy import linalg + x = np.empty(0, dtype) + if dtype == np.float64: + driver = 'syevd' + else: + assert dtype == np.complex128 + driver = 'heevd' + evr, = linalg.get_lapack_funcs((driver,), (x,)) + return evr, driver def eigh(a, overwrite_a=False, check_finite=True): @@ -108,15 +115,13 @@ def eigh(a, overwrite_a=False, check_finite=True): The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is the column ``v[:, i]``. """ + from scipy.linalg import LinAlgError + from scipy._lib._util import _asarray_validated # We use SYEVD, see https://github.com/scipy/scipy/issues/9212 if check_finite: a = _asarray_validated(a, check_finite=check_finite) - if a.dtype == np.float64: - evr, driver = dsyevd, 'syevd' - else: - assert a.dtype == np.complex128 - evr, driver = zheevd, 'heevd' - w, v, info = evr(a, lower=1, overwrite_a=overwrite_a) + evd, driver = _get_evd(a.dtype) + w, v, info = evd(a, lower=1, overwrite_a=overwrite_a) if info == 0: return w, v if info < 0: diff --git a/mne/utils/misc.py b/mne/utils/misc.py index af5b429048f..caceae5abe2 100644 --- a/mne/utils/misc.py +++ b/mne/utils/misc.py @@ -341,9 +341,11 @@ def _assert_no_instances(cls, when=''): r is not globals() and \ r is not locals() and \ not inspect.isframe(r): - ref.append( - f'{r.__class__.__name__}: ' + - repr(r)[:100].replace('\n', ' ')) + if isinstance(r, (list, dict)): + rep = f'len={len(r)}' + else: + rep = repr(r)[:100].replace('\n', ' ') + ref.append(f'{r.__class__.__name__}: {rep}') count += 1 del r del rr diff --git a/mne/utils/mixin.py b/mne/utils/mixin.py index f18b90a3bfd..79cd45f6c9e 100644 --- a/mne/utils/mixin.py +++ b/mne/utils/mixin.py @@ -285,7 +285,6 @@ def __len__(self): 43 >>> len(epochs.events) # doctest: +SKIP 43 - """ from ..epochs import BaseEpochs if isinstance(self, BaseEpochs) and not self._bad_dropped: @@ -313,6 +312,7 @@ def __iter__(self): :meth:`mne.Epochs.next`. """ self._current = 0 + self._current_detrend_picks = self._detrend_picks return self def __next__(self, return_event_id=False): @@ -330,18 +330,21 @@ def __next__(self, return_event_id=False): event_id : int The event id. Only returned if ``return_event_id`` is ``True``. 
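A small usage sketch of the iterator protocol documented above (the file name is a placeholder); public iteration yields one preprocessed epoch per pass as a plain array::

    import mne

    epochs = mne.read_epochs('sample-epo.fif')  # placeholder file name
    for epoch in epochs:  # __iter__ once, then __next__ per epoch
        # each epoch is an ndarray of shape (n_channels, n_times)
        ptp = epoch.max(axis=-1) - epoch.min(axis=-1)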
""" + if not hasattr(self, '_current_detrend_picks'): + self.__iter__() # ensure we're ready to iterate if self.preload: if self._current >= len(self._data): - raise StopIteration # signal the end + self._stop_iter() epoch = self._data[self._current] self._current += 1 else: is_good = False while not is_good: if self._current >= len(self.events): - raise StopIteration # signal the end properly + self._stop_iter() epoch_noproj = self._get_epoch_from_raw(self._current) - epoch_noproj = self._detrend_offset_decim(epoch_noproj) + epoch_noproj = self._detrend_offset_decim( + epoch_noproj, self._current_detrend_picks) epoch = self._project_epoch(epoch_noproj) self._current += 1 is_good, _ = self._is_good_epoch(epoch) @@ -354,6 +357,11 @@ def __next__(self, return_event_id=False): else: return epoch, self.events[self._current - 1][-1] + def _stop_iter(self): + del self._current + del self._current_detrend_picks + raise StopIteration # signal the end + next = __next__ # originally for Python2, now b/c public def _check_metadata(self, metadata=None, reset_index=False): diff --git a/mne/utils/numerics.py b/mne/utils/numerics.py index ad077b08549..7c24daf44be 100644 --- a/mne/utils/numerics.py +++ b/mne/utils/numerics.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- """Some utility functions.""" # Authors: Alexandre Gramfort +# Clemens Brunner # # License: BSD (3-clause) @@ -18,11 +19,11 @@ from datetime import datetime, timedelta, timezone import numpy as np -from scipy import sparse from ._logging import logger, warn, verbose from .check import check_random_state, _ensure_int, _validate_type -from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd +from ..fixes import (_infer_dimension_, svd_flip, stable_cumsum, _safe_svd, + jit, has_numba) from .docs import fill_doc @@ -668,6 +669,7 @@ def object_size(x, memo=None): size : int The estimated size in bytes of the object. """ + from scipy import sparse # Note: this will not process object arrays properly (since those only) # hold references if memo is None: @@ -737,6 +739,7 @@ def object_diff(a, b, pre=''): diffs : str A string representation of the differences. 
""" + from scipy import sparse out = '' if type(a) != type(b): # Deal with NamedInt and NamedFloat @@ -1056,3 +1059,20 @@ def restore(self, val): else: loc = np.searchsorted(self.indices, idx) self.indices.insert(loc, idx) + + +def _arange_div_fallback(n, d): + x = np.arange(n, dtype=np.float64) + x /= d + return x + + +if has_numba: + @jit(fastmath=False) + def _arange_div(n, d): + out = np.empty(n, np.float64) + for i in range(n): + out[i] = i / d + return out +else: # pragma: no cover + _arange_div = _arange_div_fallback diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index 9e6fa393ef8..2278d810931 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -5,7 +5,6 @@ # License: BSD (3-clause) import os import os.path as op -import shutil import sys import numpy as np @@ -13,6 +12,7 @@ from pathlib import Path import mne +from mne import read_vectorview_selection from mne.datasets import testing from mne.io.pick import pick_channels_cov from mne.utils import (check_random_state, _check_fname, check_fname, @@ -20,6 +20,7 @@ _check_mayavi_version, _check_info_inv, _check_option, check_version, _check_path_like, _validate_type, _suggest, _on_missing, requires_nibabel, _safe_input) + data_path = testing.data_path(download=False) base_dir = op.join(data_path, 'MEG', 'sample') fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') @@ -34,7 +35,7 @@ def test_check(tmpdir): """Test checking functions.""" pytest.raises(ValueError, check_random_state, 'foo') pytest.raises(TypeError, _check_fname, 1) - _check_fname(Path('./')) + _check_fname(Path('./foo')) fname = str(tmpdir.join('foo')) with open(fname, 'wb'): pass @@ -59,10 +60,17 @@ def test_check(tmpdir): if check_version('numpy', '1.17'): check_random_state(np.random.default_rng(0)).choice(1) - # _meg.fif is a valid ending and should not raise an error - new_fname = str( - tmpdir.join(op.basename(fname_raw).replace('_raw.', '_meg.'))) - shutil.copyfile(fname_raw, new_fname) + +@testing.requires_testing_data +@pytest.mark.parametrize('suffix', + ('_meg.fif', '_eeg.fif', '_ieeg.fif', + '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz')) +def test_check_fname_suffixes(suffix, tmpdir): + """Test checking for valid filename suffixes.""" + new_fname = str(tmpdir.join(op.basename(fname_raw) + .replace('_raw.fif', suffix))) + raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1) + raw.save(new_fname) mne.io.read_raw_fif(new_fname) @@ -83,7 +91,7 @@ def _get_data(): event_id, tmin, tmax = 1, -0.1, 0.15 # decimate for speed - left_temporal_channels = mne.read_selection('Left-temporal') + left_temporal_channels = read_vectorview_selection('Left-temporal') picks = mne.pick_types(raw.info, meg=True, selection=left_temporal_channels) picks = picks[::2] diff --git a/mne/utils/tests/test_fetching.py b/mne/utils/tests/test_fetching.py index 547419380d2..98a4536ca1f 100644 --- a/mne/utils/tests/test_fetching.py +++ b/mne/utils/tests/test_fetching.py @@ -10,7 +10,7 @@ @pytest.mark.timeout(60) @requires_good_network @pytest.mark.parametrize('url', ( - 'https://raw.githubusercontent.com/mne-tools/mne-python/master/README.rst', + 'https://raw.githubusercontent.com/mne-tools/mne-python/main/README.rst', )) def test_fetch_file(url, tmpdir): """Test URL retrieval.""" diff --git a/mne/utils/tests/test_logging.py b/mne/utils/tests/test_logging.py index 5f1b23e6d9e..a7b2b450d81 100644 --- a/mne/utils/tests/test_logging.py +++ b/mne/utils/tests/test_logging.py @@ -3,15 +3,18 @@ import re import warnings 
+import numpy as np import pytest -from mne import read_evokeds +from mne import read_evokeds, Epochs +from mne.io import read_raw_fif from mne.utils import (warn, set_log_level, set_log_file, filter_out_warnings, verbose, _get_call_line, use_log_level, catch_logging, logger) from mne.utils._logging import _frame_info base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') +fname_raw = op.join(base_dir, 'test_raw.fif') fname_evoked = op.join(base_dir, 'test-ave.fif') fname_log = op.join(base_dir, 'test-ave.log') fname_log_2 = op.join(base_dir, 'test-ave-2.log') @@ -139,6 +142,33 @@ def test_logging_options(tmpdir): assert log.getvalue() == '' +@pytest.mark.parametrize('verbose', (True, False)) +def test_verbose_method(verbose): + """Test for gh-8772.""" + # raw + raw = read_raw_fif(fname_raw, verbose=verbose) + with catch_logging() as log: + raw.load_data(verbose=True) + log = log.getvalue() + assert 'Reading 0 ... 14399' in log + with catch_logging() as log: + raw.load_data(verbose=False) + log = log.getvalue() + assert log == '' + # epochs + events = np.array([[raw.first_samp + 200, 0, 1]], int) + epochs = Epochs(raw, events, verbose=verbose) + with catch_logging() as log: + epochs.drop_bad(verbose=True) + log = log.getvalue() + assert '0 bad epochs dropped' in log + epochs = Epochs(raw, events, verbose=verbose) + with catch_logging() as log: + epochs.drop_bad(verbose=False) + log = log.getvalue() + assert log == '' + + def test_warn(capsys): """Test the smart warn() function.""" with pytest.warns(RuntimeWarning, match='foo'): @@ -177,11 +207,34 @@ def bad_verbose(): class Okay: @verbose - def meth(self): # allowed because it should just use self.verbose - pass + def meth_1(self): # allowed because it should just use self.verbose + logger.info('meth_1') + + @verbose + def meth_2(self, verbose=None): + logger.info('meth_2') o = Okay() with pytest.raises(RuntimeError, match=r'does not have self\.verbose'): - o.meth() # should raise, no verbose attr yet - o.verbose = None - o.meth() + o.meth_1() # should raise, no verbose attr yet + o.verbose = False + with catch_logging() as log: + o.meth_1() + o.meth_2() + log = log.getvalue() + assert log == '' + with catch_logging() as log: + o.meth_2(verbose=True) + log = log.getvalue() + assert 'meth_2' in log + o.verbose = True + with catch_logging() as log: + o.meth_1() + o.meth_2() + log = log.getvalue() + assert 'meth_1' in log + assert 'meth_2' in log + with catch_logging() as log: + o.meth_2(verbose=False) + log = log.getvalue() + assert log == '' diff --git a/mne/utils/tests/test_numerics.py b/mne/utils/tests/test_numerics.py index 3e8f2b27e3a..bc8546a24e3 100644 --- a/mne/utils/tests/test_numerics.py +++ b/mne/utils/tests/test_numerics.py @@ -21,7 +21,7 @@ _undo_scaling_array, _PCA, requires_sklearn, _array_equal_nan, _julian_to_cal, _cal_to_julian, _dt_to_julian, _julian_to_dt, grand_average, - _ReuseCycle, requires_version) + _ReuseCycle, requires_version, numerics) base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -530,3 +530,12 @@ def test_reuse_cycle(): iterable.restore('a') assert ''.join(next(iterable) for _ in range(4)) == 'acde' assert ''.join(next(iterable) for _ in range(5)) == 'abcde' + + +@pytest.mark.parametrize('n', (0, 1, 10, 1000)) +@pytest.mark.parametrize('d', (0.0001, 1, 2.5, 1000)) +def test_arange_div(numba_conditional, n, d): + """Test Numba arange_div.""" + want = np.arange(n) / d + got = numerics._arange_div(n, d) + assert_allclose(got, want) diff --git 
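# --- editor's usage sketch (not part of the patch) --------------------------
# The test_logging additions above exercise per-call ``verbose`` overrides;
# the capture mechanics they rely on look roughly like this. Exact output
# depends on the active log level, so treat this as a sketch.
from mne.utils import catch_logging, use_log_level, logger

with catch_logging() as log, use_log_level('info'):
    logger.info('visible at the info level')
assert 'visible at the info level' in log.getvalue()

with catch_logging() as log, use_log_level('warning'):
    logger.info('filtered out below the warning level')
assert log.getvalue() == ''
# -----------------------------------------------------------------------------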
a/mne/utils/tests/test_progressbar.py b/mne/utils/tests/test_progressbar.py index c7e25282b76..c259a8b140a 100644 --- a/mne/utils/tests/test_progressbar.py +++ b/mne/utils/tests/test_progressbar.py @@ -24,7 +24,8 @@ def test_progressbar(): def iter_func(a): for ii in a: pass - pytest.raises(Exception, iter_func, ProgressBar(20)) + with pytest.raises(TypeError, match='not iterable'): + iter_func(pbar) # Make sure different progress bars can be used with catch_logging() as log, modified_env(MNE_TQDM='tqdm'), \ diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index cae8e00aa2d..54b1f769221 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -1,13 +1,10 @@ import os.path as op -import os import numpy as np import pytest -from numpy.testing import assert_equal from mne.datasets import testing -from mne.utils import (_TempDir, _url_to_local_path, run_tests_if_main, - buggy_mkl_svd) +from mne.utils import (_TempDir, _url_to_local_path, buggy_mkl_svd) def test_buggy_mkl(): @@ -35,18 +32,18 @@ def test_tempdir(): assert (not op.isdir(x)) -def test_datasets(): +def test_datasets(monkeypatch, tmpdir): """Test dataset config.""" # gh-4192 - data_path = testing.data_path(download=False) - os.environ['MNE_DATASETS_TESTING_PATH'] = op.dirname(data_path) - assert testing.data_path(download=False) == data_path + fake_path = tmpdir.mkdir('MNE-testing-data') + with open(fake_path.join('version.txt'), 'w') as fid: + fid.write('9999.9999') + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir)) + monkeypatch.setenv('MNE_DATASETS_TESTING_PATH', str(tmpdir)) + assert testing.data_path(download=False, verbose='debug') == str(fake_path) def test_url_to_local_path(): """Test URL to local path.""" - assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'), - op.join('.', 'home', 'why.html')) - - -run_tests_if_main() + assert _url_to_local_path('http://google.com/home/why.html', '.') == \ + op.join('.', 'home', 'why.html') diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 02c80746267..d3da119ecbd 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -18,7 +18,6 @@ from functools import partial import numpy as np -from scipy import linalg from ..defaults import DEFAULTS from ..fixes import einsum, _crop_colorbar, _get_img_fdata, _get_args @@ -27,24 +26,23 @@ from ..io.constants import FIFF from ..io.meas_info import read_fiducials, create_info from ..source_space import (_ensure_src, _create_surf_spacing, _check_spacing, - _read_mri_info, SourceSpaces) + _read_mri_info, SourceSpaces, read_freesurfer_lut) -from ..surface import (get_meg_helmet_surf, read_surface, _DistanceQuery, +from ..surface import (get_meg_helmet_surf, _read_mri_surface, _DistanceQuery, transform_surface_to, _project_onto_surface, - _reorder_ccw, _complete_sphere_surf) + _reorder_ccw) from ..transforms import (_find_trans, apply_trans, rot_to_quat, combine_transforms, _get_trans, _ensure_trans, - invert_transform, Transform, + invert_transform, Transform, rotation, read_ras_mni_t, _print_coord_trans) from ..utils import (get_subjects_dir, logger, _check_subject, verbose, warn, has_nibabel, check_version, fill_doc, _pl, get_config, - _ensure_int, _validate_type, _check_option, - _require_version) + _ensure_int, _validate_type, _check_option) from .utils import (mne_analyze_colormap, _get_color_list, plt_show, tight_layout, figure_nobar, _check_time_unit) from .misc import _check_mri -from ..bem import (ConductorModel, _bem_find_surface, _surf_dict, _surf_name, - 
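# --- editor's illustrative sketch (not part of the patch) -------------------
# The reworked test_datasets above builds a fake dataset directory and points
# the relevant environment variables at it via monkeypatch, so nothing gets
# downloaded. The generic pytest pattern looks like this (names are made up):
def test_uses_fake_dataset(monkeypatch, tmpdir):
    fake_path = tmpdir.mkdir('Fake-testing-data')
    fake_path.join('version.txt').write('9999.9999')
    monkeypatch.setenv('FAKE_DATASET_PATH', str(tmpdir))
    import os
    assert os.environ['FAKE_DATASET_PATH'] == str(tmpdir)
    assert fake_path.join('version.txt').check()
# -----------------------------------------------------------------------------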
read_bem_surfaces) +from ..bem import (ConductorModel, _bem_find_surface, + read_bem_surfaces, _ensure_bem_surfaces) verbose_dec = verbose @@ -92,7 +90,6 @@ def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z', mode : str Can be 'traces' (default) to show position and quaternion traces, or 'field' to show the position as a vector field over time. - The 'field' mode requires matplotlib 1.4+. cmap : colormap Colormap to use for the trace plot, default is "viridis". direction : str @@ -245,7 +242,7 @@ def plot_head_positions(pos, mode='traces', cmap='viridis', direction='z', else: # mode == 'field': from matplotlib.colors import Normalize from mpl_toolkits.mplot3d.art3d import Line3DCollection - from mpl_toolkits.mplot3d import axes3d # noqa: F401, analysis:ignore + from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d')) # First plot the trajectory as a colormap: @@ -424,8 +421,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, surfaces='auto', coord_frame='head', meg=None, eeg='original', fwd=None, dig=False, ecog=True, src=None, mri_fiducials=False, - bem=None, seeg=True, fnirs=True, show_axes=False, fig=None, - interaction='trackball', verbose=None): + bem=None, seeg=True, fnirs=True, show_axes=False, dbs=True, + fig=None, interaction='trackball', verbose=None): """Plot head, sensor, and source space alignment in 3D. Parameters @@ -489,8 +486,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, If not None, also plot the source space points. mri_fiducials : bool | str Plot MRI fiducials (default False). If ``True``, look for a file with - the canonical name (``bem/{subject}-fiducials.fif``). If ``str`` it - should provide the full path to the fiducials file. + the canonical name (``bem/{subject}-fiducials.fif``). If ``str``, + it can be ``'estimated'`` to use :func:`mne.coreg.get_mni_fiducials`, + otherwise it should provide the full path to the fiducials file. + + .. versionadded:: 0.22 + Support for ``'estimated'``. bem : list of dict | instance of ConductorModel | None Can be either the BEM surfaces (list of dict), a BEM solution or a sphere model. If None, we first try loading @@ -516,6 +517,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, * MEG in blue (if MEG sensors are present). .. versionadded:: 0.16 + dbs : bool + If True (default), show DBS (deep brain stimulation) electrodes. fig : mayavi.mlab.Figure | None Mayavi Scene in which to plot the alignment. If ``None``, creates a new 600x600 pixel figure with black background. @@ -550,6 +553,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, .. 
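# --- editor's usage sketch (not part of the patch) --------------------------
# Hedged example of the two ``plot_alignment`` options documented above:
# ``mri_fiducials='estimated'`` (uses mne.coreg.get_mni_fiducials) and the
# new ``dbs`` toggle. The file names and subject here are placeholders.
import mne

raw = mne.io.read_raw_fif('sub-01_raw.fif')  # hypothetical recording
fig = mne.viz.plot_alignment(
    raw.info, trans='sub-01-trans.fif',      # hypothetical head<->MRI trans
    subject='sub-01', subjects_dir='/path/to/subjects',
    surfaces='head', mri_fiducials='estimated',
    dbs=True,                                # show DBS electrodes if present
)
# -----------------------------------------------------------------------------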
versionadded:: 0.15 """ from ..forward import _create_meg_coils, Forward + from ..coreg import get_mni_fiducials # Update the backend from .backends.renderer import _get_renderer @@ -614,15 +618,13 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, else: user_alpha = {} surfaces = list(surfaces) - for s in surfaces: - _validate_type(s, "str", "all entries in surfaces") + for si, s in enumerate(surfaces): + _validate_type(s, "str", f"surfaces[{si}]") + brain = sorted( + set(surfaces) & set(['brain', 'pial', 'white', 'inflated'])) - is_sphere = False - if isinstance(bem, ConductorModel) and bem['is_sphere']: - if len(bem['layers']) != 4 and len(surfaces) > 1: - raise ValueError('The sphere conductor model must have three ' - 'layers for plotting skull and head.') - is_sphere = True + bem = _ensure_bem_surfaces(bem, extra_allow=(ConductorModel, None)) + assert isinstance(bem, ConductorModel) or bem is None _check_option('coord_frame', coord_frame, ['head', 'meg', 'mri']) if src is not None: @@ -632,12 +634,6 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, if src_subject is not None and subject != src_subject: raise ValueError('subject ("%s") did not match the subject name ' ' in src ("%s")' % (subject, src_subject)) - src_rr = np.concatenate([s['rr'][s['inuse'].astype(bool)] - for s in src]) - src_nn = np.concatenate([s['nn'][s['inuse'].astype(bool)] - for s in src]) - else: - src_rr = src_nn = np.empty((0, 3)) if fwd is not None: _validate_type(fwd, [Forward]) @@ -650,13 +646,13 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, ref_meg = 'ref' in meg meg_picks = pick_types(info, meg=True, ref_meg=ref_meg) eeg_picks = pick_types(info, meg=False, eeg=True, ref_meg=False) - fnirs_picks = pick_types(info, meg=False, eeg=False, - ref_meg=False, fnirs=True) - other_bools = dict(ecog=ecog, seeg=seeg, + fnirs_picks = pick_types(info, meg=False, eeg=False, ref_meg=False, + fnirs=True) + other_bools = dict(ecog=ecog, seeg=seeg, dbs=dbs, fnirs=(('channels' in fnirs) | ('sources' in fnirs) | ('detectors' in fnirs))) - del ecog, seeg + del ecog, seeg, dbs other_keys = sorted(other_bools.keys()) other_picks = {key: pick_types(info, meg=False, ref_meg=False, **{key: True}) for key in other_keys} @@ -688,7 +684,6 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, surfs = dict() # Head: - sphere_level = 4 head = False for s in surfaces: if s in ('auto', 'head', 'outer_skin', 'head-dense', 'seghead'): @@ -704,23 +699,10 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, 'Could not find the surface for ' 'head in the provided BEM model, ' 'looking in the subject directory.') - if isinstance(bem, ConductorModel): - if is_sphere: - head_surf = _complete_sphere_surf( - bem, 3, sphere_level, complete=False) - else: # BEM solution - try: - head_surf = _bem_find_surface( - bem, FIFF.FIFFV_BEM_SURF_ID_HEAD) - except RuntimeError: - logger.info(head_missing) - elif bem is not None: # list of dict - for this_surf in bem: - if this_surf['id'] == FIFF.FIFFV_BEM_SURF_ID_HEAD: - head_surf = this_surf - break - else: - logger.info(head_missing) + try: + head_surf = _bem_find_surface(bem, 'head') + except RuntimeError: + logger.info(head_missing) if head_surf is None: if subject is None: if s == 'auto': @@ -753,10 +735,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, if op.splitext(fname)[-1] == '.fif': head_surf = read_bem_surfaces(fname)[0] else: - 
head_surf = read_surface( - fname, return_dict=True)[2] - head_surf['rr'] /= 1000. - head_surf.update(coord_frame=FIFF.FIFFV_COORD_MRI) + head_surf = _read_mri_surface(fname) break else: raise IOError('No head surface found for subject ' @@ -766,8 +745,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, # Skull: skull = list() - for name, id_ in (('outer_skull', FIFF.FIFFV_BEM_SURF_ID_SKULL), - ('inner_skull', FIFF.FIFFV_BEM_SURF_ID_BRAIN)): + for name in ('outer_skull', 'inner_skull'): if name in surfaces: surfaces.pop(surfaces.index(name)) if bem is None: @@ -777,30 +755,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, if not op.isfile(fname): raise ValueError('bem is None and the the %s file cannot ' 'be found:\n%s' % (name, fname)) - surf = read_surface(fname, return_dict=True)[2] - surf.update(coord_frame=FIFF.FIFFV_COORD_MRI, - id=_surf_dict[name]) - surf['rr'] /= 1000. - skull.append(surf) - elif isinstance(bem, ConductorModel): - if is_sphere: - if len(bem['layers']) != 4: - raise ValueError('The sphere model must have three ' - 'layers for plotting %s' % (name,)) - this_idx = 1 if name == 'inner_skull' else 2 - skull.append(_complete_sphere_surf( - bem, this_idx, sphere_level)) - skull[-1]['id'] = _surf_dict[name] - else: - skull.append(_bem_find_surface(bem, id_)) - else: # BEM model - for this_surf in bem: - if this_surf['id'] == _surf_dict[name]: - skull.append(this_surf) - break - else: - raise ValueError('Could not find the surface for %s.' - % name) + surf = _read_mri_surface(fname) + else: + surf = _bem_find_surface(bem, name).copy() + surf['name'] = name + skull.append(surf) + assert all(isinstance(s, dict) for s in skull) if mri_fiducials: if mri_fiducials is True: @@ -811,9 +771,12 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, mri_fiducials = op.join(subjects_dir, subject, 'bem', subject + '-fiducials.fif') if isinstance(mri_fiducials, str): - mri_fiducials, cf = read_fiducials(mri_fiducials) - if cf != FIFF.FIFFV_COORD_MRI: - raise ValueError("Fiducials are not in MRI space") + if mri_fiducials == 'estimated': + mri_fiducials = get_mni_fiducials(subject, subjects_dir) + else: + mri_fiducials, cf = read_fiducials(mri_fiducials) + if cf != FIFF.FIFFV_COORD_MRI: + raise ValueError("Fiducials are not in MRI space") fid_loc = _fiducial_coords(mri_fiducials, FIFF.FIFFV_COORD_MRI) fid_loc = apply_trans(mri_trans, fid_loc) else: @@ -824,7 +787,6 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, assert surfs['helmet']['coord_frame'] == FIFF.FIFFV_COORD_MRI # Brain: - brain = np.intersect1d(surfaces, ['brain', 'pial', 'white', 'inflated']) if len(brain) > 1: raise ValueError('Only one brain surface can be plotted. ' 'Got %s.' 
% brain) @@ -835,19 +797,15 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, surfaces.pop(surfaces.index(brain)) if brain in user_alpha: user_alpha['lh'] = user_alpha['rh'] = user_alpha.pop(brain) - brain = 'pial' if brain == 'brain' else brain - if is_sphere: - if len(bem['layers']) > 0: - surfs['lh'] = _complete_sphere_surf( - bem, 0, sphere_level) # only plot 1 + if bem is not None and bem['is_sphere'] and brain == 'brain': + surfs['lh'] = _bem_find_surface(bem, 'brain') else: + brain = 'pial' if brain == 'brain' else brain subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) for hemi in ['lh', 'rh']: fname = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, brain)) - surfs[hemi] = read_surface(fname, return_dict=True)[2] - surfs[hemi]['rr'] /= 1000. - surfs[hemi].update(coord_frame=FIFF.FIFFV_COORD_MRI) + surfs[hemi] = _read_mri_surface(fname) brain = True # we've looked through all of them, raise if some remain @@ -858,48 +816,30 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, skull_alpha = dict() skull_colors = dict() hemi_val = 0.5 - max_alpha = 1.0 if len(other_picks['seeg']) == 0 else 0.75 + no_deep = all(len(other_picks[key]) == 0 for key in ('dbs', 'seeg')) + max_alpha = 1.0 if no_deep else 0.75 if src is None or (brain and any(s['type'] == 'surf' for s in src)): hemi_val = max_alpha alphas = np.linspace(max_alpha / 2., 0, 5)[:len(skull) + 1] for idx, this_skull in enumerate(skull): - if isinstance(this_skull, dict): - skull_surf = this_skull - this_skull = _surf_name[skull_surf['id']] - elif is_sphere: # this_skull == str - this_idx = 1 if this_skull == 'inner_skull' else 2 - skull_surf = _complete_sphere_surf(bem, this_idx, sphere_level) - else: # str - skull_fname = op.join(subjects_dir, subject, 'bem', 'flash', - '%s.surf' % this_skull) - if not op.exists(skull_fname): - skull_fname = op.join(subjects_dir, subject, 'bem', - '%s.surf' % this_skull) - if not op.exists(skull_fname): - raise IOError('No skull surface %s found for subject %s.' - % (this_skull, subject)) - logger.info('Using %s for head surface.' % skull_fname) - skull_surf = read_surface(skull_fname, return_dict=True)[2] - skull_surf['rr'] /= 1000. 
- skull_surf['coord_frame'] = FIFF.FIFFV_COORD_MRI - skull_alpha[this_skull] = alphas[idx + 1] - skull_colors[this_skull] = (0.95 - idx * 0.2, 0.85, 0.95 - idx * 0.2) - surfs[this_skull] = skull_surf + name = this_skull['name'] + skull_alpha[name] = alphas[idx + 1] + skull_colors[name] = (0.95 - idx * 0.2, 0.85, 0.95 - idx * 0.2) + surfs[name] = this_skull if src is None and brain is False and len(skull) == 0 and not show_axes: head_alpha = max_alpha else: head_alpha = alphas[0] - for key in surfs.keys(): + for key in surfs: # Surfs can sometimes be in head coords (e.g., if coming from sphere) + surf = surfs[key] + assert isinstance(surf, dict), f'{key}: {type(surf)}' surfs[key] = transform_surface_to(surfs[key], coord_frame, [mri_trans, head_trans], copy=True) - if src is not None: - src_rr, src_nn = _update_coord_frame(src[0], src_rr, src_nn, - mri_trans, head_trans) if fwd is not None: fwd_rr, fwd_nn = _update_coord_frame(fwd, fwd_rr, fwd_nn, mri_trans, head_trans) @@ -1014,7 +954,8 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, for k, v in user_alpha.items(): if v is not None: alphas[k] = v - colors = dict(head=(0.6,) * 3, helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3, + colors = dict(head=DEFAULTS['coreg']['head_color'], + helmet=(0.0, 0.0, 0.6), lh=(0.5,) * 3, rh=(0.5,) * 3) colors.update(skull_colors) for key, surf in surfs.items(): @@ -1060,19 +1001,34 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, defaults['extra_scale'] ] + [defaults[key + '_scale'] for key in other_keys] assert len(datas) == len(colors) == len(alphas) == len(scales) + fid_colors = tuple( + defaults[f'{key}_color'] for key in ('lpa', 'nasion', 'rpa')) + glyphs = ['sphere'] * len(datas) for kind, loc in (('dig', car_loc), ('mri', fid_loc)): if len(loc) > 0: datas.extend(loc[:, np.newaxis]) - colors.extend((defaults['lpa_color'], - defaults['nasion_color'], - defaults['rpa_color'])) - alphas.extend(3 * (defaults[kind + '_fid_opacity'],)) - scales.extend(3 * (defaults[kind + '_fid_scale'],)) - - for data, color, alpha, scale in zip(datas, colors, alphas, scales): + colors.extend(fid_colors) + alphas.extend(3 * (defaults[f'{kind}_fid_opacity'],)) + scales.extend(3 * (defaults[f'{kind}_fid_scale'],)) + glyphs.extend(3 * (('oct' if kind == 'mri' else 'sphere'),)) + for data, color, alpha, scale, glyph in zip( + datas, colors, alphas, scales, glyphs): if len(data) > 0: - renderer.sphere(center=data, color=color, scale=scale, - opacity=alpha, backface_culling=True) + if glyph == 'oct': + transform = np.eye(4) + transform[:3, :3] = mri_trans['trans'][:3, :3] * scale + # rotate around Z axis 45 deg first + transform = transform @ rotation(0, 0, np.pi / 4) + renderer.quiver3d( + x=data[:, 0], y=data[:, 1], z=data[:, 2], + u=1., v=0., w=0., color=color, mode='oct', + scale=1., opacity=alpha, backface_culling=True, + solid_transform=transform) + else: + assert glyph == 'sphere' + assert data.ndim == 2 and data.shape[1] == 3, data.shape + renderer.sphere(center=data, color=color, scale=scale, + opacity=alpha, backface_culling=True) if len(eegp_loc) > 0: renderer.quiver3d( x=eegp_loc[:, 0], y=eegp_loc[:, 1], z=eegp_loc[:, 2], @@ -1088,14 +1044,36 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, surf = dict(rr=meg_rrs, tris=meg_tris) renderer.surface(surface=surf, color=color, opacity=alpha, backface_culling=True) - if len(src_rr) > 0: - renderer.quiver3d( - x=src_rr[:, 0], y=src_rr[:, 1], z=src_rr[:, 2], - u=src_nn[:, 0], v=src_nn[:, 1], 
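# --- editor's illustrative sketch (not part of the patch) -------------------
# NumPy-only version of the octahedron glyph transform built above: scale the
# upper-left 3x3 block, then post-multiply by a 45-degree rotation about +z
# (``mne.transforms.rotation(0, 0, np.pi / 4)`` plays that role above).
import numpy as np

scale = 5e-3                      # hypothetical glyph scale
theta = np.pi / 4
rot_z = np.eye(4)
rot_z[:2, :2] = [[np.cos(theta), -np.sin(theta)],
                 [np.sin(theta), np.cos(theta)]]

transform = np.eye(4)
transform[:3, :3] *= scale        # isotropic scaling
transform = transform @ rot_z     # then rotate around +z
point = transform @ np.array([1., 0., 0., 1.])  # homogeneous coordinates
np.testing.assert_allclose(
    point[:3], [scale * np.cos(theta), scale * np.sin(theta), 0.], atol=1e-12)
# -----------------------------------------------------------------------------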
w=src_nn[:, 2], - color=(1., 1., 0.), mode='cylinder', scale=3e-3, - opacity=0.75, glyph_height=0.25, - glyph_center=(0., 0., 0.), glyph_resolution=20, - backface_culling=True) + + if src is not None: + atlas_ids, colors = read_freesurfer_lut() + for ss in src: + src_rr = ss['rr'][ss['inuse'].astype(bool)] + src_nn = ss['nn'][ss['inuse'].astype(bool)] + + src_rr, src_nn = _update_coord_frame(src[0], src_rr, src_nn, + mri_trans, head_trans) + # volume sources + if ss['type'] == 'vol': + if ss['seg_name'] in colors.keys(): + color = colors[ss['seg_name']][:3] + color = tuple(i / 256. for i in color) + else: + color = (1., 1., 0.) + + # surface and discrete sources + else: + color = (1., 1., 0.) + + if len(src_rr) > 0: + renderer.quiver3d( + x=src_rr[:, 0], y=src_rr[:, 1], z=src_rr[:, 2], + u=src_nn[:, 0], v=src_nn[:, 1], w=src_nn[:, 2], + color=color, mode='cylinder', scale=3e-3, + opacity=0.75, glyph_height=0.25, + glyph_center=(0., 0., 0.), glyph_resolution=20, + backface_culling=True) + if fwd is not None: red = (1.0, 0.0, 0.0) green = (0.0, 1.0, 0.0) @@ -1218,6 +1196,15 @@ def _sensor_shape(coil): return rrs, tris +def _get_cmap(colormap): + import matplotlib.pyplot as plt + if isinstance(colormap, str) and colormap in ('mne', 'mne_analyze'): + colormap = mne_analyze_colormap([0, 1, 2], format='matplotlib') + else: + colormap = plt.get_cmap(colormap) + return colormap + + def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True): """Convert colormap/clim options to dict. @@ -1225,7 +1212,6 @@ def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True): calling gives the same results. """ # Based on type of limits specified, get cmap control points - import matplotlib.pyplot as plt from matplotlib.colors import Colormap _validate_type(colormap, (str, Colormap), 'colormap') data = np.asarray(data) @@ -1241,10 +1227,7 @@ def _process_clim(clim, colormap, transparent, data=0., allow_pos_lims=True): colormap = 'hot' else: # 'pos_lims' in clim colormap = 'mne' - if colormap in ('mne', 'mne_analyze'): - colormap = mne_analyze_colormap([0, 1, 2], format='matplotlib') - else: - colormap = plt.get_cmap(colormap) + colormap = _get_cmap(colormap) assert isinstance(colormap, Colormap) diverging_maps = ['PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', @@ -1456,6 +1439,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', transparent=True): """Plot source estimate using mpl.""" import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.widgets import Slider import nibabel as nib @@ -1491,7 +1475,12 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', time_label, times = _handle_time(time_label, time_unit, stc.times) fig = plt.figure(figsize=(6, 6)) if figure is None else figure - ax = fig.gca(projection='3d') + try: + ax = Axes3D(fig, auto_add_to_figure=False) + except Exception: # old mpl + ax = Axes3D(fig) + else: + fig.add_axes(ax) hemi_idx = 0 if hemi == 'lh' else 1 surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface)) if spacing == 'all': @@ -1564,7 +1553,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', cax.tick_params(labelsize=16) cb.patch.set_facecolor('0.5') cax.set(xlim=(scale_pts[0], scale_pts[2])) - plt.show() + plt_show(True) return fig @@ -1646,7 +1635,8 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', time_unit='s', backend='auto', 
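# --- editor's illustrative sketch (not part of the patch) -------------------
# The volume-source colouring added above looks segmentation names up in the
# FreeSurfer lookup table and rescales the 0-255 RGB rows to 0-1, falling
# back to yellow for unknown names. A minimal stand-alone lookup:
from mne import read_freesurfer_lut

atlas_ids, lut_colors = read_freesurfer_lut()  # bundled FreeSurferColorLUT

def seg_color(seg_name):
    if seg_name in lut_colors:
        return tuple(i / 256. for i in lut_colors[seg_name][:3])
    return (1., 1., 0.)  # fallback, as in the code above

print(seg_color('Left-Hippocampus'))
print(seg_color('not-a-real-structure'))
# -----------------------------------------------------------------------------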
spacing='oct6', title=None, show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, + verbose=None): """Plot SourceEstimate. Parameters @@ -1740,13 +1730,13 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', %(src_volume_options)s %(view_layout)s %(add_data_kwargs)s + %(brain_kwargs)s %(verbose)s Returns ------- - figure : instance of surfer.Brain | matplotlib.figure.Figure - An instance of :class:`surfer.Brain` from PySurfer or - matplotlib figure. + figure : instance of mne.viz.Brain | matplotlib.figure.Figure + An instance of :class:`mne.viz.Brain` or matplotlib figure. Notes ----- @@ -1759,29 +1749,23 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', - https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferOccipitalFlattenedPatch - https://openwetware.org/wiki/Beauchamp:FreeSurfer """ # noqa: E501 - from .backends.renderer import _get_3d_backend, set_3d_backend - from ..source_estimate import _BaseSourceEstimate + from .backends.renderer import _get_3d_backend, use_3d_backend + from ..source_estimate import _BaseSourceEstimate, _check_stc_src + _check_stc_src(stc, src) _validate_type(stc, _BaseSourceEstimate, 'stc', 'source estimate') subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, raise_error=True) - subject = _check_subject(stc.subject, subject, True) + subject = _check_subject(stc.subject, subject) _check_option('backend', backend, - ['auto', 'matplotlib', 'mayavi', 'pyvista']) + ['auto', 'matplotlib', 'mayavi', 'pyvista', 'notebook']) plot_mpl = backend == 'matplotlib' if not plot_mpl: - try: - if backend == 'auto': - set_3d_backend(_get_3d_backend()) - else: - set_3d_backend(backend) - except (ImportError, ModuleNotFoundError): - if backend == 'auto': + if backend == 'auto': + try: + backend = _get_3d_backend() + except (ImportError, ModuleNotFoundError): warn('No 3D backend found. 
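# --- editor's usage sketch (not part of the patch) --------------------------
# Hedged example of the ``brain_kwargs`` pass-through documented above: extra
# keyword arguments are forwarded to the mne.viz.Brain constructor. The stc
# file stem and subjects_dir are placeholders.
import mne

stc = mne.read_source_estimate('sub-01-meg')           # hypothetical stem
brain = mne.viz.plot_source_estimates(
    stc, subject='sub-01', subjects_dir='/path/to/subjects',
    hemi='both', backend='pyvista',
    brain_kwargs=dict(silhouette=True),                 # forwarded to Brain
)
# -----------------------------------------------------------------------------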
Resorting to matplotlib 3d.') plot_mpl = True - else: # 'mayavi' - raise - else: - backend = _get_3d_backend() kwargs = dict( subject=subject, surface=surface, hemi=hemi, colormap=colormap, time_label=time_label, smoothing_steps=smoothing_steps, @@ -1791,11 +1775,15 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', transparent=transparent) if plot_mpl: return _plot_mpl_stc(stc, spacing=spacing, **kwargs) - return _plot_stc( - stc, overlay_alpha=alpha, brain_alpha=alpha, vector_alpha=alpha, - cortex=cortex, foreground=foreground, size=size, scale_factor=None, - show_traces=show_traces, src=src, volume_options=volume_options, - view_layout=view_layout, add_data_kwargs=add_data_kwargs, **kwargs) + else: + with use_3d_backend(backend): + return _plot_stc( + stc, overlay_alpha=alpha, brain_alpha=alpha, + vector_alpha=alpha, cortex=cortex, foreground=foreground, + size=size, scale_factor=None, show_traces=show_traces, + src=src, volume_options=volume_options, + view_layout=view_layout, add_data_kwargs=add_data_kwargs, + brain_kwargs=brain_kwargs, **kwargs) def _plot_stc(stc, subject, surface, hemi, colormap, time_label, @@ -1803,26 +1791,24 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, time_unit, background, time_viewer, colorbar, transparent, brain_alpha, overlay_alpha, vector_alpha, cortex, foreground, size, scale_factor, show_traces, src, volume_options, - view_layout, add_data_kwargs): - from .backends.renderer import _get_3d_backend + view_layout, add_data_kwargs, brain_kwargs): + from .backends.renderer import _get_3d_backend, get_brain_class from ..source_estimate import _BaseVolSourceEstimate vec = stc._data_ndim == 3 subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, raise_error=True) - subject = _check_subject(stc.subject, subject, True) + subject = _check_subject(stc.subject, subject) backend = _get_3d_backend() del _get_3d_backend using_mayavi = backend == "mayavi" - if using_mayavi: - from surfer import Brain - _require_version('surfer', 'stc.plot', '0.9') - else: # PyVista - from ._brain import Brain + Brain = get_brain_class() views = _check_views(surface, views, hemi, stc, backend) _check_option('hemi', hemi, ['lh', 'rh', 'split', 'both']) _check_option('view_layout', view_layout, ('vertical', 'horizontal')) time_label, times = _handle_time(time_label, time_unit, stc.times) + show_traces, time_viewer = _check_st_tv( + show_traces, time_viewer, using_mayavi, times) # convert control points to locations in colormap use = stc.magnitude().data if vec else stc.data @@ -1862,6 +1848,8 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, "figure": figure, "subjects_dir": subjects_dir, "views": views, "alpha": brain_alpha, } + if brain_kwargs is not None: + kwargs.update(brain_kwargs) if backend in ['pyvista', 'notebook']: kwargs["show"] = False kwargs["view_layout"] = view_layout @@ -1873,6 +1861,12 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, with warnings.catch_warnings(record=True): # traits warnings brain = Brain(**kwargs) del kwargs + + if using_mayavi: + # Here we patch to avoid segfault: + # https://github.com/mne-tools/mne-python/pull/8828 + brain.close = lambda *args, **kwargs: brain._close(False) + if scale_factor is None: # Configure the glyphs scale directly width = np.mean([np.ptp(brain.geo[hemi].coords[:, 1]) @@ -1881,8 +1875,8 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, if transparent is None: transparent = True - sd_kwargs = 
dict(transparent=transparent, verbose=False) center = 0. if diverging else None + sd_kwargs = dict(transparent=transparent, center=center, verbose=False) kwargs = { "array": stc, "colormap": colormap, @@ -1956,11 +1950,27 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, elif need_peeling: brain.enable_depth_peeling() + if time_viewer: + if using_mayavi: + from surfer import TimeViewer + TimeViewer(brain) + else: # PyVista + brain.setup_time_viewer(time_viewer=time_viewer, + show_traces=show_traces) + else: + if not using_mayavi: + brain.show() + + return brain + + +def _check_st_tv(show_traces, time_viewer, using_mayavi, times): # time_viewer and show_traces _check_option('time_viewer', time_viewer, (True, False, 'auto')) _validate_type(show_traces, (str, bool, 'numeric'), 'show_traces') if isinstance(show_traces, str): - _check_option('show_traces', show_traces, ('auto', 'separate'), + _check_option('show_traces', show_traces, + ('auto', 'separate', 'vertex', 'label'), extra='when a string') if time_viewer == 'auto': time_viewer = not using_mayavi @@ -1968,26 +1978,15 @@ def _plot_stc(stc, subject, surface, hemi, colormap, time_label, show_traces = ( not using_mayavi and time_viewer and - brain._times is not None and - len(brain._times) > 1 + times is not None and + len(times) > 1 ) if show_traces and not time_viewer: raise ValueError('show_traces cannot be used when time_viewer=False') if using_mayavi and show_traces: raise NotImplementedError("show_traces=True is not available " "for the mayavi 3d backend.") - if time_viewer: - if using_mayavi: - from surfer import TimeViewer - TimeViewer(brain) - else: # PyVista - brain.setup_time_viewer(time_viewer=time_viewer, - show_traces=show_traces) - else: - if not using_mayavi: - brain.show() - - return brain + return show_traces, time_viewer def _glass_brain_crosshairs(params, x, y, z): @@ -1999,7 +1998,7 @@ def _glass_brain_crosshairs(params, x, y, z): def _cut_coords_to_ijk(cut_coords, img): - ijk = apply_trans(linalg.inv(img.affine), cut_coords) + ijk = apply_trans(np.linalg.inv(img.affine), cut_coords) ijk = np.clip(np.round(ijk).astype(int), 0, np.array(img.shape[:3]) - 1) return ijk @@ -2013,7 +2012,7 @@ def _load_subject_mri(mri, stc, subject, subjects_dir, name): from nibabel.spatialimages import SpatialImage _validate_type(mri, ('path-like', SpatialImage), name) if isinstance(mri, str): - subject = _check_subject(stc.subject, subject, True) + subject = _check_subject(stc.subject, subject) mri = nib.load(_check_mri(mri, subject, subjects_dir)) return mri @@ -2133,7 +2132,7 @@ def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None, del src _print_coord_trans(Transform('mri_voxel', 'ras', img.affine), prefix='Image affine ', units='mm', level='debug') - subject = _check_subject(src_subject, subject, True, kind=kind) + subject = _check_subject(src_subject, subject, first_kind=kind) stc_ijk = np.array( np.unravel_index(stc.vertices[0], img.shape[:3], order='F')).T assert stc_ijk.shape == (len(stc.vertices[0]), 3) @@ -2264,7 +2263,7 @@ def _onclick(event, params, verbose=None): params['fig'].canvas.draw() if mode == 'glass_brain': - subject = _check_subject(stc.subject, subject, True) + subject = _check_subject(stc.subject, subject) ras_mni_t = read_ras_mni_t(subject, subjects_dir) if not np.allclose(ras_mni_t['trans'], np.eye(4)): _print_coord_trans( @@ -2449,7 +2448,7 @@ def _check_views(surf, views, hemi, stc=None, backend=None): _validate_type(stc, SourceEstimate, 'stc', 'SourceEstimate when a 
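# --- editor's illustrative sketch (not part of the patch) -------------------
# NumPy-only version of the RAS -> voxel conversion done by
# ``_cut_coords_to_ijk`` above: apply the inverse affine, round, and clip to
# the image bounds. The 2 mm affine here is made up for the example.
import numpy as np

affine = np.diag([2., 2., 2., 1.])         # hypothetical 2 mm voxels
affine[:3, 3] = [-90., -126., -72.]        # hypothetical origin
shape = (91, 109, 91)

def cut_coords_to_ijk(cut_coords, affine, shape):
    xyz1 = np.append(cut_coords, 1.)
    ijk = (np.linalg.inv(affine) @ xyz1)[:3]
    return np.clip(np.round(ijk).astype(int), 0, np.array(shape) - 1)

print(cut_coords_to_ijk([0., 0., 0.], affine, shape))  # -> [45 63 36]
# -----------------------------------------------------------------------------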
flatmap is used') if backend is not None: - if backend != 'pyvista': + if backend not in ('pyvista', 'notebook'): raise RuntimeError('The PyVista 3D backend must be used to ' 'plot a flatmap') if (views == ['flat']) ^ (surf == 'flat'): # exactly only one of the two @@ -2472,7 +2471,8 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', time_unit='s', show_traces='auto', src=None, volume_options=1., view_layout='vertical', - add_data_kwargs=None, verbose=None): + add_data_kwargs=None, brain_kwargs=None, + verbose=None): """Plot VectorSourceEstimate with PySurfer. A "glass brain" is drawn and all dipoles defined in the source estimate @@ -2548,12 +2548,13 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', %(src_volume_options)s %(view_layout)s %(add_data_kwargs)s + %(brain_kwargs)s %(verbose)s Returns ------- - brain : surfer.Brain - A instance of :class:`surfer.Brain` from PySurfer. + brain : mne.viz.Brain + A instance of :class:`mne.viz.Brain`. Notes ----- @@ -2575,7 +2576,7 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', vector_alpha=vector_alpha, cortex=cortex, foreground=foreground, size=size, scale_factor=scale_factor, show_traces=show_traces, src=src, volume_options=volume_options, view_layout=view_layout, - add_data_kwargs=add_data_kwargs) + add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs) @verbose @@ -2871,7 +2872,7 @@ def plot_dipole_locations(dipoles, trans=None, subject=None, subjects_dir=None, u, v, w = ori.T renderer.quiver3d(x, y, z, u, v, w, scale=3 * scale, color=color, mode='arrow') - + renderer.show() fig = renderer.scene() else: raise ValueError('Mode must be "cone", "arrow" or orthoview", ' @@ -2942,7 +2943,8 @@ def snapshot_brain_montage(fig, montage, hide_sensors=True): @fill_doc -def plot_sensors_connectivity(info, con, picks=None): +def plot_sensors_connectivity(info, con, picks=None, + cbar_label='Connectivity'): """Visualize the sensor connectivity in 3D. Parameters @@ -2953,6 +2955,8 @@ def plot_sensors_connectivity(info, con, picks=None): The computed connectivity measure(s). %(picks_good_data)s Indices of selected channels. + cbar_label : str + Label for the colorbar. Returns ------- @@ -2968,7 +2972,7 @@ def plot_sensors_connectivity(info, con, picks=None): picks = _picks_to_idx(info, picks) if len(picks) != len(con): raise ValueError('The number of channels picked (%s) does not ' - 'correspond the size of the connectivity data ' + 'correspond to the size of the connectivity data ' '(%s)' % (len(picks), len(con))) # Plot the sensor locations @@ -2988,7 +2992,7 @@ def plot_sensors_connectivity(info, con, picks=None): con_nodes = list() con_val = list() for i, j in zip(ii, jj): - if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: + if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) @@ -3006,7 +3010,7 @@ def plot_sensors_connectivity(info, con, picks=None): vmin=vmin, vmax=vmax, reverse_lut=True) - renderer.scalarbar(source=tube, title='Phase Lag Index (PLI)') + renderer.scalarbar(source=tube, title=cbar_label) # Add the sensor names for the connections shown nodes_shown = list(set([n[0] for n in con_nodes] + @@ -3054,8 +3058,7 @@ def _plot_dipole_mri_orthoview(dipole, trans, subject, subjects_dir=None, dims = len(data) # Symmetric size assumed. 
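# --- editor's illustrative sketch (not part of the patch) -------------------
# The connectivity plot above drops sensor pairs that are closer than
# ``min_dist`` before drawing tubes; the distance test is just a Euclidean
# norm. Stand-alone version with fake positions:
import numpy as np

rng = np.random.default_rng(0)
sens_loc = rng.normal(size=(5, 3)) * 0.1   # fake sensor positions (meters)
min_dist = 0.05

ii, jj = np.tril_indices(len(sens_loc), -1)
kept = [(i, j) for i, j in zip(ii, jj)
        if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist]
print(f'kept {len(kept)} of {len(ii)} candidate connections')
# -----------------------------------------------------------------------------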
dd = dims // 2 if ax is None: - fig = plt.figure() - ax = fig.gca(projection='3d') + fig, ax = plt.subplots(1, subplot_kw=dict(projection='3d')) else: _validate_type(ax, Axes3D, "ax", "Axes3D") fig = ax.get_figure() diff --git a/mne/viz/__init__.py b/mne/viz/__init__.py index 9b54741afe7..a64bb4efdb9 100644 --- a/mne/viz/__init__.py +++ b/mne/viz/__init__.py @@ -6,7 +6,7 @@ from .topo import plot_topo_image_epochs, iter_topography from .utils import (tight_layout, mne_analyze_colormap, compare_fiff, ClickableImage, add_background_image, plot_sensors, - centers_to_edges) + centers_to_edges, concatenate_images) from ._3d import (plot_sparse_source_estimates, plot_source_estimates, plot_vector_source_estimates, plot_evoked_field, plot_dipole_locations, snapshot_brain_montage, diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 79d61c5ac79..2c2b77553e5 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -9,34 +9,39 @@ import contextlib from functools import partial +from io import BytesIO import os import os.path as op import sys import time +import copy import traceback import warnings import numpy as np -from scipy import sparse from collections import OrderedDict from .colormap import calculate_lut -from .surface import Surface +from .surface import _Surface from .view import views_dicts, _lh_views_dict -from .mplcanvas import MplCanvas -from .callback import (ShowView, IntSlider, TimeSlider, SmartSlider, - BumpColorbarPoints, UpdateColorbarScale) +from .callback import (ShowView, TimeCallBack, SmartCallBack, + UpdateLUT, UpdateColorbarScale) -from ..utils import _show_help, _get_color_list +from ..utils import (_show_help_fig, _get_color_list, concatenate_images, + _generate_default_filename, _save_ndarray_img) from .._3d import _process_clim, _handle_time, _check_views from ...externals.decorator import decorator from ...defaults import _handle_default from ...surface import mesh_edges from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm -from ...transforms import apply_trans +from ...transforms import apply_trans, invert_transform from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type, - use_log_level, Bunch, _ReuseCycle, warn) + use_log_level, Bunch, _ReuseCycle, warn, + get_subjects_dir) + + +_ARROW_MOVE = 10 # degrees per press @decorator @@ -49,32 +54,31 @@ def safe_event(fun, *args, **kwargs): class _Overlay(object): - def __init__(self, scalars, colormap, rng, opacity): + def __init__(self, scalars, colormap, rng, opacity, name): self._scalars = scalars self._colormap = colormap + assert rng is not None self._rng = rng self._opacity = opacity + self._name = name def to_colors(self): - from matplotlib.cm import get_cmap + from .._3d import _get_cmap from matplotlib.colors import ListedColormap + if isinstance(self._colormap, str): - cmap = get_cmap(self._colormap) + kind = self._colormap + cmap = _get_cmap(self._colormap) else: cmap = ListedColormap(self._colormap / 255.) 
- - def diff(x): - return np.max(x) - np.min(x) - - def norm(x, rng=None): - if rng is None: - rng = [np.min(x), np.max(x)] - return (x - rng[0]) / (rng[1] - rng[0]) + kind = str(type(self._colormap)) + logger.debug( + f'Color mapping {repr(self._name)} with {kind} ' + f'colormap and range {self._rng}') rng = self._rng - scalars = self._scalars - if diff(scalars) != 0: - scalars = norm(scalars, rng) + assert rng is not None + scalars = _norm(self._scalars, rng) colors = cmap(scalars) if self._opacity is not None: @@ -82,6 +86,14 @@ def norm(x, rng=None): return colors +def _norm(x, rng): + if rng[0] == rng[1]: + factor = 1 if rng[0] == 0 else 1e-6 * rng[0] + else: + factor = rng[1] - rng[0] + return (x - rng[0]) / factor + + class _LayeredMesh(object): def __init__(self, renderer, vertices, triangles, normals): self._renderer = renderer @@ -144,7 +156,8 @@ def add_overlay(self, scalars, colormap, rng, opacity, name): scalars=scalars, colormap=colormap, rng=rng, - opacity=opacity + opacity=opacity, + name=name, ) self._overlays[name] = overlay colors = overlay.to_colors() @@ -167,10 +180,9 @@ def remove_overlay(self, names): self.update() def _update(self): - if self._cache is None: + if self._cache is None or self._renderer is None: return - from ..backends._pyvista import _set_mesh_scalars - _set_mesh_scalars( + self._renderer._set_mesh_scalars( mesh=self._polydata, scalars=self._cache, name=self._default_scalars_name, @@ -189,7 +201,7 @@ def _clean(self): self._renderer = None def update_overlay(self, name, scalars=None, colormap=None, - opacity=None): + opacity=None, rng=None): overlay = self._overlays.get(name, None) if overlay is None: return @@ -199,6 +211,8 @@ def update_overlay(self, name, scalars=None, colormap=None, overlay._colormap = colormap if opacity is not None: overlay._opacity = opacity + if rng is not None: + overlay._rng = rng self.update() @@ -251,9 +265,15 @@ class Brain(object): variable. views : list | str The views to use. - offset : bool - If True, aligs origin with medial wall. Useful for viewing inflated - surface where hemispheres typically overlap (Default: True). + offset : bool | str + If True, shifts the right- or left-most x coordinate of the left and + right surfaces, respectively, to be at zero. This is useful for viewing + inflated surface where hemispheres typically overlap. Can be "auto" + (default) use True with inflated surfaces and False otherwise + (Default: 'auto'). Only used when ``hemi='both'``. + + .. versionchanged:: 0.23 + Default changed to "auto". show_toolbar : bool If True, toolbars will be shown for each view. offscreen : bool @@ -266,6 +286,16 @@ class Brain(object): units : str Can be 'm' or 'mm' (default). %(view_layout)s + silhouette : dict | bool + As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity + and ``decimate`` (level of decimation between 0 and 1 or None) of the + brain's silhouette to display. If True, the default values are used + and if False, no silhouette will be displayed. Defaults to False. + theme : str | path-like + Can be "auto" (default), "light", or "dark" or a path-like to a + custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection, + :mod:`qdarkstyle` respectively and `darkdetect + `__ is required. show : bool Display the window as soon as it is ready. Defaults to True. 
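# --- editor's illustrative sketch (not part of the patch) -------------------
# The ``_norm`` helper added above guards against a collapsed colour range:
# when rng[0] == rng[1] it substitutes a nonzero factor instead of dividing
# by zero. Stand-alone copy of that logic:
import numpy as np

def norm(x, rng):
    if rng[0] == rng[1]:
        factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
    else:
        factor = rng[1] - rng[0]
    return (np.asarray(x, float) - rng[0]) / factor

print(norm([0., 0.5, 1.], (0., 1.)))   # regular range  -> [0.  0.5 1. ]
print(norm([3., 3., 3.], (3., 3.)))    # collapsed range -> finite, no error
# -----------------------------------------------------------------------------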
@@ -303,16 +333,14 @@ class Brain(object): +---------------------------+--------------+---------------+ | foci | ✓ | | +---------------------------+--------------+---------------+ - | labels | ✓ | | - +---------------------------+--------------+---------------+ - | labels_dict | ✓ | | - +---------------------------+--------------+---------------+ - | remove_data | ✓ | | + | labels | ✓ | ✓ | +---------------------------+--------------+---------------+ | remove_foci | ✓ | | +---------------------------+--------------+---------------+ | remove_labels | ✓ | ✓ | +---------------------------+--------------+---------------+ + | remove_annotations | - | ✓ | + +---------------------------+--------------+---------------+ | scale_data_colormap | ✓ | | +---------------------------+--------------+---------------+ | save_image | ✓ | ✓ | @@ -335,17 +363,22 @@ class Brain(object): +---------------------------+--------------+---------------+ | flatmaps | | ✓ | +---------------------------+--------------+---------------+ + | vertex picking | | ✓ | + +---------------------------+--------------+---------------+ + | label picking | | ✓ | + +---------------------------+--------------+---------------+ """ def __init__(self, subject_id, hemi, surf, title=None, cortex="classic", alpha=1.0, size=800, background="black", foreground=None, figure=None, subjects_dir=None, - views='auto', offset=True, show_toolbar=False, + views='auto', offset='auto', show_toolbar=False, offscreen=False, interaction='trackball', units='mm', - view_layout='vertical', show=True): - from ..backends.renderer import backend, _get_renderer, _get_3d_backend + view_layout='vertical', silhouette=False, theme='auto', + show=True): + from ..backends.renderer import backend, _get_renderer + from .._3d import _get_cmap from matplotlib.colors import colorConverter - from matplotlib.cm import get_cmap if hemi in ('both', 'split'): self._hemis = ('lh', 'rh') @@ -387,10 +420,12 @@ def __init__(self, subject_id, hemi, surf, title=None, if len(size) not in (1, 2): raise ValueError('"size" parameter must be an int or length-2 ' 'sequence of ints.') - self._size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple + size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple + subjects_dir = get_subjects_dir(subjects_dir) + + self.theme = theme self.time_viewer = False - self.notebook = (_get_3d_backend() == "notebook") self._hemi = hemi self._units = units self._alpha = float(alpha) @@ -398,11 +433,28 @@ def __init__(self, subject_id, hemi, surf, title=None, self._subjects_dir = subjects_dir self._views = views self._times = None - self._label_data = {'lh': list(), 'rh': list()} + self._vertex_to_label_id = dict() + self._annotation_labels = dict() + self._labels = {'lh': list(), 'rh': list()} + self._unnamed_label_id = 0 # can only grow + self._annots = {'lh': list(), 'rh': list()} self._layered_meshes = {} - # for now only one color bar can be added - # since it is the same for all figures - self._colorbar_added = False + self._elevation_rng = [15, 165] # range of motion of camera on theta + self._lut_locked = None + # default values for silhouette + self._silhouette = { + 'color': self._bg_color, + 'line_width': 2, + 'alpha': alpha, + 'decimate': 0.9, + } + _validate_type(silhouette, (dict, bool), 'silhouette') + if isinstance(silhouette, dict): + self._silhouette.update(silhouette) + self.silhouette = True + else: + self.silhouette = silhouette + self._scalar_bar = None # for now only one time label can be added # since it is the same for 
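# --- editor's usage sketch (not part of the patch) --------------------------
# Hedged example of the new Brain constructor options documented above
# (``offset='auto'``, ``silhouette`` as a dict, ``theme``). Subject and
# subjects_dir are placeholders.
from mne.viz import Brain

brain = Brain(
    'sub-01', hemi='both', surf='inflated',
    subjects_dir='/path/to/subjects',
    offset='auto',                                 # shift hemis on inflated
    silhouette=dict(color='white', line_width=1),  # outline the brain
    theme='dark',
)
# -----------------------------------------------------------------------------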
all figures self._time_label_added = False @@ -414,25 +466,29 @@ def __init__(self, subject_id, hemi, surf, title=None, geo_kwargs = self._cortex_colormap(cortex) # evaluate at the midpoint of the used colormap val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin']) - self._brain_color = get_cmap(geo_kwargs['colormap'])(val) + self._brain_color = _get_cmap(geo_kwargs['colormap'])(val) # load geometry for one or both hemispheres as necessary + _validate_type(offset, (str, bool), 'offset') + if isinstance(offset, str): + _check_option('offset', offset, ('auto',), extra='when str') + offset = (surf in ('inflated', 'flat')) offset = None if (not offset or hemi != 'both') else 0.0 + logger.debug(f'Hemi offset: {offset}') - self._renderer = _get_renderer(name=self._title, size=self._size, + self._renderer = _get_renderer(name=self._title, size=size, bgcolor=background, shape=shape, fig=figure) + self._renderer._window_close_connect(self._clean) + self._renderer._window_set_theme(theme) + self.plotter = self._renderer.plotter - if _get_3d_backend() == "pyvista": - self.plotter = self._renderer.plotter - self.window = self.plotter.app_window - self.window.signal_close.connect(self._clean) - + self._setup_canonical_rotation() for h in self._hemis: # Initialize a Surface object as the geometry - geo = Surface(subject_id, h, surf, subjects_dir, offset, - units=self._units) + geo = _Surface(subject_id, h, surf, subjects_dir, offset, + units=self._units, x_dir=self._rigid[0, :3]) # Load in the geometry and curvature geo.load_geometry() geo.load_curvature() @@ -460,6 +516,15 @@ def __init__(self, subject_id, hemi, surf, title=None, else: actor = self._layered_meshes[h]._actor self._renderer.plotter.add_actor(actor) + if self.silhouette: + mesh = self._layered_meshes[h] + self._renderer._silhouette( + mesh=mesh._polydata, + color=self._silhouette["color"], + line_width=self._silhouette["line_width"], + alpha=self._silhouette["alpha"], + decimate=self._silhouette["decimate"], + ) self._renderer.set_camera(**views_dicts[h][v]) self.interaction = interaction @@ -474,8 +539,20 @@ def __init__(self, subject_id, hemi, surf, title=None, if surf == 'flat': self._renderer.set_interaction("rubber_band_2d") - if hemi == 'rh' and hasattr(self._renderer, "_orient_lights"): - self._renderer._orient_lights() + def _setup_canonical_rotation(self): + from ...coreg import fit_matched_points, _trans_from_params + self._rigid = np.eye(4) + try: + xfm = read_talxfm(self._subject_id, self._subjects_dir) + except Exception: + return + # XYZ+origin + halfway + pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) + pts_subj = apply_trans(invert_transform(xfm), pts_tal) + # we fit with scaling enabled, but then discard it (we just need + # the rigid-body components) + params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params') + self._rigid[:] = _trans_from_params((True, True, False), params[:6]) def setup_time_viewer(self, time_viewer=True, show_traces=True): """Configure the time viewer parameters. @@ -487,59 +564,79 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): show_traces : bool If True, enable visualization of time traces. Defaults to True. 
+ + Notes + ----- + The keyboard shortcuts are the following: + + '?': Display help window + 'i': Toggle interface + 's': Apply auto-scaling + 'r': Restore original clim + 'c': Clear all traces + 'n': Shift the time forward by the playback speed + 'b': Shift the time backward by the playback speed + 'Space': Start/Pause playback + 'Up': Decrease camera elevation angle + 'Down': Increase camera elevation angle + 'Left': Decrease camera azimuth angle + 'Right': Increase camera azimuth angle """ if self.time_viewer: return + if not self._data: + raise ValueError("No data to visualize. See ``add_data``.") self.time_viewer = time_viewer self.orientation = list(_lh_views_dict.keys()) self.default_smoothing_range = [0, 15] - # setup notebook - if self.notebook: - self._configure_notebook() - return - # Default configuration self.playback = False self.visibility = False self.refresh_rate_ms = max(int(round(1000. / 60.)), 1) self.default_scaling_range = [0.2, 2.0] self.default_playback_speed_range = [0.01, 1] - self.default_playback_speed_value = 0.05 + self.default_playback_speed_value = 0.01 self.default_status_bar_msg = "Press ? for help" + self.default_label_extract_modes = { + "stc": ["mean", "max"], + "src": ["mean_flip", "pca_flip", "auto"], + } + self.default_trace_modes = ('vertex', 'label') + self.annot = None + self.label_extract_mode = None all_keys = ('lh', 'rh', 'vol') self.act_data_smooth = {key: (None, None) for key in all_keys} - self.color_cycle = None + self.color_list = _get_color_list() + # remove grey for better contrast on the brain + self.color_list.remove("#7f7f7f") + self.color_cycle = _ReuseCycle(self.color_list) self.mpl_canvas = None + self.help_canvas = None + self.rms = None + self.picked_patches = {key: list() for key in all_keys} self.picked_points = {key: list() for key in all_keys} self.pick_table = dict() + self._spheres = list() self._mouse_no_mvt = -1 - self.icons = dict() - self.actions = dict() self.callbacks = dict() - self.sliders = dict() + self.widgets = dict() self.keys = ('fmin', 'fmid', 'fmax') - self.slider_length = 0.02 - self.slider_width = 0.04 - self.slider_color = (0.43137255, 0.44313725, 0.45882353) - self.slider_tube_width = 0.04 - self.slider_tube_color = (0.69803922, 0.70196078, 0.70980392) - - # Direct access parameters: - self._iren = self._renderer.plotter.iren - self.main_menu = self.plotter.main_menu - self.tool_bar = self.window.addToolBar("toolbar") - self.status_bar = self.window.statusBar() - self.interactor = self.plotter.interactor # Derived parameters: self.playback_speed = self.default_playback_speed_value _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces') self.interactor_fraction = 0.25 if isinstance(show_traces, str): - assert show_traces == 'separate' # should be guaranteed earlier self.show_traces = True - self.separate_canvas = True + self.separate_canvas = False + self.traces_mode = 'vertex' + if show_traces == 'separate': + self.separate_canvas = True + elif show_traces == 'label': + self.traces_mode = 'label' + else: + assert show_traces == 'vertex' # guaranteed above else: if isinstance(show_traces, bool): self.show_traces = show_traces @@ -551,40 +648,52 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): f'got {show_traces}') self.show_traces = True self.interactor_fraction = show_traces + self.traces_mode = 'vertex' self.separate_canvas = False del show_traces - self._spheres = list() - self._load_icons() self._configure_time_label() - self._configure_sliders() 
self._configure_scalar_bar() - self._configure_playback() - self._configure_point_picking() - self._configure_menu() + self._configure_shortcuts() + self._configure_picking() self._configure_tool_bar() + self._configure_dock() + self._configure_menu() self._configure_status_bar() - + self._configure_playback() + self._configure_help() # show everything at the end self.toggle_interface() - with self.ensure_minimum_sizes(): - self.show() + self._renderer.show() + + # sizes could change, update views + for hemi in ('lh', 'rh'): + for ri, ci, v in self._iter_views(hemi): + self.show_view(view=v, row=ri, col=ci) + self._renderer._process_events() + + self._renderer._update() + # finally, show the MplCanvas + if self.show_traces: + self.mpl_canvas.show() @safe_event def _clean(self): # resolve the reference cycle - self.clear_points() + self.clear_glyphs() + self.remove_annotations() # clear init actors for hemi in self._hemis: self._layered_meshes[hemi]._clean() self._clear_callbacks() + self._clear_widgets() if getattr(self, 'mpl_canvas', None) is not None: self.mpl_canvas.clear() if getattr(self, 'act_data_smooth', None) is not None: for key in list(self.act_data_smooth.keys()): self.act_data_smooth[key] = None # XXX this should be done in PyVista - for renderer in self.plotter.renderers: + for renderer in self._renderer._all_renderers: renderer.RemoveAllLights() # app_window cannot be set to None because it is used in __del__ for key in ('lighting', 'interactor', '_RenderWindow'): @@ -592,44 +701,15 @@ def _clean(self): # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None # to resolve the ref to vtkGenericRenderWindowInteractor self.plotter._Iren = _FakeIren() - if getattr(self.plotter, 'scalar_bar', None) is not None: - self.plotter.scalar_bar = None if getattr(self.plotter, 'picker', None) is not None: self.plotter.picker = None # XXX end PyVista - for key in ('reps', 'plotter', 'main_menu', 'window', 'tool_bar', - 'status_bar', 'interactor', 'mpl_canvas', 'time_actor', - 'picked_renderer', 'act_data_smooth', '_iren', - 'actions', 'sliders', 'geo', '_hemi_actors', '_data'): + for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar', + 'interactor', 'mpl_canvas', 'time_actor', + 'picked_renderer', 'act_data_smooth', '_scalar_bar', + 'actions', 'widgets', 'geo', '_data'): setattr(self, key, None) - @contextlib.contextmanager - def ensure_minimum_sizes(self): - """Ensure that widgets respect the windows size.""" - from ..backends._pyvista import _process_events - sz = self._size - adjust_mpl = self.show_traces and not self.separate_canvas - if not adjust_mpl: - yield - else: - mpl_h = int(round((sz[1] * self.interactor_fraction) / - (1 - self.interactor_fraction))) - self.mpl_canvas.canvas.setMinimumSize(sz[0], mpl_h) - try: - yield - finally: - self.splitter.setSizes([sz[1], mpl_h]) - _process_events(self.plotter) - _process_events(self.plotter) - self.mpl_canvas.canvas.setMinimumSize(0, 0) - _process_events(self.plotter) - _process_events(self.plotter) - # sizes could change, update views - for hemi in ('lh', 'rh'): - for ri, ci, v in self._iter_views(hemi): - self.show_view(view=v, row=ri, col=ci) - _process_events(self.plotter) - def toggle_interface(self, value=None): """Toggle the interface. 
@@ -645,47 +725,26 @@ def toggle_interface(self, value=None): else: self.visibility = value - # update tool bar icon - if self.visibility: - self.actions["visibility"].setIcon(self.icons["visibility_on"]) - else: - self.actions["visibility"].setIcon(self.icons["visibility_off"]) - - # manage sliders - for slider in self.plotter.slider_widgets: - slider_rep = slider.GetRepresentation() + # update tool bar and dock + with self._renderer._window_ensure_minimum_sizes(): if self.visibility: - slider_rep.VisibilityOn() + self._renderer._dock_show() + self._renderer._tool_bar_update_button_icon( + name="visibility", icon_name="visibility_on") else: - slider_rep.VisibilityOff() + self._renderer._dock_hide() + self._renderer._tool_bar_update_button_icon( + name="visibility", icon_name="visibility_off") - # manage time label - time_label = self._data['time_label'] - # if we actually have time points, we will show the slider so - # hide the time actor - have_ts = self._times is not None and len(self._times) > 1 - if self.time_actor is not None: - if self.visibility and time_label is not None and not have_ts: - self.time_actor.SetInput(time_label(self._current_time)) - self.time_actor.VisibilityOn() - else: - self.time_actor.VisibilityOff() - - self._update() + self._renderer._update() def apply_auto_scaling(self): """Detect automatically fitting scaling parameters.""" self._update_auto_scaling() - for key in ('fmin', 'fmid', 'fmax'): - self.reps[key].SetValue(self._data[key]) - self._update() def restore_user_scaling(self): """Restore original scaling parameters.""" self._update_auto_scaling(restore=True) - for key in ('fmin', 'fmid', 'fmax'): - self.reps[key].SetValue(self._data[key]) - self._update() def toggle_playback(self, value=None): """Toggle time playback. @@ -704,9 +763,11 @@ def toggle_playback(self, value=None): # update tool bar icon if self.playback: - self.actions["play"].setIcon(self.icons["pause"]) + self._renderer._tool_bar_update_button_icon( + name="play", icon_name="pause") else: - self.actions["play"].setIcon(self.icons["play"]) + self._renderer._tool_bar_update_button_icon( + name="play", icon_name="play") if self.playback: time_data = self._data['time'] @@ -724,7 +785,7 @@ def reset(self): self._data["initial_time_idx"], update_widget=True, ) - self._update() + self._renderer._update() def set_playback_speed(self, speed): """Set the time playback speed. 
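(For orientation before the playback hunks that follow: the timer configured via _configure_playback fires every refresh_rate_ms and nudges the displayed time forward, stopping at the last sample. A rough self-contained sketch of one tick, assuming — as the 'n'/'b' shortcut handlers suggest — that playback_speed is the number of seconds of data advanced per step; hypothetical helper, not the actual _advance body:

import numpy as np

def _advance_step(current_time, times, playback_speed):
    # shift forward by playback_speed seconds of data, clamp at the end
    new_time = min(current_time + playback_speed, times[-1])
    idx = int(np.argmin(np.abs(times - new_time)))  # closest time index to show
    keep_playing = new_time < times[-1]
    return new_time, idx, keep_playing

times = np.linspace(0, 0.5, 101)  # illustrative 5 ms sampling
t, idx, playing = _advance_step(0.0, times, playback_speed=0.01)
)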
@@ -762,128 +823,93 @@ def _advance(self): if time_point == max_time: self.toggle_playback(value=False) - def _set_slider_style(self): - for slider in self.sliders.values(): - if slider is not None: - slider_rep = slider.GetRepresentation() - slider_rep.SetSliderLength(self.slider_length) - slider_rep.SetSliderWidth(self.slider_width) - slider_rep.SetTubeWidth(self.slider_tube_width) - slider_rep.GetSliderProperty().SetColor(self.slider_color) - slider_rep.GetTubeProperty().SetColor(self.slider_tube_color) - slider_rep.GetLabelProperty().SetShadow(False) - slider_rep.GetLabelProperty().SetBold(True) - slider_rep.GetLabelProperty().SetColor(self._fg_color) - slider_rep.GetTitleProperty().ShallowCopy( - slider_rep.GetLabelProperty() - ) - slider_rep.GetCapProperty().SetOpacity(0) - - def _configure_notebook(self): - from ._notebook import _NotebookInteractor - self._renderer.figure.display = _NotebookInteractor(self) - def _configure_time_label(self): self.time_actor = self._data.get('time_actor') if self.time_actor is not None: self.time_actor.SetPosition(0.5, 0.03) self.time_actor.GetTextProperty().SetJustificationToCentered() self.time_actor.GetTextProperty().BoldOn() - self.time_actor.VisibilityOff() def _configure_scalar_bar(self): - if self._colorbar_added: - scalar_bar = self.plotter.scalar_bar - scalar_bar.SetOrientationToVertical() - scalar_bar.SetHeight(0.6) - scalar_bar.SetWidth(0.05) - scalar_bar.SetPosition(0.02, 0.2) - - def _configure_sliders(self): - # Orientation slider - # Use 'lh' as a reference for orientation for 'both' - if self._hemi == 'both': - hemis_ref = ['lh'] - else: - hemis_ref = self._hemis - for hemi in hemis_ref: - for ri, ci, view in self._iter_views(hemi): - orientation_name = f"orientation_{hemi}_{ri}_{ci}" - self.plotter.subplot(ri, ci) - if view == 'flat': - self.callbacks[orientation_name] = None - continue - self.callbacks[orientation_name] = ShowView( - plotter=self.plotter, - brain=self, - orientation=self.orientation, - hemi=hemi, - row=ri, - col=ci, - ) - self.sliders[orientation_name] = \ - self.plotter.add_text_slider_widget( - self.callbacks[orientation_name], - value=0, - data=self.orientation, - pointa=(0.82, 0.74), - pointb=(0.98, 0.74), - event_type='always' - ) - orientation_rep = \ - self.sliders[orientation_name].GetRepresentation() - orientation_rep.ShowSliderLabelOff() - self.callbacks[orientation_name].slider_rep = orientation_rep - self.callbacks[orientation_name](view, update_widget=True) - - # Put other sliders on the bottom right view - ri, ci = np.array(self._subplot_shape) - 1 - self.plotter.subplot(ri, ci) - - # Smoothing slider - self.callbacks["smoothing"] = IntSlider( - plotter=self.plotter, - callback=self.set_data_smoothing, - first_call=False, - ) - self.sliders["smoothing"] = self.plotter.add_slider_widget( - self.callbacks["smoothing"], - value=self._data['smoothing_steps'], - rng=self.default_smoothing_range, title="smoothing", - pointa=(0.82, 0.90), - pointb=(0.98, 0.90) - ) - self.callbacks["smoothing"].slider_rep = \ - self.sliders["smoothing"].GetRepresentation() - - # Time slider - max_time = len(self._data['time']) - 1 - # VTK on macOS bombs if we create these then hide them, so don't - # even create them - if max_time < 1: + if self._scalar_bar is not None: + self._scalar_bar.SetOrientationToVertical() + self._scalar_bar.SetHeight(0.6) + self._scalar_bar.SetWidth(0.05) + self._scalar_bar.SetPosition(0.02, 0.2) + + def _configure_dock_time_widget(self, layout=None): + len_time = len(self._data['time']) - 1 + if 
len_time < 1: + return + layout = self._renderer.dock_layout if layout is None else layout + hlayout = self._renderer._dock_add_layout(vertical=False) + self.widgets["min_time"] = self._renderer._dock_add_label( + value="-", layout=hlayout) + self._renderer._dock_add_stretch(hlayout) + self.widgets["current_time"] = self._renderer._dock_add_label( + value="x", layout=hlayout) + self._renderer._dock_add_stretch(hlayout) + self.widgets["max_time"] = self._renderer._dock_add_label( + value="+", layout=hlayout) + self._renderer._layout_add_widget(layout, hlayout) + min_time = float(self._data['time'][0]) + max_time = float(self._data['time'][-1]) + self.widgets["min_time"].set_value(f"{min_time: .3f}") + self.widgets["max_time"].set_value(f"{max_time: .3f}") + self.widgets["current_time"].set_value(f"{self._current_time: .3f}") + + def _configure_dock_playback_widget(self, name): + layout = self._renderer._dock_add_group_box(name) + len_time = len(self._data['time']) - 1 + + # Time widget + if len_time < 1: self.callbacks["time"] = None - self.sliders["time"] = None + self.widgets["time"] = None else: - self.callbacks["time"] = TimeSlider( - plotter=self.plotter, + self.callbacks["time"] = TimeCallBack( brain=self, - first_call=False, callback=self.plot_time_line, ) - self.sliders["time"] = self.plotter.add_slider_widget( - self.callbacks["time"], + self.widgets["time"] = self._renderer._dock_add_slider( + name="Time (s)", value=self._data['time_idx'], - rng=[0, max_time], - pointa=(0.23, 0.1), - pointb=(0.77, 0.1), - event_type='always' + rng=[0, len_time], + double=True, + callback=self.callbacks["time"], + compact=False, + layout=layout, ) - self.callbacks["time"].slider_rep = \ - self.sliders["time"].GetRepresentation() - # configure properties of the time slider - self.sliders["time"].GetRepresentation().SetLabelFormat( - 'idx=%0.1f') + self.callbacks["time"].widget = self.widgets["time"] + + # Time labels + if len_time < 1: + self.widgets["min_time"] = None + self.widgets["max_time"] = None + self.widgets["current_time"] = None + else: + self._configure_dock_time_widget(layout) + self.callbacks["time"].label = self.widgets["current_time"] + # Playback speed widget + if len_time < 1: + self.callbacks["playback_speed"] = None + self.widgets["playback_speed"] = None + else: + self.callbacks["playback_speed"] = SmartCallBack( + callback=self.set_playback_speed, + ) + self.widgets["playback_speed"] = self._renderer._dock_add_spin_box( + name="Speed", + value=self.default_playback_speed_value, + rng=self.default_playback_speed_range, + callback=self.callbacks["playback_speed"], + layout=layout, + ) + self.callbacks["playback_speed"].widget = \ + self.widgets["playback_speed"] + + # Time label current_time = self._current_time assert current_time is not None # should never be the case, float time_label = self._data['time_label'] @@ -891,139 +917,256 @@ def _configure_sliders(self): current_time = time_label(current_time) else: current_time = time_label - if self.sliders["time"] is not None: - self.sliders["time"].GetRepresentation().SetTitleText(current_time) if self.time_actor is not None: self.time_actor.SetInput(current_time) del current_time - # Playback speed slider - if self.sliders["time"] is None: - self.callbacks["playback_speed"] = None - self.sliders["playback_speed"] = None - else: - self.callbacks["playback_speed"] = SmartSlider( - plotter=self.plotter, - callback=self.set_playback_speed, + def _configure_dock_orientation_widget(self, name): + layout = 
self._renderer._dock_add_group_box(name) + # Renderer widget + rends = [str(i) for i in range(len(self._renderer._all_renderers))] + if len(rends) > 1: + def select_renderer(idx): + idx = int(idx) + loc = self._renderer._index_to_loc(idx) + self.plotter.subplot(*loc) + + self.callbacks["renderer"] = SmartCallBack( + callback=select_renderer, ) - self.sliders["playback_speed"] = self.plotter.add_slider_widget( - self.callbacks["playback_speed"], - value=self.default_playback_speed_value, - rng=self.default_playback_speed_range, title="speed", - pointa=(0.02, 0.1), - pointb=(0.18, 0.1), - event_type='always' + self.widgets["renderer"] = self._renderer._dock_add_combo_box( + name="Renderer", + value="0", + rng=rends, + callback=self.callbacks["renderer"], + layout=layout, ) - self.callbacks["playback_speed"].slider_rep = \ - self.sliders["playback_speed"].GetRepresentation() + self.callbacks["renderer"].widget = \ + self.widgets["renderer"] - # Colormap slider - pointa = np.array((0.82, 0.26)) - pointb = np.array((0.98, 0.26)) - shift = np.array([0, 0.1]) + # Use 'lh' as a reference for orientation for 'both' + if self._hemi == 'both': + hemis_ref = ['lh'] + else: + hemis_ref = self._hemis + orientation_data = [None] * len(rends) + for hemi in hemis_ref: + for ri, ci, view in self._iter_views(hemi): + idx = self._renderer._loc_to_index((ri, ci)) + if view == 'flat': + _data = None + else: + _data = dict(default=view, hemi=hemi, row=ri, col=ci) + orientation_data[idx] = _data + self.callbacks["orientation"] = ShowView( + brain=self, + data=orientation_data, + ) + self.widgets["orientation"] = self._renderer._dock_add_combo_box( + name=None, + value=self.orientation[0], + rng=self.orientation, + callback=self.callbacks["orientation"], + layout=layout, + ) - for idx, key in enumerate(self.keys): - title = "clim" if not idx else "" + def _configure_dock_colormap_widget(self, name): + layout = self._renderer._dock_add_group_box(name) + self._renderer._dock_add_label( + value="min / mid / max", + align=True, + layout=layout, + ) + up = UpdateLUT(brain=self) + for key in self.keys: + hlayout = self._renderer._dock_add_layout(vertical=False) rng = _get_range(self) - self.callbacks[key] = BumpColorbarPoints( - plotter=self.plotter, - brain=self, - name=key + self.callbacks[key] = lambda value, key=key: up(**{key: value}) + self.widgets[key] = self._renderer._dock_add_slider( + name=None, + value=self._data[key], + rng=rng, + callback=self.callbacks[key], + double=True, + layout=hlayout, ) - self.sliders[key] = self.plotter.add_slider_widget( - self.callbacks[key], + self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box( + name=None, value=self._data[key], - rng=rng, title=title, - pointa=pointa + idx * shift, - pointb=pointb + idx * shift, - event_type="always", + callback=self.callbacks[key], + rng=rng, + layout=hlayout, ) - - # fscale - self.callbacks["fscale"] = UpdateColorbarScale( - plotter=self.plotter, - brain=self, + up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]] + self._renderer._layout_add_widget(layout, hlayout) + + # reset / minus / plus + hlayout = self._renderer._dock_add_layout(vertical=False) + self._renderer._dock_add_label( + value="Rescale", + align=True, + layout=hlayout, ) - self.sliders["fscale"] = self.plotter.add_slider_widget( - self.callbacks["fscale"], - value=1.0, - rng=self.default_scaling_range, title="fscale", - pointa=(0.82, 0.10), - pointb=(0.98, 0.10) + self.widgets["reset"] = self._renderer._dock_add_button( + name="↺", + 
callback=self.restore_user_scaling, + layout=hlayout, ) - self.callbacks["fscale"].slider_rep = \ - self.sliders["fscale"].GetRepresentation() + for key, char, val in (("fminus", "➖", 1.2 ** -0.25), + ("fplus", "➕", 1.2 ** 0.25)): + self.callbacks[key] = UpdateColorbarScale( + brain=self, + factor=val, + ) + self.widgets[key] = self._renderer._dock_add_button( + name=char, + callback=self.callbacks[key], + layout=hlayout, + ) + self._renderer._layout_add_widget(layout, hlayout) # register colorbar slider representations - self.reps = \ - {key: self.sliders[key].GetRepresentation() for key in self.keys} - for name in ("fmin", "fmid", "fmax", "fscale"): - self.callbacks[name].reps = self.reps + widgets = {key: self.widgets[key] for key in self.keys} + for name in ("fmin", "fmid", "fmax", "fminus", "fplus"): + self.callbacks[name].widgets = widgets - # set the slider style - self._set_slider_style() + def _configure_dock_trace_widget(self, name): + if not self.show_traces: + return + # do not show trace mode for volumes + if (self._data.get('src', None) is not None and + self._data['src'].kind == 'volume'): + self._configure_vertex_time_course() + return + + layout = self._renderer._dock_add_group_box(name) + + # setup candidate annots + def _set_annot(annot): + self.clear_glyphs() + self.remove_labels() + self.remove_annotations() + self.annot = annot + + if annot == 'None': + self.traces_mode = 'vertex' + self._configure_vertex_time_course() + else: + self.traces_mode = 'label' + self._configure_label_time_course() + self._renderer._update() + + # setup label extraction parameters + def _set_label_mode(mode): + if self.traces_mode != 'label': + return + glyphs = copy.deepcopy(self.picked_patches) + self.label_extract_mode = mode + self.clear_glyphs() + for hemi in self._hemis: + for label_id in glyphs[hemi]: + label = self._annotation_labels[hemi][label_id] + vertex_id = label.vertices[0] + self._add_label_glyph(hemi, None, vertex_id) + self.mpl_canvas.axes.relim() + self.mpl_canvas.axes.autoscale_view() + self.mpl_canvas.update_plot() + self._renderer._update() + + from ...source_estimate import _get_allowed_label_modes + from ...label import _read_annot_cands + dir_name = op.join(self._subjects_dir, self._subject_id, 'label') + cands = _read_annot_cands(dir_name, raise_error=False) + cands = cands + ['None'] + self.annot = cands[0] + stc = self._data["stc"] + modes = _get_allowed_label_modes(stc) + if self._data["src"] is None: + modes = [m for m in modes if m not in + self.default_label_extract_modes["src"]] + self.label_extract_mode = modes[-1] + if self.traces_mode == 'vertex': + _set_annot('None') + else: + _set_annot(self.annot) + self.widgets["annotation"] = self._renderer._dock_add_combo_box( + name="Annotation", + value=self.annot, + rng=cands, + callback=_set_annot, + layout=layout, + ) + self.widgets["extract_mode"] = self._renderer._dock_add_combo_box( + name="Extract mode", + value=self.label_extract_mode, + rng=modes, + callback=_set_label_mode, + layout=layout, + ) + + def _configure_dock(self): + self._renderer._dock_initialize() + self._configure_dock_playback_widget(name="Playback") + self._configure_dock_orientation_widget(name="Orientation") + self._configure_dock_colormap_widget(name="Color Limits") + self._configure_dock_trace_widget(name="Trace") + + # Smoothing widget + self.callbacks["smoothing"] = SmartCallBack( + callback=self.set_data_smoothing, + ) + self.widgets["smoothing"] = self._renderer._dock_add_spin_box( + name="Smoothing", + 
value=self._data['smoothing_steps'], + rng=self.default_smoothing_range, + callback=self.callbacks["smoothing"], + double=False + ) + self.callbacks["smoothing"].widget = \ + self.widgets["smoothing"] + + self._renderer._dock_finalize() def _configure_playback(self): - self.plotter.add_callback(self._play, self.refresh_rate_ms) + self._renderer._playback_initialize(self._play, self.refresh_rate_ms) - def _configure_point_picking(self): - if not self.show_traces: - return - from ..backends._pyvista import _update_picking_callback - # use a matplotlib canvas - self.color_cycle = _ReuseCycle(_get_color_list()) - win = self.plotter.app_window - dpi = win.windowHandle().screen().logicalDotsPerInch() - ratio = (1 - self.interactor_fraction) / self.interactor_fraction - w = self.interactor.geometry().width() - h = self.interactor.geometry().height() / ratio + def _configure_mplcanvas(self): # Get the fractional components for the brain and mpl - self.mpl_canvas = MplCanvas(self, w / dpi, h / dpi, dpi) + self.mpl_canvas = self._renderer._window_get_mplcanvas( + brain=self, + interactor_fraction=self.interactor_fraction, + show_traces=self.show_traces, + separate_canvas=self.separate_canvas + ) xlim = [np.min(self._data['time']), np.max(self._data['time'])] with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=UserWarning) self.mpl_canvas.axes.set(xlim=xlim) if not self.separate_canvas: - from PyQt5.QtWidgets import QSplitter - from PyQt5.QtCore import Qt - canvas = self.mpl_canvas.canvas - vlayout = self.plotter.frame.layout() - vlayout.removeWidget(self.interactor) - self.splitter = splitter = QSplitter( - orientation=Qt.Vertical, parent=self.plotter.frame) - vlayout.addWidget(splitter) - splitter.addWidget(self.interactor) - splitter.addWidget(canvas) + self._renderer._window_adjust_mplcanvas_layout() self.mpl_canvas.set_color( bg_color=self._bg_color, fg_color=self._fg_color, ) - self.mpl_canvas.show() - # get data for each hemi - for idx, hemi in enumerate(['vol', 'lh', 'rh']): - hemi_data = self._data.get(hemi) - if hemi_data is not None: - act_data = hemi_data['array'] - if act_data.ndim == 3: - act_data = np.linalg.norm(act_data, axis=1) - smooth_mat = hemi_data.get('smooth_mat') - vertices = hemi_data['vertices'] - if hemi == 'vol': - assert smooth_mat is None - smooth_mat = sparse.csr_matrix( - (np.ones(len(vertices)), - (vertices, np.arange(len(vertices))))) - self.act_data_smooth[hemi] = (act_data, smooth_mat) + def _configure_vertex_time_course(self): + if not self.show_traces: + return + if self.mpl_canvas is None: + self._configure_mplcanvas() + else: + self.clear_glyphs() - # plot the GFP + # plot RMS of the activation y = np.concatenate(list(v[0] for v in self.act_data_smooth.values() if v[0] is not None)) - y = np.linalg.norm(y, axis=0) / np.sqrt(len(y)) - self.mpl_canvas.axes.plot( - self._data['time'], y, - lw=3, label='GFP', zorder=3, color=self._fg_color, + rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y)) + del y + + self.rms, = self.mpl_canvas.axes.plot( + self._data['time'], rms, + lw=3, label='RMS', zorder=3, color=self._fg_color, alpha=0.5, ls=':') # now plot the time line @@ -1040,7 +1183,7 @@ def _configure_point_picking(self): # simulate a picked renderer if self._hemi in ('both', 'rh') or hemi == 'vol': idx = 0 - self.picked_renderer = self.plotter.renderers[idx] + self.picked_renderer = self._renderer._all_renderers[idx] # initialize the default point if self._data['initial_time'] is not None: @@ -1056,110 +1199,140 @@ def 
_configure_point_picking(self): else: mesh = self._layered_meshes[hemi]._polydata vertex_id = vertices[ind[0]] - self.add_point(hemi, mesh, vertex_id) + self._add_vertex_glyph(hemi, mesh, vertex_id) + + def _configure_picking(self): + # get data for each hemi + from scipy import sparse + for idx, hemi in enumerate(['vol', 'lh', 'rh']): + hemi_data = self._data.get(hemi) + if hemi_data is not None: + act_data = hemi_data['array'] + if act_data.ndim == 3: + act_data = np.linalg.norm(act_data, axis=1) + smooth_mat = hemi_data.get('smooth_mat') + vertices = hemi_data['vertices'] + if hemi == 'vol': + assert smooth_mat is None + smooth_mat = sparse.csr_matrix( + (np.ones(len(vertices)), + (vertices, np.arange(len(vertices))))) + self.act_data_smooth[hemi] = (act_data, smooth_mat) - _update_picking_callback( - self.plotter, + self._renderer._update_picking_callback( self._on_mouse_move, self._on_button_press, self._on_button_release, self._on_pick ) - def _load_icons(self): - from PyQt5.QtGui import QIcon - from ..backends._pyvista import _init_resources - _init_resources() - self.icons["help"] = QIcon(":/help.svg") - self.icons["play"] = QIcon(":/play.svg") - self.icons["pause"] = QIcon(":/pause.svg") - self.icons["reset"] = QIcon(":/reset.svg") - self.icons["scale"] = QIcon(":/scale.svg") - self.icons["clear"] = QIcon(":/clear.svg") - self.icons["movie"] = QIcon(":/movie.svg") - self.icons["restore"] = QIcon(":/restore.svg") - self.icons["screenshot"] = QIcon(":/screenshot.svg") - self.icons["visibility_on"] = QIcon(":/visibility_on.svg") - self.icons["visibility_off"] = QIcon(":/visibility_off.svg") - - def _save_movie_noname(self): - return self.save_movie(None) - def _configure_tool_bar(self): - self.actions["screenshot"] = self.tool_bar.addAction( - self.icons["screenshot"], - "Take a screenshot", - self.plotter._qt_screenshot + self._renderer._tool_bar_load_icons() + self._renderer._tool_bar_set_theme(self.theme) + self._renderer._tool_bar_initialize() + self._renderer._tool_bar_add_file_button( + name="screenshot", + desc="Take a screenshot", + func=self.save_image, + ) + self._renderer._tool_bar_add_file_button( + name="movie", + desc="Save movie...", + func=self.save_movie, + shortcut="ctrl+shift+s", ) - self.actions["movie"] = self.tool_bar.addAction( - self.icons["movie"], - "Save movie...", - self._save_movie_noname, + self._renderer._tool_bar_add_button( + name="visibility", + desc="Toggle Visibility", + func=self.toggle_interface, + icon_name="visibility_on" ) - self.actions["visibility"] = self.tool_bar.addAction( - self.icons["visibility_on"], - "Toggle Visibility", - self.toggle_interface + self._renderer._tool_bar_add_button( + name="play", + desc="Play/Pause", + func=self.toggle_playback, + shortcut=" ", ) - self.actions["play"] = self.tool_bar.addAction( - self.icons["play"], - "Play/Pause", - self.toggle_playback + self._renderer._tool_bar_add_button( + name="reset", + desc="Reset", + func=self.reset, ) - self.actions["reset"] = self.tool_bar.addAction( - self.icons["reset"], - "Reset", - self.reset + self._renderer._tool_bar_add_button( + name="scale", + desc="Auto-Scale", + func=self.apply_auto_scaling, ) - self.actions["scale"] = self.tool_bar.addAction( - self.icons["scale"], - "Auto-Scale", - self.apply_auto_scaling + self._renderer._tool_bar_add_button( + name="clear", + desc="Clear traces", + func=self.clear_glyphs, ) - self.actions["restore"] = self.tool_bar.addAction( - self.icons["restore"], - "Restore scaling", - self.restore_user_scaling + 
self._renderer._tool_bar_add_spacer() + self._renderer._tool_bar_add_button( + name="help", + desc="Help", + func=self.help, + shortcut="?", ) - self.actions["clear"] = self.tool_bar.addAction( - self.icons["clear"], - "Clear traces", - self.clear_points + + def _shift_time(self, op): + self.callbacks["time"]( + value=(op(self._current_time, self.playback_speed)), + time_as_index=False, + update_widget=True, ) - self.actions["help"] = self.tool_bar.addAction( - self.icons["help"], - "Help", - self.help + + def _rotate_azimuth(self, value): + azimuth = (self._renderer.figure._azimuth + value) % 360 + self._renderer.set_camera(azimuth=azimuth, reset_camera=False) + + def _rotate_elevation(self, value): + elevation = np.clip( + self._renderer.figure._elevation + value, + self._elevation_rng[0], + self._elevation_rng[1], ) + self._renderer.set_camera(elevation=elevation, reset_camera=False) - self.actions["movie"].setShortcut("ctrl+shift+s") - self.actions["visibility"].setShortcut("i") - self.actions["play"].setShortcut(" ") - self.actions["scale"].setShortcut("s") - self.actions["restore"].setShortcut("r") - self.actions["clear"].setShortcut("c") - self.actions["help"].setShortcut("?") + def _configure_shortcuts(self): + # First, we remove the default bindings: + self._clear_callbacks() + # Then, we add our own: + self.plotter.add_key_event("i", self.toggle_interface) + self.plotter.add_key_event("s", self.apply_auto_scaling) + self.plotter.add_key_event("r", self.restore_user_scaling) + self.plotter.add_key_event("c", self.clear_glyphs) + self.plotter.add_key_event("n", partial(self._shift_time, + op=lambda x, y: x + y)) + self.plotter.add_key_event("b", partial(self._shift_time, + op=lambda x, y: x - y)) + for key, func, sign in (("Left", self._rotate_azimuth, 1), + ("Right", self._rotate_azimuth, -1), + ("Up", self._rotate_elevation, 1), + ("Down", self._rotate_elevation, -1)): + self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE)) def _configure_menu(self): - # remove default picking menu - to_remove = list() - for action in self.main_menu.actions(): - if action.text() == "Tools": - to_remove.append(action) - for action in to_remove: - self.main_menu.removeAction(action) - - # add help menu - menu = self.main_menu.addMenu('Help') - menu.addAction('Show MNE key bindings\t?', self.help) + self._renderer._menu_initialize() + self._renderer._menu_add_submenu( + name="help", + desc="Help", + ) + self._renderer._menu_add_button( + menu_name="help", + name="help", + desc="Show MNE key bindings\t?", + func=self.help, + ) def _configure_status_bar(self): - from PyQt5.QtWidgets import QLabel, QProgressBar - self.status_msg = QLabel(self.default_status_bar_msg) - self.status_progress = QProgressBar() - self.status_bar.layout().addWidget(self.status_msg, 1) - self.status_bar.layout().addWidget(self.status_progress, 0) - self.status_progress.hide() + self._renderer._status_bar_initialize() + self.status_msg = self._renderer._status_bar_add_label( + self.default_status_bar_msg, stretch=1) + self.status_progress = self._renderer._status_bar_add_progress_bar() + if self.status_progress is not None: + self.status_progress.hide() def _on_mouse_move(self, vtk_picker, event): if self._mouse_no_mvt: @@ -1178,6 +1351,9 @@ def _on_button_release(self, vtk_picker, event): self._mouse_no_mvt = 0 def _on_pick(self, vtk_picker, event): + if not self.show_traces: + return + # vtk_picker is a vtkCellPicker cell_id = vtk_picker.GetCellId() mesh = vtk_picker.GetDataSet() @@ -1198,12 +1374,12 @@ def 
_on_pick(self, vtk_picker, event): if found_sphere is not None: break if found_sphere is not None: - assert found_sphere._is_point + assert found_sphere._is_glyph mesh = found_sphere # 2) Remove sphere if it's what we have - if hasattr(mesh, "_is_point"): - self.remove_point(mesh) + if hasattr(mesh, "_is_glyph"): + self._remove_vertex_glyph(mesh) return # 3) Otherwise, pick the objects in the scene @@ -1258,30 +1434,41 @@ def _on_pick(self, vtk_picker, event): idx = np.argmin(abs(vertices - pos), axis=0) vertex_id = cell[idx[0]] - if vertex_id not in self.picked_points[hemi]: - self.add_point(hemi, mesh, vertex_id) + if self.traces_mode == 'label': + self._add_label_glyph(hemi, mesh, vertex_id) + else: + self._add_vertex_glyph(hemi, mesh, vertex_id) - def add_point(self, hemi, mesh, vertex_id): - """Pick a vertex on the brain. + def _add_label_glyph(self, hemi, mesh, vertex_id): + if hemi == 'vol': + return + label_id = self._vertex_to_label_id[hemi][vertex_id] + label = self._annotation_labels[hemi][label_id] - Parameters - ---------- - hemi : str - The hemisphere id of the vertex. - mesh : object - The mesh where picking is expected. - vertex_id : int - The vertex identifier in the mesh. + # remove the patch if already picked + if label_id in self.picked_patches[hemi]: + self._remove_label_glyph(hemi, label_id) + return + + if hemi == label.hemi: + self.add_label(label, borders=True, reset_camera=False) + self.picked_patches[hemi].append(label_id) + + def _remove_label_glyph(self, hemi, label_id): + label = self._annotation_labels[hemi][label_id] + label._line.remove() + self.color_cycle.restore(label._color) + self.mpl_canvas.update_plot() + self._layered_meshes[hemi].remove_overlay(label.name) + self.picked_patches[hemi].remove(label_id) + + def _add_vertex_glyph(self, hemi, mesh, vertex_id): + if vertex_id in self.picked_points[hemi]: + return - Returns - ------- - sphere : object - The glyph created for the picked point. - """ # skip if the wrong hemi is selected if self.act_data_smooth[hemi][0] is None: return - from ..backends._pyvista import _sphere color = next(self.color_cycle) line = self.plot_time_course(hemi, vertex_id, color) if hemi == 'vol': @@ -1302,8 +1489,12 @@ def add_point(self, hemi, mesh, vertex_id): del mesh # from the picked renderer to the subplot coords - rindex = self.plotter.renderers.index(self.picked_renderer) - row, col = self.plotter.index_to_loc(rindex) + try: + lst = self._renderer._all_renderers._renderers + except AttributeError: + lst = self._renderer._all_renderers + rindex = lst.index(self.picked_renderer) + row, col = self._renderer._index_to_loc(rindex) actors = list() spheres = list() @@ -1315,8 +1506,7 @@ def add_point(self, hemi, mesh, vertex_id): # mitigated with synchronization/delay?) # 2) the glyph filter is used in renderer.sphere() but only one # sphere is required in this function. - actor, sphere = _sphere( - plotter=self.plotter, + actor, sphere = self._renderer._sphere( center=np.array(center), color=color, radius=4.0, @@ -1326,7 +1516,7 @@ def add_point(self, hemi, mesh, vertex_id): # add metadata for picking for sphere in spheres: - sphere._is_point = True + sphere._is_glyph = True sphere._hemi = hemi sphere._line = line sphere._actors = actors @@ -1338,14 +1528,7 @@ def add_point(self, hemi, mesh, vertex_id): self.pick_table[vertex_id] = spheres return sphere - def remove_point(self, mesh): - """Remove the picked point from its glyph. - - Parameters - ---------- - mesh : object - The mesh associated to the point to remove. 
- """ + def _remove_vertex_glyph(self, mesh, render=True): vertex_id = mesh._vertex_id if vertex_id not in self.pick_table: return @@ -1364,20 +1547,28 @@ def remove_point(self, mesh): self.color_cycle.restore(color) for sphere in spheres: # remove all actors - self.plotter.remove_actor(sphere._actors) + self.plotter.remove_actor(sphere._actors, render=render) sphere._actors = None self._spheres.pop(self._spheres.index(sphere)) self.pick_table.pop(vertex_id) - def clear_points(self): - """Clear the picked points.""" - if not hasattr(self, '_spheres'): + def clear_glyphs(self): + """Clear the picking glyphs.""" + if not self.time_viewer: return for sphere in list(self._spheres): # will remove itself, so copy - self.remove_point(sphere) + self._remove_vertex_glyph(sphere, render=False) assert sum(len(v) for v in self.picked_points.values()) == 0 assert len(self.pick_table) == 0 assert len(self._spheres) == 0 + for hemi in self._hemis: + for label_id in list(self.picked_patches[hemi]): + self._remove_label_glyph(hemi, label_id) + assert sum(len(v) for v in self.picked_patches.values()) == 0 + if self.rms is not None: + self.rms.remove() + self.rms = None + self._renderer._update() def plot_time_course(self, hemi, vertex_id, color): """Plot the vertex time course. @@ -1399,6 +1590,7 @@ def plot_time_course(self, hemi, vertex_id, color): if self.mpl_canvas is None: return time = self._data['time'].copy() # avoid circular ref + mni = None if hemi == 'vol': hemi_str = 'V' xfm = read_talxfm( @@ -1411,15 +1603,20 @@ def plot_time_course(self, hemi, vertex_id, color): mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk) else: hemi_str = 'L' if hemi == 'lh' else 'R' - mni = vertex_to_mni( - vertices=vertex_id, - hemis=0 if hemi == 'lh' else 1, - subject=self._subject_id, - subjects_dir=self._subjects_dir - ) - label = "{}:{} MNI: {}".format( - hemi_str, str(vertex_id).ljust(6), - ', '.join('%5.1f' % m for m in mni)) + try: + mni = vertex_to_mni( + vertices=vertex_id, + hemis=0 if hemi == 'lh' else 1, + subject=self._subject_id, + subjects_dir=self._subjects_dir + ) + except Exception: + mni = None + if mni is not None: + mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni) + else: + mni = '' + label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni) act_data, smooth = self.act_data_smooth[hemi] if smooth is not None: act_data = smooth[vertex_id].dot(act_data)[0] @@ -1452,41 +1649,64 @@ def plot_time_line(self): self.time_line.set_xdata(current_time) self.mpl_canvas.update_plot() - def help(self): - """Display the help window.""" + def _configure_help(self): pairs = [ ('?', 'Display help window'), ('i', 'Toggle interface'), ('s', 'Apply auto-scaling'), ('r', 'Restore original clim'), ('c', 'Clear all traces'), + ('n', 'Shift the time forward by the playback speed'), + ('b', 'Shift the time backward by the playback speed'), ('Space', 'Start/Pause playback'), + ('Up', 'Decrease camera elevation angle'), + ('Down', 'Increase camera elevation angle'), + ('Left', 'Decrease camera azimuth angle'), + ('Right', 'Increase camera azimuth angle'), ] text1, text2 = zip(*pairs) text1 = '\n'.join(text1) text2 = '\n'.join(text2) - _show_help( + self.help_canvas = self._renderer._window_get_simple_canvas( + width=5, height=2, dpi=80) + _show_help_fig( col1=text1, col2=text2, - width=5, - height=2, + fig_help=self.help_canvas.fig, + ax=self.help_canvas.axes, + show=False, ) + def help(self): + """Display the help window.""" + self.help_canvas.show() + def _clear_callbacks(self): - from ..backends._pyvista 
import _remove_picking_callback if not hasattr(self, 'callbacks'): return for callback in self.callbacks.values(): if callback is not None: - if hasattr(callback, "plotter"): - callback.plotter = None - if hasattr(callback, "brain"): - callback.brain = None - if hasattr(callback, "slider_rep"): - callback.slider_rep = None + for key in ('plotter', 'brain', 'callback', + 'widget', 'widgets'): + setattr(callback, key, None) self.callbacks.clear() - if self.show_traces: - _remove_picking_callback(self._iren, self.plotter.picker) + # Remove the default key binding + if getattr(self, "iren", None) is not None: + try: + # pyvista<0.30.0 + self.plotter._key_press_event_callbacks.clear() + except AttributeError: + # pyvista>=0.30.0 + self.plotter.iren.clear_key_event_callbacks() + + def _clear_widgets(self): + if not hasattr(self, 'widgets'): + return + for widget in self.widgets.values(): + if widget is not None: + for key in ('triggered', 'valueChanged'): + setattr(widget, key, None) + self.widgets.clear() @property def interaction(self): @@ -1701,8 +1921,6 @@ def add_data(self, array, fmin=None, fmid=None, fmax=None, self._data['transparent'] = transparent # data specific for a hemi self._data[hemi] = dict() - self._data[hemi]['actors'] = None - self._data[hemi]['mesh'] = None self._data[hemi]['glyph_dataset'] = None self._data[hemi]['glyph_mapper'] = None self._data[hemi]['glyph_actor'] = None @@ -1751,21 +1969,15 @@ def add_data(self, array, fmin=None, fmid=None, fmax=None, ) self._data['time_actor'] = time_actor self._time_label_added = True - if colorbar and not self._colorbar_added and do: + if colorbar and self._scalar_bar is None and do: kwargs = dict(source=actor, n_labels=8, color=self._fg_color, bgcolor=self._brain_color[:3]) kwargs.update(colorbar_kwargs or {}) - self._renderer.scalarbar(**kwargs) - self._colorbar_added = True + self._scalar_bar = self._renderer.scalarbar(**kwargs) self._renderer.set_camera(**views_dicts[hemi][v]) # 4) update the scalar bar and opacity - self.update_lut() - if hemi in self._layered_meshes: - mesh = self._layered_meshes[hemi] - mesh.update_overlay(name='data', opacity=alpha) - - self._update() + self.update_lut(alpha=alpha) def _iter_views(self, hemi): # which rows and columns each type of visual needs to be added to @@ -1791,12 +2003,20 @@ def remove_labels(self): """Remove all the ROI labels from the image.""" for hemi in self._hemis: mesh = self._layered_meshes[hemi] - mesh.remove_overlay(self._label_data[hemi]) - self._label_data[hemi].clear() - self._update() + for label in self._labels[hemi]: + mesh.remove_overlay(label.name) + self._labels[hemi].clear() + self._renderer._update() + + def remove_annotations(self): + """Remove all annotations from the image.""" + for hemi in self._hemis: + mesh = self._layered_meshes[hemi] + mesh.remove_overlay(self._annots[hemi]) + self._annots[hemi].clear() + self._renderer._update() def _add_volume_data(self, hemi, src, volume_options): - from ..backends._pyvista import _volume _validate_type(src, SourceSpaces, 'src') _check_option('src.kind', src.kind, ('volume',)) _validate_type( @@ -1866,8 +2086,9 @@ def _add_volume_data(self, hemi, src, volume_options): scalars = np.zeros(np.prod(dimensions)) scalars[vertices] = 1. 
# for the outer mesh grid, grid_mesh, volume_pos, volume_neg = \ - _volume(dimensions, origin, spacing, scalars, surface_alpha, - resolution, blending, center) + self._renderer._volume(dimensions, origin, spacing, scalars, + surface_alpha, resolution, blending, + center) self._data[hemi]['alpha'] = alpha # incorrectly set earlier self._data[hemi]['grid'] = grid self._data[hemi]['grid_mesh'] = grid_mesh @@ -1885,7 +2106,6 @@ def _add_volume_data(self, hemi, src, volume_options): actor_neg = None grid_mesh = self._data[hemi]['grid_mesh'] if grid_mesh is not None: - import vtk _, prop = self._renderer.plotter.add_actor( grid_mesh, reset_camera=False, name=None, culling=False, pickable=False) @@ -1894,25 +2114,18 @@ def _add_volume_data(self, hemi, src, volume_options): if silhouette_alpha > 0 and silhouette_linewidth > 0: for ri, ci, v in self._iter_views('vol'): self._renderer.subplot(ri, ci) - grid_silhouette = vtk.vtkPolyDataSilhouette() - grid_silhouette.SetInputData(grid_mesh.GetInput()) - grid_silhouette.SetCamera( - self._renderer.plotter.renderer.GetActiveCamera()) - grid_silhouette.SetEnableFeatureAngle(0) - grid_silhouette_mapper = vtk.vtkPolyDataMapper() - grid_silhouette_mapper.SetInputConnection( - grid_silhouette.GetOutputPort()) - _, prop = self._renderer.plotter.add_actor( - grid_silhouette_mapper, reset_camera=False, name=None, - culling=False, pickable=False) - prop.SetColor(*self._brain_color[:3]) - prop.SetOpacity(silhouette_alpha) - prop.SetLineWidth(silhouette_linewidth) + self._renderer._silhouette( + mesh=grid_mesh.GetInput(), + color=self._brain_color[:3], + line_width=silhouette_linewidth, + alpha=silhouette_alpha, + ) return actor_pos, actor_neg def add_label(self, label, color=None, alpha=1, scalar_thresh=None, - borders=False, hemi=None, subdir=None): + borders=False, hemi=None, subdir=None, + reset_camera=True): """Add an ROI label to the image. Parameters @@ -1943,6 +2156,9 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, label directory rather than in the label directory itself (e.g. for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label`` ``brain.add_label('cuneus', subdir='aparc')``). + reset_camera : bool + If True, reset the camera view after adding the label. Defaults + to True. 
Notes ----- @@ -1981,9 +2197,9 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, hemi = label.hemi ids = label.vertices if label.name is None: - label_name = 'unnamed' - else: - label_name = str(label.name) + label.name = 'unnamed' + str(self._unnamed_label_id) + self._unnamed_label_id += 1 + label_name = str(label.name) if color is None: if hasattr(label, 'color') and label.color is not None: @@ -2004,11 +2220,24 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, if scalar_thresh is not None: ids = ids[scalars >= scalar_thresh] - # XXX: add support for label_name - self._label_name = label_name + scalars = np.zeros(self.geo[hemi].coords.shape[0]) + scalars[ids] = 1 + + if self.time_viewer and self.show_traces \ + and self.traces_mode == 'label': + stc = self._data["stc"] + src = self._data["src"] + tc = stc.extract_label_time_course(label, src=src, + mode=self.label_extract_mode) + tc = tc[0] if tc.ndim == 2 else tc[0, 0, :] + color = next(self.color_cycle) + line = self.mpl_canvas.plot( + self._data['time'], tc, label=label_name, + color=color) + else: + line = None - label = np.zeros(self.geo[hemi].coords.shape[0]) - label[ids] = 1 + orig_color = color color = colorConverter.to_rgba(color, alpha) cmap = np.array([(0, 0, 0, 0,), color]) ctable = np.round(cmap * 255).astype(np.uint8) @@ -2016,11 +2245,11 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, for ri, ci, v in self._iter_views(hemi): self._renderer.subplot(ri, ci) if borders: - n_vertices = label.size + n_vertices = scalars.size edges = mesh_edges(self.geo[hemi].faces) edges = edges.tocoo() - border_edges = label[edges.row] != label[edges.col] - show = np.zeros(n_vertices, dtype=np.int) + border_edges = scalars[edges.row] != scalars[edges.col] + show = np.zeros(n_vertices, dtype=np.int64) keep_idx = np.unique(edges.row[border_edges]) if isinstance(borders, int): for _ in range(borders): @@ -2031,20 +2260,24 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 - label *= show + scalars *= show mesh = self._layered_meshes[hemi] mesh.add_overlay( - scalars=label, + scalars=scalars, colormap=ctable, - rng=None, + rng=[np.min(scalars), np.max(scalars)], opacity=alpha, name=label_name, ) - self._label_data[hemi].append(label_name) - self._renderer.set_camera(**views_dicts[hemi][v]) - - self._update() + if reset_camera: + self._renderer.set_camera(**views_dicts[hemi][v]) + if self.time_viewer and self.show_traces \ + and self.traces_mode == 'label': + label._color = orig_color + label._line = line + self._labels[hemi].append(label) + self._renderer._update() def add_foci(self, coords, coords_as_verts=False, map_surface=None, scale_factor=1, color="white", alpha=1, name=None, @@ -2140,6 +2373,34 @@ def add_text(self, x, y, text, name=None, color=None, opacity=1.0, self._renderer.text2d(x_window=x, y_window=y, text=text, color=color, size=font_size, justification=justification) + def _configure_label_time_course(self): + from ...label import read_labels_from_annot + if not self.show_traces: + return + if self.mpl_canvas is None: + self._configure_mplcanvas() + else: + self.clear_glyphs() + self.traces_mode = 'label' + self.add_annotation(self.annot, color="w", alpha=0.75) + + # now plot the time line + self.plot_time_line() + self.mpl_canvas.update_plot() + + for hemi in self._hemis: + labels = read_labels_from_annot( + subject=self._subject_id, + parc=self.annot, + hemi=hemi, + 
subjects_dir=self._subjects_dir + ) + self._vertex_to_label_id[hemi] = np.full( + self.geo[hemi].coords.shape[0], -1) + self._annotation_labels[hemi] = labels + for idx, label in enumerate(labels): + self._vertex_to_label_id[hemi][label.vertices] = idx + def add_annotation(self, annot, borders=True, alpha=1, hemi=None, remove_existing=True, color=None, **kwargs): """Add an annotation file. @@ -2213,7 +2474,6 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, annot = 'annotation' for hemi, (labels, cmap) in zip(hemis, annots): - # Maybe zero-out the non-border vertices self._to_borders(labels, hemi, borders) @@ -2243,16 +2503,22 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, cmap[:, :3] = rgb.astype(cmap.dtype) ctable = cmap.astype(np.float64) - mesh = self._layered_meshes[hemi] - mesh.add_overlay( - scalars=ids, - colormap=ctable, - rng=[np.min(ids), np.max(ids)], - opacity=alpha, - name=annot, - ) + for ri, ci, _ in self._iter_views(hemi): + self._renderer.subplot(ri, ci) + mesh = self._layered_meshes[hemi] + mesh.add_overlay( + scalars=ids, + colormap=ctable, + rng=[np.min(ids), np.max(ids)], + opacity=alpha, + name=annot, + ) + self._annots[hemi].append(annot) + if not self.time_viewer or self.traces_mode == 'vertex': + self._renderer._set_colormap_range( + mesh._actor, cmap.astype(np.uint8), None) - self._update() + self._renderer._update() def close(self): """Close all figures and cleanup data structure.""" @@ -2264,7 +2530,7 @@ def show(self): self._renderer.show() def show_view(self, view=None, roll=None, distance=None, row=0, col=0, - hemi=None): + hemi=None, align=True): """Orient camera to display view. Parameters @@ -2281,6 +2547,11 @@ def show_view(self, view=None, roll=None, distance=None, row=0, col=0, The column to set. hemi : str Which hemi to use for string lookup (when in "both" mode). + align : bool + If True, consider view arguments relative to canonical MRI + directions (closest to MNI for the subject) rather than native MRI + space. This helps when MRIs are not in standard orientation (e.g., + have large rotations). """ hemi = self._hemi if hemi is None else hemi if hemi == 'split': @@ -2297,8 +2568,9 @@ def show_view(self, view=None, roll=None, distance=None, row=0, col=0, if distance is not None: view.update(distance=distance) self._renderer.subplot(row, col) - self._renderer.set_camera(**view, reset_camera=False) - self._update() + xfm = self._rigid if align else None + self._renderer.set_camera(**view, reset_camera=False, rigid=xfm) + self._renderer._update() def reset_view(self): """Reset the camera.""" @@ -2308,7 +2580,7 @@ def reset_view(self): self._renderer.set_camera(**views_dicts[h][v], reset_camera=False) - def save_image(self, filename, mode='rgb'): + def save_image(self, filename=None, mode='rgb'): """Save view from all panels to disk. Parameters @@ -2318,7 +2590,10 @@ def save_image(self, filename, mode='rgb'): mode : str Either 'rgb' or 'rgba' for values to return. 
""" - self._renderer.screenshot(mode=mode, filename=filename) + if filename is None: + filename = _generate_default_filename(".png") + _save_ndarray_img( + filename, self.screenshot(mode=mode, time_viewer=True)) @fill_doc def screenshot(self, mode='rgb', time_viewer=False): @@ -2341,54 +2616,56 @@ def screenshot(self, mode='rgb', time_viewer=False): not self.separate_canvas: canvas = self.mpl_canvas.fig.canvas canvas.draw_idle() - # In theory, one of these should work: - # - # trace_img = np.frombuffer( - # canvas.tostring_rgb(), dtype=np.uint8) - # trace_img.shape = canvas.get_width_height()[::-1] + (3,) - # - # or - # - # trace_img = np.frombuffer( - # canvas.tostring_rgb(), dtype=np.uint8) - # size = time_viewer.mpl_canvas.getSize() - # trace_img.shape = (size.height(), size.width(), 3) - # - # But in practice, sometimes the sizes does not match the - # renderer tostring_rgb() size. So let's directly use what - # matplotlib does in lib/matplotlib/backends/backend_agg.py - # before calling tobytes(): - trace_img = np.asarray( - canvas.renderer._renderer).take([0, 1, 2], axis=2) - # need to slice into trace_img because generally it's a bit - # smaller - delta = trace_img.shape[1] - img.shape[1] - if delta > 0: - start = delta // 2 - trace_img = trace_img[:, start:start + img.shape[1]] - img = np.concatenate([img, trace_img], axis=0) + fig = self.mpl_canvas.fig + with BytesIO() as output: + # Need to pass dpi here so it uses the physical (HiDPI) DPI + # rather than logical DPI when saving in most cases. + # But when matplotlib uses HiDPI and VTK doesn't + # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work, + # so let's just calculate the DPI we need to get + # the correct size output based on the widths being equal + dpi = img.shape[1] / fig.get_size_inches()[0] + fig.savefig(output, dpi=dpi, format='raw', + facecolor=self._bg_color, edgecolor='none') + output.seek(0) + trace_img = np.reshape( + np.frombuffer(output.getvalue(), dtype=np.uint8), + newshape=(-1, img.shape[1], 4))[:, :, :3] + img = concatenate_images( + [img, trace_img], bgcolor=self._brain_color[:3]) return img + @contextlib.contextmanager + def _no_lut_update(self, why): + orig = self._lut_locked + self._lut_locked = why + try: + yield + finally: + self._lut_locked = orig + @fill_doc - def update_lut(self, fmin=None, fmid=None, fmax=None): + def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None): """Update color map. Parameters ---------- %(fmin_fmid_fmax)s + alpha : float | None + Alpha to use in the update. 
""" - from ..backends._pyvista import _set_colormap_range, _set_volume_range + args = f'{fmin}, {fmid}, {fmax}, {alpha}' + if self._lut_locked is not None: + logger.debug(f'LUT update postponed with {args}') + return + logger.debug(f'Updating LUT with {args}') center = self._data['center'] colormap = self._data['colormap'] transparent = self._data['transparent'] - lims = dict(fmin=fmin, fmid=fmid, fmax=fmax) - lims = {key: self._data[key] if val is None else val - for key, val in lims.items()} + lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')} + _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax) assert all(val is not None for val in lims.values()) - if lims['fmin'] > lims['fmid']: - lims['fmin'] = lims['fmid'] - if lims['fmax'] < lims['fmid']: - lims['fmax'] = lims['fmid'] + self._data.update(lims) self._data['ctable'] = np.round( calculate_lut(colormap, alpha=1., center=center, @@ -2397,34 +2674,37 @@ def update_lut(self, fmin=None, fmid=None, fmax=None): # update our values rng = self._cmap_range ctable = self._data['ctable'] - # in testing, no plotter; if colorbar=False, no scalar_bar - scalar_bar = getattr( - getattr(self._renderer, 'plotter', None), 'scalar_bar', None) for hemi in ['lh', 'rh', 'vol']: hemi_data = self._data.get(hemi) if hemi_data is not None: if hemi in self._layered_meshes: mesh = self._layered_meshes[hemi] mesh.update_overlay(name='data', - colormap=self._data['ctable']) - _set_colormap_range(mesh._actor, ctable, scalar_bar, rng) - scalar_bar = None + colormap=self._data['ctable'], + opacity=alpha, + rng=rng) + self._renderer._set_colormap_range( + mesh._actor, ctable, self._scalar_bar, rng, + self._brain_color) grid_volume_pos = hemi_data.get('grid_volume_pos') grid_volume_neg = hemi_data.get('grid_volume_neg') for grid_volume in (grid_volume_pos, grid_volume_neg): if grid_volume is not None: - _set_volume_range( + self._renderer._set_volume_range( grid_volume, ctable, hemi_data['alpha'], - scalar_bar, rng) - scalar_bar = None + self._scalar_bar, rng) glyph_actor = hemi_data.get('glyph_actor') if glyph_actor is not None: for glyph_actor_ in glyph_actor: - _set_colormap_range( - glyph_actor_, ctable, scalar_bar, rng) - scalar_bar = None + self._renderer._set_colormap_range( + glyph_actor_, ctable, self._scalar_bar, rng) + if self.time_viewer: + with self._no_lut_update(f'update_lut {args}'): + for key in ('fmin', 'fmid', 'fmax'): + self.callbacks[key](lims[key]) + self._renderer._update() def set_data_smoothing(self, n_steps): """Set the number of smoothing steps. @@ -2434,6 +2714,7 @@ def set_data_smoothing(self, n_steps): n_steps : int Number of smoothing steps. """ + from scipy import sparse from ...morph import _hemi_morph for hemi in ['lh', 'rh']: hemi_data = self._data.get(hemi) @@ -2562,7 +2843,7 @@ def set_time_point(self, time_idx): self._update_glyphs(hemi, vectors) self._data['time_idx'] = time_idx - self._update() + self._renderer._update() def set_time(self, time): """Set the time to display (in seconds). 
@@ -2584,7 +2865,6 @@ def set_time(self, time): f'available times ({min(self._times)}-{max(self._times)} s).') def _update_glyphs(self, hemi, vectors): - from ..backends._pyvista import _set_colormap_range, _create_actor hemi_data = self._data.get(hemi) assert hemi_data is not None vertices = hemi_data['vertices'] @@ -2619,7 +2899,7 @@ def _update_glyphs(self, hemi, vectors): glyph_dataset.point_arrays['vec'] = vectors glyph_mapper = hemi_data['glyph_mapper'] if add: - glyph_actor = _create_actor(glyph_mapper) + glyph_actor = self._renderer._actor(glyph_mapper) prop = glyph_actor.GetProperty() prop.SetLineWidth(2.) prop.SetOpacity(vector_alpha) @@ -2628,7 +2908,7 @@ def _update_glyphs(self, hemi, vectors): else: glyph_actor = hemi_data['glyph_actor'][count] count += 1 - _set_colormap_range( + self._renderer._set_colormap_range( actor=glyph_actor, ctable=self._data['ctable'], scalar_bar=None, @@ -2691,6 +2971,10 @@ def data(self): """Data used by time viewer and color bar widgets.""" return self._data + @property + def labels(self): + return self._labels + @property def views(self): return self._views @@ -2703,8 +2987,7 @@ def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): import imageio - from ..backends._pyvista import _disabled_interaction - with _disabled_interaction(self._renderer): + with self._renderer._disabled_interaction(): images = self._make_movie_frames( time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer) @@ -2717,8 +3000,52 @@ def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, kwargs['bitrate'] = bitrate imageio.mimwrite(filename, images, **kwargs) + def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None, + framerate=24, interpolation=None, codec=None, + bitrate=None, callback=None, time_viewer=False, + **kwargs): + def frame_callback(frame, n_frames): + if frame == n_frames: + # On the ImageIO step + self.status_msg.set_value( + "Saving with ImageIO: %s" + % filename + ) + self.status_msg.show() + self.status_progress.hide() + self._renderer._status_bar_update() + else: + self.status_msg.set_value( + "Rendering images (frame %d / %d) ..." + % (frame + 1, n_frames) + ) + self.status_msg.show() + self.status_progress.show() + self.status_progress.set_range([0, n_frames - 1]) + self.status_progress.set_value(frame) + self.status_progress.update() + self.status_msg.update() + self._renderer._status_bar_update() + + # set cursor to busy + default_cursor = self._renderer._window_get_cursor() + self._renderer._window_set_cursor( + self._renderer._window_new_cursor("WaitCursor")) + + try: + self._save_movie( + filename=filename, + time_dilation=(1. / self.playback_speed), + callback=frame_callback, + **kwargs + ) + except (Exception, KeyboardInterrupt): + warn('Movie saving aborted:\n' + traceback.format_exc()) + finally: + self._renderer._window_set_cursor(default_cursor) + @fill_doc - def save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, + def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None, framerate=24, interpolation=None, codec=None, bitrate=None, callback=None, time_viewer=False, **kwargs): """Save a movie (for data with a time axis). @@ -2770,83 +3097,12 @@ def save_movie(self, filename, time_dilation=4., tmin=None, tmax=None, dialog : object The opened dialog is returned for testing purpose only. 
""" - if self.time_viewer: - try: - from pyvista.plotting.qt_plotting import FileDialog - except ImportError: - from pyvistaqt.plotting import FileDialog - - if filename is None: - self.status_msg.setText("Choose movie path ...") - self.status_msg.show() - self.status_progress.setValue(0) - - def _post_setup(unused): - del unused - self.status_msg.hide() - self.status_progress.hide() - - dialog = FileDialog( - self.plotter.app_window, - callback=partial(self._save_movie, **kwargs) - ) - dialog.setDirectory(os.getcwd()) - dialog.finished.connect(_post_setup) - return dialog - else: - from PyQt5.QtCore import Qt - from PyQt5.QtGui import QCursor - - def frame_callback(frame, n_frames): - if frame == n_frames: - # On the ImageIO step - self.status_msg.setText( - "Saving with ImageIO: %s" - % filename - ) - self.status_msg.show() - self.status_progress.hide() - self.status_bar.layout().update() - else: - self.status_msg.setText( - "Rendering images (frame %d / %d) ..." - % (frame + 1, n_frames) - ) - self.status_msg.show() - self.status_progress.show() - self.status_progress.setRange(0, n_frames - 1) - self.status_progress.setValue(frame) - self.status_progress.update() - self.status_progress.repaint() - self.status_msg.update() - self.status_msg.parent().update() - self.status_msg.repaint() - - # temporarily hide interface - default_visibility = self.visibility - self.toggle_interface(value=False) - # set cursor to busy - default_cursor = self.interactor.cursor() - self.interactor.setCursor(QCursor(Qt.WaitCursor)) - - try: - self._save_movie( - filename=filename, - time_dilation=(1. / self.playback_speed), - callback=frame_callback, - **kwargs - ) - except (Exception, KeyboardInterrupt): - warn('Movie saving aborted:\n' + traceback.format_exc()) - - # restore visibility - self.toggle_interface(value=default_visibility) - # restore cursor - self.interactor.setCursor(default_cursor) - else: - self._save_movie(filename, time_dilation, tmin, tmax, - framerate, interpolation, codec, - bitrate, callback, time_viewer, **kwargs) + if filename is None: + filename = _generate_default_filename(".mp4") + func = self._save_movie_tv if self.time_viewer else self._save_movie + func(filename, time_dilation, tmin, tmax, + framerate, interpolation, codec, + bitrate, callback, time_viewer, **kwargs) def _make_movie_frames(self, time_dilation, tmin, tmax, framerate, interpolation, callback, time_viewer): @@ -2927,13 +3183,6 @@ def _iter_time(self, time_idx, callback): # Restore original time index func(current_time_idx) - def _show(self): - """Request rendering of the window.""" - try: - return self._renderer.show() - except RuntimeError: - logger.info("No active/running renderer available.") - def _check_stc(self, hemi, array, vertices): from ...source_estimate import ( _BaseSourceEstimate, _BaseSurfaceSourceEstimate, @@ -3020,14 +3269,6 @@ def enable_depth_peeling(self): """Enable depth peeling.""" self._renderer.enable_depth_peeling() - def _update(self): - from ..backends import renderer - if renderer.get_3d_backend() in ['pyvista', 'notebook']: - if self.notebook and self._renderer.figure.display is not None: - self._renderer.figure.display.update() - else: - self._renderer.plotter.update() - def get_picked_points(self): """Return the vertices of the picked points. 
@@ -3079,6 +3320,36 @@ def _update_limits(fmin, fmid, fmax, center, array): return fmin, fmid, fmax +def _update_monotonic(lims, fmin, fmid, fmax): + if fmin is not None: + lims['fmin'] = fmin + if lims['fmax'] < fmin: + logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmin}') + lims['fmax'] = fmin + if lims['fmid'] < fmin: + logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmin}') + lims['fmid'] = fmin + assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] + if fmid is not None: + lims['fmid'] = fmid + if lims['fmin'] > fmid: + logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmid}') + lims['fmin'] = fmid + if lims['fmax'] < fmid: + logger.debug(f' Bumping fmax = {lims["fmax"]} to {fmid}') + lims['fmax'] = fmid + assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] + if fmax is not None: + lims['fmax'] = fmax + if lims['fmin'] > fmax: + logger.debug(f' Bumping fmin = {lims["fmin"]} to {fmax}') + lims['fmin'] = fmax + if lims['fmid'] > fmax: + logger.debug(f' Bumping fmid = {lims["fmid"]} to {fmax}') + lims['fmid'] = fmax + assert lims['fmin'] <= lims['fmid'] <= lims['fmax'] + + def _get_range(brain): val = np.abs(np.concatenate(list(brain._current_act_data.values()))) return [np.min(val), np.max(val)] @@ -3097,6 +3368,9 @@ def LeaveEvent(self): def SetEventInformation(self, *args, **kwargs): pass + def CharEvent(self): + pass + def KeyPressEvent(self, *args, **kwargs): pass diff --git a/mne/viz/_brain/_linkviewer.py b/mne/viz/_brain/_linkviewer.py index b84c07e1680..8dd3d8f6834 100644 --- a/mne/viz/_brain/_linkviewer.py +++ b/mne/viz/_brain/_linkviewer.py @@ -26,30 +26,31 @@ def __init__(self, brains, time=True, camera=False, colorbar=True, if time: # link time sliders - self.link_sliders( + self.link_widgets( name="time", callback=self.set_time_point, - event_type="always" + signal_type="valueChanged", ) # link playback speed sliders - self.link_sliders( + self.link_widgets( name="playback_speed", callback=self.set_playback_speed, - event_type="always" + signal_type="valueChanged", ) # link toggle to start/pause playback - for brain in self.brains: - brain.actions["play"].triggered.disconnect() - brain.actions["play"].triggered.connect( - self.toggle_playback) + self.link_widgets( + name="play", + callback=self.toggle_playback, + signal_type="triggered", + actions=True, + ) # link time course canvas def _time_func(*args, **kwargs): for brain in self.brains: brain.callbacks["time"](*args, **kwargs) - for brain in self.brains: if brain.show_traces: brain.mpl_canvas.time_func = _time_func @@ -57,12 +58,12 @@ def _time_func(*args, **kwargs): if picking: def _func_add(*args, **kwargs): for brain in self.brains: - brain._add_point(*args, **kwargs) + brain._add_vertex_glyph2(*args, **kwargs) brain.plotter.update() def _func_remove(*args, **kwargs): for brain in self.brains: - brain._remove_point(*args, **kwargs) + brain._remove_vertex_glyph2(*args, **kwargs) # save initial picked points initial_points = dict() @@ -74,18 +75,18 @@ def _func_remove(*args, **kwargs): # link the viewers for brain in self.brains: - brain.clear_points() - brain._add_point = brain.add_point - brain.add_point = _func_add - brain._remove_point = brain.remove_point - brain.remove_point = _func_remove + brain.clear_glyphs() + brain._add_vertex_glyph2 = brain._add_vertex_glyph + brain._add_vertex_glyph = _func_add + brain._remove_vertex_glyph2 = brain._remove_vertex_glyph + brain._remove_vertex_glyph = _func_remove # link the initial points for hemi in initial_points.keys(): if hemi in brain._layered_meshes: mesh = 
brain._layered_meshes[hemi]._polydata for vertex_id in initial_points[hemi]: - self.leader.add_point(hemi, mesh, vertex_id) + self.leader._add_vertex_glyph(hemi, mesh, vertex_id) if colorbar: fmin = self.leader._data["fmin"] @@ -95,13 +96,12 @@ def _func_remove(*args, **kwargs): brain.callbacks["fmin"](fmin) brain.callbacks["fmid"](fmid) brain.callbacks["fmax"](fmax) - - for slider_name in ('fmin', 'fmid', 'fmax'): - func = getattr(self, "set_" + slider_name) - self.link_sliders( - name=slider_name, + for name in ('fmin', 'fmid', 'fmax'): + func = getattr(self, "set_" + name) + self.link_widgets( + name=name, callback=func, - event_type="always" + signal_type="valueChanged" ) def set_fmin(self, value): @@ -125,22 +125,22 @@ def set_playback_speed(self, value): brain.callbacks["playback_speed"](value, update_widget=True) def toggle_playback(self): - value = self.leader.callbacks["time"].slider_rep.GetValue() + value = self.leader.callbacks["time"].widget.get_value() # synchronize starting points before playback self.set_time_point(value) for brain in self.brains: brain.toggle_playback() - def link_sliders(self, name, callback, event_type): - from ..backends._pyvista import _update_slider_callback + def link_widgets(self, name, callback, signal_type, actions=False): for brain in self.brains: - slider = brain.sliders[name] - if slider is not None: - _update_slider_callback( - slider=slider, - callback=callback, - event_type=event_type - ) + if actions: + widget = brain._renderer.actions[name] + else: + widget = brain.widgets[name].widget + if widget is not None: + signal = getattr(widget, signal_type) + signal.disconnect() + signal.connect(callback) def link_cameras(self): from ..backends._pyvista import _add_camera_callback diff --git a/mne/viz/_brain/_notebook.py b/mne/viz/_brain/_notebook.py deleted file mode 100644 index 801ba240c07..00000000000 --- a/mne/viz/_brain/_notebook.py +++ /dev/null @@ -1,67 +0,0 @@ -# Authors: Guillaume Favelier -# -# License: Simplified BSD - -from ..backends._notebook \ - import _NotebookInteractor as _PyVistaNotebookInteractor - - -class _NotebookInteractor(_PyVistaNotebookInteractor): - def __init__(self, brain): - self.brain = brain - super().__init__(self.brain._renderer) - - def configure_controllers(self): - from ipywidgets import (IntSlider, interactive, Play, VBox, - HBox, Label, jslink) - super().configure_controllers() - # orientation - self.controllers["orientation"] = interactive( - self.set_orientation, - orientation=self.brain.orientation, - ) - # smoothing - self.sliders["smoothing"] = IntSlider( - value=self.brain._data['smoothing_steps'], - min=self.brain.default_smoothing_range[0], - max=self.brain.default_smoothing_range[1], - continuous_update=False - ) - self.controllers["smoothing"] = VBox([ - Label(value='Smoothing steps'), - interactive( - self.brain.set_data_smoothing, - n_steps=self.sliders["smoothing"] - ) - ]) - # time slider - max_time = len(self.brain._data['time']) - 1 - if max_time >= 1: - time_player = Play( - value=self.brain._data['time_idx'], - min=0, - max=max_time, - continuous_update=False - ) - time_slider = IntSlider( - min=0, - max=max_time, - ) - jslink((time_player, 'value'), (time_slider, 'value')) - time_slider.observe(self.set_time_point, 'value') - self.controllers["time"] = VBox([ - HBox([ - Label(value='Select time point'), - time_player, - ]), - time_slider, - ]) - self.sliders["time"] = time_slider - - def set_orientation(self, orientation): - row, col = self.plotter.index_to_loc( - 
self.plotter._active_renderer_index) - self.brain.show_view(orientation, row=row, col=col) - - def set_time_point(self, data): - self.brain.set_time_point(data['new']) diff --git a/mne/viz/_brain/callback.py b/mne/viz/_brain/callback.py index ca1c63d93e3..9405b8096cc 100644 --- a/mne/viz/_brain/callback.py +++ b/mne/viz/_brain/callback.py @@ -3,191 +3,110 @@ # Guillaume Favelier # # License: Simplified BSD -import time +from ...utils import logger -class IntSlider(object): - """Class to set a integer slider.""" +class TimeCallBack(object): + """Callback to update the time.""" - def __init__(self, plotter=None, callback=None, first_call=True): - self.plotter = plotter - self.callback = callback - self.slider_rep = None - self.first_call = first_call - self._first_time = True - - def __call__(self, value): - """Round the label of the slider.""" - idx = int(round(value)) - if self.slider_rep is not None: - self.slider_rep.SetValue(idx) - self.plotter.update() - if not self._first_time or all([self._first_time, self.first_call]): - self.callback(idx) - if self._first_time: - self._first_time = False - - -class TimeSlider(object): - """Class to update the time slider.""" - - def __init__(self, plotter=None, brain=None, callback=None, - first_call=True): - self.plotter = plotter + def __init__(self, brain=None, callback=None): self.brain = brain self.callback = callback - self.slider_rep = None - self.first_call = first_call - self._first_time = True - self.time_label = None + self.widget = None + self.label = None if self.brain is not None and callable(self.brain._data['time_label']): self.time_label = self.brain._data['time_label'] + else: + self.time_label = None def __call__(self, value, update_widget=False, time_as_index=True): """Update the time slider.""" - value = float(value) if not time_as_index: value = self.brain._to_time_index(value) - if not self._first_time or all([self._first_time, self.first_call]): - self.brain.set_time_point(value) + self.brain.set_time_point(value) + if self.label is not None: + current_time = self.brain._current_time + self.label.set_value(f"{current_time: .3f}") if self.callback is not None: self.callback() - current_time = self.brain._current_time - if self.slider_rep is not None: - if self.time_label is not None: - current_time = self.time_label(current_time) - self.slider_rep.SetTitleText(current_time) - if update_widget: - self.slider_rep.SetValue(value) - self.plotter.update() - if self._first_time: - self._first_time = False + if self.widget is not None and update_widget: + self.widget.set_value(int(value)) class UpdateColorbarScale(object): """Class to update the values of the colorbar sliders.""" - def __init__(self, plotter=None, brain=None): - self.plotter = plotter + def __init__(self, brain, factor): self.brain = brain - self.keys = ('fmin', 'fmid', 'fmax') - self.reps = {key: None for key in self.keys} - self.slider_rep = None - self._first_time = True + self.factor = factor + self.widget = None + self.widgets = {key: None for key in self.brain.keys} - def __call__(self, value): + def __call__(self): """Update the colorbar sliders.""" - if self._first_time: - self._first_time = False - return - self.brain._update_fscale(value) - for key in self.keys: - if self.reps[key] is not None: - self.reps[key].SetValue(self.brain._data[key]) - if self.slider_rep is not None: - self.slider_rep.SetValue(1.0) - self.plotter.update() - - -class BumpColorbarPoints(object): - """Class that ensure constraints over the colorbar points.""" - - def __init__(self, 
plotter=None, brain=None, name=None): - self.plotter = plotter + self.brain._update_fscale(self.factor) + for key in self.brain.keys: + if self.widgets[key] is not None: + self.widgets[key].set_value(self.brain._data[key]) + + +class UpdateLUT(object): + """Update the LUT.""" + + def __init__(self, brain=None): self.brain = brain - self.name = name - self.callback = { - "fmin": lambda fmin: brain.update_lut(fmin=fmin), - "fmid": lambda fmid: brain.update_lut(fmid=fmid), - "fmax": lambda fmax: brain.update_lut(fmax=fmax), - } - self.keys = ('fmin', 'fmid', 'fmax') - self.reps = {key: None for key in self.keys} - self.last_update = time.time() - self._first_time = True - - def __call__(self, value): + self.widgets = {key: list() for key in self.brain.keys} + + def __call__(self, fmin=None, fmid=None, fmax=None): """Update the colorbar sliders.""" - if self._first_time: - self._first_time = False - return - vals = {key: self.brain._data[key] for key in self.keys} - if self.name == "fmin" and self.reps["fmin"] is not None: - if vals['fmax'] < value: - vals['fmax'] = value - self.reps['fmax'].SetValue(value) - if vals['fmid'] < value: - vals['fmid'] = value - self.reps['fmid'].SetValue(value) - self.reps['fmin'].SetValue(value) - elif self.name == "fmid" and self.reps['fmid'] is not None: - if vals['fmin'] > value: - vals['fmin'] = value - self.reps['fmin'].SetValue(value) - if vals['fmax'] < value: - vals['fmax'] = value - self.reps['fmax'].SetValue(value) - self.reps['fmid'].SetValue(value) - elif self.name == "fmax" and self.reps['fmax'] is not None: - if vals['fmin'] > value: - vals['fmin'] = value - self.reps['fmin'].SetValue(value) - if vals['fmid'] > value: - vals['fmid'] = value - self.reps['fmid'].SetValue(value) - self.reps['fmax'].SetValue(value) - self.brain.update_lut(**vals) - if time.time() > self.last_update + 1. 
/ 60.: - self.callback[self.name](value) - self.last_update = time.time() - self.plotter.update() + self.brain.update_lut(fmin=fmin, fmid=fmid, fmax=fmax) + with self.brain._no_lut_update(f'UpdateLUT {fmin} {fmid} {fmax}'): + for key in ('fmin', 'fmid', 'fmax'): + value = self.brain._data[key] + logger.debug(f'Updating {key} = {value}') + for widget in self.widgets[key]: + widget.set_value(value) class ShowView(object): """Class that selects the correct view.""" - def __init__(self, plotter=None, brain=None, orientation=None, - row=None, col=None, hemi=None): - self.plotter = plotter + def __init__(self, brain=None, data=None): self.brain = brain - self.orientation = orientation - self.short_orientation = [s[:3] for s in orientation] - self.row = row - self.col = col - self.hemi = hemi - self.slider_rep = None + self.data = data + self.widget = None def __call__(self, value, update_widget=False): """Update the view.""" - self.brain.show_view(value, row=self.row, col=self.col, - hemi=self.hemi) - if update_widget: - if len(value) > 3: - idx = self.orientation.index(value) - else: - idx = self.short_orientation.index(value) - if self.slider_rep is not None: - self.slider_rep.SetValue(idx) - self.slider_rep.SetTitleText(self.orientation[idx]) - self.plotter.update() - - -class SmartSlider(object): + if "renderer" in self.brain.widgets: + idx = self.brain.widgets["renderer"].get_value() + else: + idx = 0 + idx = int(idx) + if self.data[idx] is not None: + self.brain.show_view( + value, + row=self.data[idx]['row'], + col=self.data[idx]['col'], + hemi=self.data[idx]['hemi'], + ) + if update_widget and self.widget is not None: + self.widget.set_value(value) + + +class SmartCallBack(object): """Class to manage smart slider. It stores it's own slider representation for efficiency and uses it when necessary. 
""" - def __init__(self, plotter=None, callback=None): - self.plotter = plotter + def __init__(self, callback=None): self.callback = callback - self.slider_rep = None + self.widget = None def __call__(self, value, update_widget=False): """Update the value.""" self.callback(value) - if update_widget: - if self.slider_rep is not None: - self.slider_rep.SetValue(value) - self.plotter.update() + if self.widget is not None and update_widget: + self.widget.set_value(value) diff --git a/mne/viz/_brain/colormap.py b/mne/viz/_brain/colormap.py index d5de54c7e7c..5c68ca2da34 100644 --- a/mne/viz/_brain/colormap.py +++ b/mne/viz/_brain/colormap.py @@ -10,9 +10,9 @@ def create_lut(cmap, n_colors=256, center=None): """Return a colormap suitable for setting as a LUT.""" - from matplotlib import cm + from .._3d import _get_cmap assert not (isinstance(cmap, str) and cmap == 'auto') - cmap = cm.get_cmap(cmap) + cmap = _get_cmap(cmap) lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64) return lut diff --git a/mne/viz/_brain/mplcanvas.py b/mne/viz/_brain/mplcanvas.py deleted file mode 100644 index 23b9f4d7295..00000000000 --- a/mne/viz/_brain/mplcanvas.py +++ /dev/null @@ -1,116 +0,0 @@ -# Authors: Alexandre Gramfort -# Eric Larson -# Guillaume Favelier -# -# License: Simplified BSD -import warnings -from ..utils import tight_layout -from ...fixes import nullcontext - - -class MplCanvas(object): - """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).""" - - def __init__(self, brain, width, height, dpi): - from PyQt5 import QtWidgets - from matplotlib import rc_context - from matplotlib.figure import Figure - from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg - if brain.separate_canvas: - parent = None - else: - parent = brain.window - # prefer constrained layout here but live with tight_layout otherwise - context = nullcontext - extra_events = ('resize',) - try: - context = rc_context({'figure.constrained_layout.use': True}) - extra_events = () - except KeyError: - pass - with context: - self.fig = Figure(figsize=(width, height), dpi=dpi) - self.canvas = FigureCanvasQTAgg(self.fig) - self.axes = self.fig.add_subplot(111) - self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)') - self.canvas.setParent(parent) - FigureCanvasQTAgg.setSizePolicy( - self.canvas, - QtWidgets.QSizePolicy.Expanding, - QtWidgets.QSizePolicy.Expanding - ) - FigureCanvasQTAgg.updateGeometry(self.canvas) - self.brain = brain - self.time_func = brain.callbacks["time"] - for event in ('button_press', 'motion_notify') + extra_events: - self.canvas.mpl_connect( - event + '_event', getattr(self, 'on_' + event)) - - def plot(self, x, y, label, **kwargs): - """Plot a curve.""" - line, = self.axes.plot( - x, y, label=label, **kwargs) - self.update_plot() - return line - - def plot_time_line(self, x, label, **kwargs): - """Plot the vertical line.""" - line = self.axes.axvline(x, label=label, **kwargs) - self.update_plot() - return line - - def update_plot(self): - """Update the plot.""" - leg = self.axes.legend( - prop={'family': 'monospace', 'size': 'small'}, - framealpha=0.5, handlelength=1., - facecolor=self.brain._bg_color) - for text in leg.get_texts(): - text.set_color(self.brain._fg_color) - with warnings.catch_warnings(record=True): - warnings.filterwarnings('ignore', 'constrained_layout') - self.canvas.draw() - - def set_color(self, bg_color, fg_color): - """Set the widget colors.""" - self.axes.set_facecolor(bg_color) - self.axes.xaxis.label.set_color(fg_color) - 
self.axes.yaxis.label.set_color(fg_color) - self.axes.spines['top'].set_color(fg_color) - self.axes.spines['bottom'].set_color(fg_color) - self.axes.spines['left'].set_color(fg_color) - self.axes.spines['right'].set_color(fg_color) - self.axes.tick_params(axis='x', colors=fg_color) - self.axes.tick_params(axis='y', colors=fg_color) - self.fig.patch.set_facecolor(bg_color) - - def show(self): - """Show the canvas.""" - self.canvas.show() - - def close(self): - """Close the canvas.""" - self.canvas.close() - - def on_button_press(self, event): - """Handle button presses.""" - # left click (and maybe drag) in progress in axes - if (event.inaxes != self.axes or - event.button != 1): - return - self.time_func( - event.xdata, update_widget=True, time_as_index=False) - - def clear(self): - """Clear internal variables.""" - self.close() - self.axes.clear() - self.fig.clear() - self.brain = None - self.canvas = None - - on_motion_notify = on_button_press # for now they can be the same - - def on_resize(self, event): - """Handle resize events.""" - tight_layout(fig=self.axes.figure) diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py index 5520207afdb..e2e1e512692 100644 --- a/mne/viz/_brain/surface.py +++ b/mne/viz/_brain/surface.py @@ -9,12 +9,13 @@ from os import path as path import numpy as np -from ...utils import _check_option, get_subjects_dir, _check_fname +from ...utils import (_check_option, get_subjects_dir, _check_fname, + _validate_type) from ...surface import (complete_surface_info, read_surface, read_curvature, _read_patch) -class Surface(object): +class _Surface(object): """Container for a brain surface. It is used for storing vertices, faces and morphometric data @@ -37,6 +38,8 @@ class Surface(object): be applied. If != 0.0, an additional offset will be used. units : str Can be 'm' or 'mm' (default). + x_dir : ndarray | None + The x direction to use for offset alignment. 
Attributes ---------- @@ -68,18 +71,13 @@ class Surface(object): """ def __init__(self, subject_id, hemi, surf, subjects_dir=None, offset=None, - units='mm'): + units='mm', x_dir=None): - hemis = ('lh', 'rh') - - if hemi not in hemis: - raise ValueError('hemi should be either "lh" or "rh",' + - 'given value {0}'.format(hemi)) - - if offset is not None and ((not isinstance(offset, float)) and - (not isinstance(offset, int))): - raise ValueError('offset should either float or int, given ' + - 'type {0}'.format(type(offset).__name__)) + x_dir = np.array([1., 0, 0]) if x_dir is None else x_dir + assert isinstance(x_dir, np.ndarray) + assert np.isclose(np.linalg.norm(x_dir), 1., atol=1e-6) + assert hemi in ('lh', 'rh') + _validate_type(offset, (None, 'numeric'), 'offset') self.units = _check_option('units', units, ('mm', 'm')) self.subject_id = subject_id @@ -93,6 +91,7 @@ def __init__(self, subject_id, hemi, surf, subjects_dir=None, offset=None, self.grey_curv = None self.nn = None self.labels = dict() + self.x_dir = x_dir subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) self.data_path = path.join(subjects_dir, subject_id) @@ -114,6 +113,10 @@ def load_geometry(self): _check_fname(fname, overwrite='read', must_exist=True, name='flatmap surface file') coords, faces, orig_faces = _read_patch(fname) + # rotate 90 degrees to get to a more standard orientation + # where X determines the distance between the hemis + coords = coords[:, [1, 0, 2]] + coords[:, 1] *= -1 else: coords, faces = read_surface( path.join(self.data_path, 'surf', @@ -122,10 +125,11 @@ def load_geometry(self): if self.units == 'm': coords /= 1000. if self.offset is not None: + x_ = coords @ self.x_dir if self.hemi == 'lh': - coords[:, 0] -= (np.max(coords[:, 0]) + self.offset) + coords -= (np.max(x_) + self.offset) * self.x_dir else: - coords[:, 0] -= (np.min(coords[:, 0]) + self.offset) + coords -= (np.min(x_) + self.offset) * self.x_dir surf = dict(rr=coords, tris=faces) complete_surface_info(surf, copy=False, verbose=False) nn = surf['nn'] diff --git a/mne/viz/_brain/tests/test.ipynb b/mne/viz/_brain/tests/test.ipynb deleted file mode 100644 index 0e7dbe7ffbd..00000000000 --- a/mne/viz/_brain/tests/test.ipynb +++ /dev/null @@ -1,84 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import mne\n", - "from mne.datasets import testing\n", - "data_path = testing.data_path()\n", - "raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'\n", - "subjects_dir = data_path + '/subjects'\n", - "subject = 'sample'\n", - "trans = data_path + '/MEG/sample/sample_audvis_trunc-trans.fif'\n", - "info = mne.io.read_info(raw_fname)\n", - "mne.viz.set_3d_backend('notebook')\n", - "mne.viz.plot_alignment(info, trans, subject=subject, dig=True,\n", - " meg=['helmet', 'sensors'], subjects_dir=subjects_dir,\n", - " surfaces=['head-dense'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import mne\n", - "import matplotlib.pyplot as plt\n", - "from mne.datasets import testing\n", - "data_path = testing.data_path()\n", - "sample_dir = os.path.join(data_path, 'MEG', 'sample')\n", - "subjects_dir = os.path.join(data_path, 'subjects')\n", - "fname_stc = os.path.join(sample_dir, 'sample_audvis_trunc-meg')\n", - "stc = mne.read_source_estimate(fname_stc, subject='sample')\n", - "initial_time = 0.13\n", - "mne.viz.set_3d_backend('notebook')\n", - "brain_class = 
mne.viz.get_brain_class()\n", - "for interactive_state in (False, True):\n", - " plt.interactive(interactive_state)\n", - " brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,\n", - " clim=dict(kind='value', pos_lims=[3, 6, 9]),\n", - " time_viewer=True,\n", - " hemi='split')\n", - " assert isinstance(brain, brain_class)\n", - " assert brain.notebook\n", - " interactor = brain._renderer.figure.display\n", - " interactor.set_time_point({'new': 0})\n", - " brain.close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import mne\n", - "mne.viz.set_3d_backend('notebook')\n", - "fig = mne.viz.create_3d_figure(size=(100, 100))\n", - "mne.viz.set_3d_title(fig, 'Notebook testing')\n", - "mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, 0, 0])" - ] - } - ], - "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index cc2ccf9be51..8dc280baa60 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -10,28 +10,39 @@ import os import os.path as path +import sys import pytest import numpy as np -from numpy.testing import assert_allclose +from numpy.testing import assert_allclose, assert_array_equal -from mne import (read_source_estimate, SourceEstimate, MixedSourceEstimate, +from mne import (read_source_estimate, read_evokeds, read_cov, + read_forward_solution, pick_types_forward, + SourceEstimate, MixedSourceEstimate, write_surface, VolSourceEstimate) +from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.source_space import (read_source_spaces, vertex_to_mni, setup_volume_source_space) from mne.datasets import testing -from mne.utils import check_version +from mne.utils import check_version, requires_pysurfer +from mne.label import read_label from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh from mne.viz._brain.colormap import calculate_lut from matplotlib import cm, image -import matplotlib.pyplot as plt +from matplotlib.lines import Line2D data_path = testing.data_path(download=False) subject_id = 'sample' subjects_dir = path.join(data_path, 'subjects') fname_stc = path.join(data_path, 'MEG/sample/sample_audvis_trunc-meg') fname_label = path.join(data_path, 'MEG/sample/labels/Vis-lh.label') +fname_cov = path.join( + data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif') +fname_evoked = path.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc-ave.fif') +fname_fwd = path.join( + data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif') src_fname = path.join(data_path, 'subjects', 'sample', 'bem', 'sample-oct-6-src.fif') @@ -93,12 +104,10 @@ def GetPosition(self): return np.array(self.GetPickPosition()) - (0, 0, 100) -def test_layered_mesh(renderer_interactive): +def test_layered_mesh(renderer_interactive_pyvista): """Test management of scalars/colormap overlay.""" - if renderer_interactive._get_3d_backend() != 'pyvista': - pytest.skip('TimeViewer tests only supported on PyVista') mesh = _LayeredMesh( - renderer=renderer_interactive._get_renderer(size=[300, 300]), + renderer=renderer_interactive_pyvista._get_renderer(size=(300, 300)), vertices=np.array([[0, 0, 0], [1, 0, 
0], [0, 1, 0], [1, 1, 0]]), triangles=np.array([[0, 1, 2], [1, 2, 3]]), normals=np.array([[0, 0, 1]] * 4), @@ -112,7 +121,7 @@ def test_layered_mesh(renderer_interactive): mesh.add_overlay( scalars=np.array([0, 1, 1, 0]), colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]), - rng=None, + rng=[0, 1], opacity=None, name='test', ) @@ -125,20 +134,27 @@ def test_layered_mesh(renderer_interactive): @testing.requires_testing_data -def test_brain_gc(renderer, brain_gc): +def test_brain_gc(renderer_pyvista, brain_gc): """Test that a minimal version of Brain gets GC'ed.""" - if renderer._get_3d_backend() != 'pyvista': - pytest.skip('TimeViewer tests only supported on PyVista') brain = Brain('fsaverage', 'both', 'inflated', subjects_dir=subjects_dir) brain.close() +@requires_pysurfer @testing.requires_testing_data -def test_brain_init(renderer, tmpdir, pixel_ratio, brain_gc): +def test_brain_routines(renderer, brain_gc): + """Test backend agnostic Brain routines.""" + brain_klass = renderer.get_brain_class() + if renderer.get_3d_backend() == "mayavi": + from surfer import Brain + else: # PyVista + from mne.viz._brain import Brain + assert brain_klass == Brain + + +@testing.requires_testing_data +def test_brain_init(renderer_pyvista, tmpdir, pixel_ratio, brain_gc): """Test initialization of the Brain instance.""" - if renderer._get_3d_backend() != 'pyvista': - pytest.skip('TimeViewer tests only supported on PyVista') - from mne.label import read_label from mne.source_estimate import _BaseSourceEstimate class FakeSTC(_BaseSourceEstimate): @@ -161,12 +177,15 @@ def __init__(self): Brain(hemi=hemi, surf=surf, interaction=0, **kwargs) with pytest.raises(ValueError, match='interaction'): Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs) - renderer.backend._close_all() + renderer_pyvista.backend._close_all() brain = Brain(hemi=hemi, surf=surf, size=size, title=title, - cortex=cortex, units='m', **kwargs) + cortex=cortex, units='m', + silhouette=dict(decimate=0.95), **kwargs) with pytest.raises(TypeError, match='not supported'): brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None) + with pytest.raises(ValueError, match='add_data'): + brain.setup_time_viewer(time_viewer=True) brain._hemi = 'foo' # for testing: hemis with pytest.raises(ValueError, match='not be None'): brain._check_hemi(hemi=None) @@ -257,8 +276,16 @@ def __init__(self): with pytest.raises(ValueError, match="does not exist"): brain.add_label('foo', subdir='bar') label.name = None # test unnamed label - brain.add_label(label, scalar_thresh=0.) 
+ brain.add_label(label, scalar_thresh=0., color="green") + assert isinstance(brain.labels[label.hemi], list) + overlays = brain._layered_meshes[label.hemi]._overlays + assert 'unnamed0' in overlays + assert np.allclose(overlays['unnamed0']._colormap[0], + [0, 0, 0, 0]) # first component is transparent + assert np.allclose(overlays['unnamed0']._colormap[1], + [0, 128, 0, 255]) # second is green brain.remove_labels() + assert 'unnamed0' not in overlays brain.add_label(fname_label) brain.add_label('V1', borders=True) brain.remove_labels() @@ -278,22 +305,44 @@ def __init__(self): borders = [True, 2] alphas = [1, 0.5] colors = [None, 'r'] + brain = Brain(subject_id='fsaverage', hemi='both', size=size, + surf='inflated', subjects_dir=subjects_dir) + with pytest.raises(RuntimeError, match="both hemispheres"): + brain.add_annotation(annots[-1]) + with pytest.raises(ValueError, match="does not exist"): + brain.add_annotation('foo') + brain.close() brain = Brain(subject_id='fsaverage', hemi=hemi, size=size, surf='inflated', subjects_dir=subjects_dir) for a, b, p, color in zip(annots, borders, alphas, colors): brain.add_annotation(a, b, p, color=color) - brain.show_view(dict(focalpoint=(1e-5, 1e-5, 1e-5)), roll=1, distance=500) + view_args = dict(view=dict(focalpoint=(1e-5, 1e-5, 1e-5)), + roll=1, distance=500) + cam = brain._renderer.figure.plotter.camera + previous_roll = cam.GetRoll() + brain.show_view(**view_args) + assert np.allclose(cam.GetFocalPoint(), view_args["view"]["focalpoint"]) + assert np.allclose(cam.GetDistance(), view_args["distance"]) + assert np.allclose(cam.GetRoll(), previous_roll + view_args["roll"]) + del view_args # image and screenshot fname = path.join(str(tmpdir), 'test.png') assert not path.isfile(fname) brain.save_image(fname) assert path.isfile(fname) - brain.show_view(view=dict(azimuth=180., elevation=90.)) + fp = np.array( + brain._renderer.figure.plotter.renderer.ComputeVisiblePropBounds()) + fp = (fp[1::2] + fp[::2]) * 0.5 + view_args = dict(azimuth=180., elevation=90., focalpoint='auto') + brain.show_view(view=view_args) + assert np.allclose(brain._renderer.figure._azimuth, view_args["azimuth"]) + assert np.allclose( + brain._renderer.figure._elevation, view_args["elevation"]) + assert np.allclose(cam.GetFocalPoint(), fp) + del view_args img = brain.screenshot(mode='rgb') - if renderer._get_3d_backend() == 'mayavi': - pixel_ratio = 1. 
# no HiDPI when using the testing backend want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 3]) assert_allclose(img.shape, want_size) brain.close() @@ -303,10 +352,8 @@ def __init__(self): @pytest.mark.skipif(os.getenv('CI_OS_NAME', '') == 'osx', reason='Unreliable/segfault on macOS CI') @pytest.mark.parametrize('hemi', ('lh', 'rh')) -def test_single_hemi(hemi, renderer_interactive, brain_gc): +def test_single_hemi(hemi, renderer_interactive_pyvista, brain_gc): """Test single hemi support.""" - if renderer_interactive._get_3d_backend() != 'pyvista': - pytest.skip('TimeViewer tests only supported on PyVista') stc = read_source_estimate(fname_stc) idx, order = (0, 1) if hemi == 'lh' else (1, -1) stc = SourceEstimate( @@ -348,24 +395,96 @@ def test_brain_save_movie(tmpdir, renderer, brain_gc): brain.close() +_TINY_SIZE = (350, 300) + + +def tiny(tmpdir): + """Create a tiny fake brain.""" + # This is a minimal version of what we need for our viz-with-timeviewer + # support currently + subject = 'test' + subject_dir = tmpdir.mkdir(subject) + surf_dir = subject_dir.mkdir('surf') + rng = np.random.RandomState(0) + rr = rng.randn(4, 3) + tris = np.array([[0, 1, 2], [2, 1, 3]]) + curv = rng.randn(len(rr)) + with open(surf_dir.join('lh.curv'), 'wb') as fid: + fid.write(np.array([255, 255, 255], dtype=np.uint8)) + fid.write(np.array([len(rr), 0, 1], dtype='>i4')) + fid.write(curv.astype('>f4')) + write_surface(surf_dir.join('lh.white'), rr, tris) + write_surface(surf_dir.join('rh.white'), rr, tris) # needed for vertex tc + vertices = [np.arange(len(rr)), []] + data = rng.randn(len(rr), 10) + stc = SourceEstimate(data, vertices, 0, 1, subject) + brain = stc.plot(subjects_dir=tmpdir, hemi='lh', surface='white', + size=_TINY_SIZE) + # in principle this should be sufficient: + # + # ratio = brain.mpl_canvas.canvas.window().devicePixelRatio() + # + # but in practice VTK can mess up sizes, so let's just calculate it. 
+ sz = brain.plotter.size() + sz = (sz.width(), sz.height()) + sz_ren = brain.plotter.renderer.GetSize() + ratio = np.median(np.array(sz_ren) / np.array(sz)) + return brain, ratio + + +@pytest.mark.filterwarnings('ignore:.*constrained_layout not applied.*:') +def test_brain_screenshot(renderer_interactive_pyvista, tmpdir, brain_gc): + """Test time viewer screenshot.""" + # XXX disable for sprint because it's too unreliable + if sys.platform == 'darwin' and os.getenv('GITHUB_ACTIONS', '') == 'true': + pytest.skip('Test is unreliable on GitHub Actions macOS') + tiny_brain, ratio = tiny(tmpdir) + img_nv = tiny_brain.screenshot(time_viewer=False) + want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3) + assert img_nv.shape == want + img_v = tiny_brain.screenshot(time_viewer=True) + assert img_v.shape[1:] == want[1:] + assert_allclose(img_v.shape[0], want[0] * 4 / 3, atol=3) # some slop + tiny_brain.close() + + +def _assert_brain_range(brain, rng): + __tracebackhide__ = True + assert brain._cmap_range == rng, 'brain._cmap_range == rng' + for hemi, layerer in brain._layered_meshes.items(): + for key, mesh in layerer._overlays.items(): + if key == 'curv': + continue + assert mesh._rng == rng, \ + f'_layered_meshes[{repr(hemi)}][{repr(key)}]._rng != {rng}' + + @testing.requires_testing_data @pytest.mark.slowtest -def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): +def test_brain_time_viewer(renderer_interactive_pyvista, pixel_ratio, + brain_gc): """Test time viewer primitives.""" - if renderer_interactive._get_3d_backend() != 'pyvista': - pytest.skip('TimeViewer tests only supported on PyVista') with pytest.raises(ValueError, match="between 0 and 1"): _create_testing_brain(hemi='lh', show_traces=-1.0) with pytest.raises(ValueError, match="got unknown keys"): _create_testing_brain(hemi='lh', surf='white', src='volume', volume_options={'foo': 'bar'}) - brain = _create_testing_brain(hemi='both', show_traces=False) + brain = _create_testing_brain( + hemi='both', show_traces=False, + brain_kwargs=dict(silhouette=dict(decimate=0.95)) + ) + # test sub routines when show_traces=False + brain._on_pick(None, None) + brain._configure_vertex_time_course() + brain._configure_label_time_course() + brain.setup_time_viewer() # for coverage brain.callbacks["time"](value=0) - brain.callbacks["orientation_lh_0_0"]( + assert "renderer" not in brain.callbacks + brain.callbacks["orientation"]( value='lat', update_widget=True ) - brain.callbacks["orientation_lh_0_0"]( + brain.callbacks["orientation"]( value='medial', update_widget=True ) @@ -373,14 +492,28 @@ def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): value=0.0, time_as_index=False, ) + # Need to process events for old Qt brain.callbacks["smoothing"](value=1) - brain.callbacks["fmin"](value=12.0) + _assert_brain_range(brain, [0.1, 0.3]) + from mne.utils import use_log_level + print('\nCallback fmin\n') + with use_log_level('debug'): + brain.callbacks["fmin"](value=12.0) + assert brain._data["fmin"] == 12.0 brain.callbacks["fmax"](value=4.0) + _assert_brain_range(brain, [4.0, 4.0]) brain.callbacks["fmid"](value=6.0) + _assert_brain_range(brain, [4.0, 6.0]) brain.callbacks["fmid"](value=4.0) - brain.callbacks["fscale"](value=1.1) + brain.callbacks["fplus"]() + brain.callbacks["fminus"]() brain.callbacks["fmin"](value=12.0) brain.callbacks["fmid"](value=4.0) + _assert_brain_range(brain, [4.0, 12.0]) + brain._shift_time(op=lambda x, y: x + y) + brain._shift_time(op=lambda x, y: x - y) + brain._rotate_azimuth(15) + 
brain._rotate_elevation(15) brain.toggle_interface() brain.toggle_interface(value=False) brain.callbacks["playback_speed"](value=0.1) @@ -389,13 +522,17 @@ def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): brain.apply_auto_scaling() brain.restore_user_scaling() brain.reset() - plt.close('all') + + assert brain.help_canvas is not None + assert not brain.help_canvas.canvas.isVisible() brain.help() - assert len(plt.get_fignums()) == 1 - plt.close('all') - assert len(plt.get_fignums()) == 0 + assert brain.help_canvas.canvas.isVisible() # screenshot + # Need to turn the interface back on otherwise the window is too wide + # (it keeps the window size and expands the 3D area when the interface + # is toggled off) + brain.toggle_interface(value=True) brain.show_view(view=dict(azimuth=180., elevation=90.)) img = brain.screenshot(mode='rgb') want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3]) @@ -412,25 +549,92 @@ def test_brain_time_viewer(renderer_interactive, pixel_ratio, brain_gc): ]) @pytest.mark.parametrize('src', [ 'surface', + pytest.param('vector', marks=pytest.mark.slowtest), pytest.param('volume', marks=pytest.mark.slowtest), pytest.param('mixed', marks=pytest.mark.slowtest), ]) @pytest.mark.slowtest -def test_brain_traces(renderer_interactive, hemi, src, tmpdir, +def test_brain_traces(renderer_interactive_pyvista, hemi, src, tmpdir, brain_gc): """Test brain traces.""" - if renderer_interactive._get_3d_backend() != 'pyvista': - pytest.skip('Only PyVista supports traces') + hemi_str = list() + if src in ('surface', 'vector', 'mixed'): + hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh']) + if src in ('mixed', 'volume'): + hemi_str.extend(['vol']) + + # label traces brain = _create_testing_brain( - hemi=hemi, surf='white', src=src, show_traces=0.5, initial_time=0, + hemi=hemi, surf='white', src=src, show_traces='label', + volume_options=None, # for speed, don't upsample + n_time=5, initial_time=0, + ) + if src == 'surface': + brain._data['src'] = None # test src=None + if src in ('surface', 'vector', 'mixed'): + assert brain.show_traces + assert brain.traces_mode == 'label' + brain.widgets["extract_mode"].set_value('max') + + # test picking a cell at random + rng = np.random.RandomState(0) + for idx, current_hemi in enumerate(hemi_str): + if current_hemi == 'vol': + continue + current_mesh = brain._layered_meshes[current_hemi]._polydata + cell_id = rng.randint(0, current_mesh.n_cells) + test_picker = TstVTKPicker( + current_mesh, cell_id, current_hemi, brain) + assert len(brain.picked_patches[current_hemi]) == 0 + brain._on_pick(test_picker, None) + assert len(brain.picked_patches[current_hemi]) == 1 + for label_id in list(brain.picked_patches[current_hemi]): + label = brain._annotation_labels[current_hemi][label_id] + assert isinstance(label._line, Line2D) + brain.widgets["extract_mode"].set_value('mean') + brain.clear_glyphs() + assert len(brain.picked_patches[current_hemi]) == 0 + brain._on_pick(test_picker, None) # picked and added + assert len(brain.picked_patches[current_hemi]) == 1 + brain._on_pick(test_picker, None) # picked again so removed + assert len(brain.picked_patches[current_hemi]) == 0 + # test switching from 'label' to 'vertex' + brain.widgets["annotation"].set_value('None') + brain.widgets["extract_mode"].set_value('max') + else: # volume + assert "annotation" not in brain.widgets + assert "extract_mode" not in brain.widgets + brain.close() + + # test colormap + if src != 'vector': + brain = _create_testing_brain( + 
hemi=hemi, surf='white', src=src, show_traces=0.5, + initial_time=0, + volume_options=None, # for speed, don't upsample + n_time=1 if src == 'mixed' else 5, diverging=True, + add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)), + ) + # mne_analyze should be chosen + ctab = brain._data['ctable'] + assert_array_equal(ctab[0], [0, 255, 255, 255]) # opaque cyan + assert_array_equal(ctab[-1], [255, 255, 0, 255]) # opaque yellow + assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3) + brain.close() + + # vertex traces + brain = _create_testing_brain( + hemi=hemi, surf='white', src=src, show_traces=0.5, + initial_time=0, volume_options=None, # for speed, don't upsample n_time=1 if src == 'mixed' else 5, add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)), ) assert brain.show_traces + assert brain.traces_mode == 'vertex' assert hasattr(brain, "picked_points") assert hasattr(brain, "_spheres") - assert brain.plotter.scalar_bar.GetNumberOfLabels() == 3 + assert brain._scalar_bar.GetNumberOfLabels() == 3 # add foci should work for volumes brain.add_foci([[0, 0, 0]], hemi='lh' if src == 'surface' else 'vol') @@ -438,11 +642,6 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, # test points picked by default picked_points = brain.get_picked_points() spheres = brain._spheres - hemi_str = list() - if src in ('surface', 'mixed'): - hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh']) - if src in ('mixed', 'volume'): - hemi_str.extend(['vol']) for current_hemi in hemi_str: assert len(picked_points[current_hemi]) == 1 n_spheres = len(hemi_str) @@ -450,8 +649,12 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, n_spheres += 1 assert len(spheres) == n_spheres + # test switching from 'vertex' to 'label' + if src == 'surface': + brain.widgets["annotation"].set_value('aparc') + brain.widgets["annotation"].set_value('None') # test removing points - brain.clear_points() + brain.clear_glyphs() assert len(spheres) == 0 for key in ('lh', 'rh', 'vol'): assert len(picked_points[key]) == 0 @@ -475,6 +678,7 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, assert cell_id == test_picker.cell_id assert test_picker.point_id is None brain._on_pick(test_picker, None) + brain._on_pick(test_picker, None) assert test_picker.point_id is not None assert len(picked_points[current_hemi]) == 1 assert picked_points[current_hemi][0] == test_picker.point_id @@ -542,10 +746,8 @@ def test_brain_traces(renderer_interactive, hemi, src, tmpdir, @testing.requires_testing_data @pytest.mark.slowtest -def test_brain_linkviewer(renderer_interactive, brain_gc): +def test_brain_linkviewer(renderer_interactive_pyvista, brain_gc): """Test _LinkViewer primitives.""" - if renderer_interactive._get_3d_backend() != 'pyvista': - pytest.skip('Linkviewer only supported on PyVista') brain1 = _create_testing_brain(hemi='lh', show_traces=False) brain2 = _create_testing_brain(hemi='lh', show_traces='separate') brain1._times = brain1._times * 2 @@ -557,8 +759,9 @@ def test_brain_linkviewer(renderer_interactive, brain_gc): colorbar=False, picking=False, ) + brain1.close() - brain_data = _create_testing_brain(hemi='split', show_traces=True) + brain_data = _create_testing_brain(hemi='split', show_traces='vertex') link_viewer = _LinkViewer( [brain2, brain_data], time=True, @@ -566,15 +769,13 @@ def test_brain_linkviewer(renderer_interactive, brain_gc): colorbar=True, picking=True, ) - link_viewer.set_time_point(value=0) - link_viewer.brains[0].mpl_canvas.time_func(0) - 
link_viewer.set_fmin(0) - link_viewer.set_fmid(0.5) - link_viewer.set_fmax(1) - link_viewer.set_playback_speed(value=0.1) - link_viewer.toggle_playback() - del link_viewer - brain1.close() + link_viewer.leader.set_time_point(0) + link_viewer.leader.mpl_canvas.time_func(0) + link_viewer.leader.callbacks["fmin"](0) + link_viewer.leader.callbacks["fmid"](0.5) + link_viewer.leader.callbacks["fmax"](1) + link_viewer.leader.set_playback_speed(0.1) + link_viewer.leader.toggle_playback() brain2.close() brain_data.close() @@ -684,13 +885,25 @@ def test_calculate_lut(): calculate_lut(colormap, alpha, 1, 0, 2) -def _create_testing_brain(hemi, surf='inflated', src='surface', size=300, - n_time=5, **kwargs): - assert src in ('surface', 'mixed', 'volume') +def _create_testing_brain(hemi, surf='inflated', src='surface', + size=300, n_time=5, diverging=False, **kwargs): + assert src in ('surface', 'vector', 'mixed', 'volume') meth = 'plot' if src in ('surface', 'mixed'): sample_src = read_source_spaces(src_fname) klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate + if src == 'vector': + fwd = read_forward_solution(fname_fwd) + fwd = pick_types_forward(fwd, meg=True, eeg=False) + evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0] + noise_cov = read_cov(fname_cov) + free = make_inverse_operator( + evoked.info, fwd, noise_cov, loose=1.) + stc = apply_inverse(evoked, free, pick_ori='vector') + return stc.plot( + subject=subject_id, hemi=hemi, size=size, + subjects_dir=subjects_dir, colormap='auto', + **kwargs) if src in ('volume', 'mixed'): vol_src = setup_volume_source_space( subject_id, 7., mri='aseg.mgz', @@ -715,14 +928,17 @@ def _create_testing_brain(hemi, surf='inflated', src='surface', size=300, stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \ rng.rand(stc_data.size // 20) stc_data.shape = (n_verts, n_time) + if diverging: + stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) - fmin = stc.data.min() - fmax = stc.data.max() - fmid = (fmin + fmax) / 2. + clim = dict(kind='value', lims=[0.1, 0.2, 0.3]) + if diverging: + clim['pos_lims'] = clim.pop('lims') + brain_data = getattr(stc, meth)( subject=subject_id, hemi=hemi, surface=surf, size=size, - subjects_dir=subjects_dir, colormap='hot', - clim=dict(kind='value', lims=(fmin, fmid, fmax)), src=sample_src, + subjects_dir=subjects_dir, colormap='auto', + clim=clim, src=sample_src, **kwargs) return brain_data diff --git a/mne/viz/_brain/tests/test_notebook.py b/mne/viz/_brain/tests/test_notebook.py index 48c65c2d066..2b7baed8179 100644 --- a/mne/viz/_brain/tests/test_notebook.py +++ b/mne/viz/_brain/tests/test_notebook.py @@ -1,24 +1,123 @@ -import os -import pytest +# -*- coding: utf-8 -*- +# +# Authors: Guillaume Favelier +# Eric Larson + +# NOTE: Tests in this directory must be self-contained because they are +# executed in a separate IPython kernel. 
+import sys +import pytest from mne.datasets import testing -from mne.utils import requires_version -PATH = os.path.dirname(os.path.realpath(__file__)) + +# This will skip all tests in this scope +pytestmark = pytest.mark.skipif( + sys.platform.startswith('win'), reason='nbexec does not work on Windows') + + +@testing.requires_testing_data +def test_notebook_alignment(renderer_notebook, brain_gc, nbexec): + """Test plot alignment in a notebook.""" + import mne + data_path = mne.datasets.testing.data_path() + raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif' + subjects_dir = data_path + '/subjects' + subject = 'sample' + trans = data_path + '/MEG/sample/sample_audvis_trunc-trans.fif' + info = mne.io.read_info(raw_fname) + mne.viz.set_3d_backend('notebook') + fig = mne.viz.plot_alignment( + info, trans, subject=subject, dig=True, + meg=['helmet', 'sensors'], subjects_dir=subjects_dir, + surfaces=['head-dense']) + assert fig.display is not None + + +@pytest.mark.slowtest # ~3 min on GitHub macOS +@testing.requires_testing_data +def test_notebook_interactive(renderer_notebook, brain_gc, nbexec): + """Test interactive modes.""" + import os + import tempfile + from contextlib import contextmanager + from numpy.testing import assert_allclose + from ipywidgets import Button + import matplotlib.pyplot as plt + import mne + from mne.datasets import testing + data_path = testing.data_path() + sample_dir = os.path.join(data_path, 'MEG', 'sample') + subjects_dir = os.path.join(data_path, 'subjects') + fname_stc = os.path.join(sample_dir, 'sample_audvis_trunc-meg') + stc = mne.read_source_estimate(fname_stc, subject='sample') + initial_time = 0.13 + mne.viz.set_3d_backend('notebook') + brain_class = mne.viz.get_brain_class() + + @contextmanager + def interactive(on): + old = plt.isinteractive() + plt.interactive(on) + try: + yield + finally: + plt.interactive(old) + + with interactive(False): + brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time, + clim=dict(kind='value', pos_lims=[3, 6, 9]), + time_viewer=True, + show_traces=True, + hemi='lh', size=300) + assert isinstance(brain, brain_class) + assert brain._renderer.figure.notebook + assert brain._renderer.figure.display is not None + brain._renderer._update() + tmp_path = tempfile.mkdtemp() + movie_path = os.path.join(tmp_path, 'test.gif') + screenshot_path = os.path.join(tmp_path, 'test.png') + brain._renderer.actions['movie_field'].value = movie_path + brain._renderer.actions['screenshot_field'].value = screenshot_path + total_number_of_buttons = sum( + '_field' not in k for k in brain._renderer.actions.keys()) + number_of_buttons = 0 + for action in brain._renderer.actions.values(): + if isinstance(action, Button): + action.click() + number_of_buttons += 1 + assert number_of_buttons == total_number_of_buttons + assert os.path.isfile(movie_path) + assert os.path.isfile(screenshot_path) + img_nv = brain.screenshot() + assert img_nv.shape == (300, 300, 3), img_nv.shape + img_v = brain.screenshot(time_viewer=True) + assert img_v.shape[1:] == (300, 3), img_v.shape + # XXX This rtol is not very good, ideally would be zero + assert_allclose( + img_v.shape[0], img_nv.shape[0] * 1.25, err_msg=img_nv.shape, + rtol=0.1) + brain.close() -@pytest.mark.slowtest @testing.requires_testing_data -@requires_version('nbformat') -@requires_version('nbclient') -@requires_version('ipympl') -def test_notebook_3d_backend(renderer_notebook, brain_gc): - """Test executing a notebook that should not fail.""" - import nbformat - from nbclient 
import NotebookClient - - notebook_filename = os.path.join(PATH, "test.ipynb") - with open(notebook_filename) as f: - nb = nbformat.read(f, as_version=4) - client = NotebookClient(nb) - client.execute() +def test_notebook_button_counts(renderer_notebook, brain_gc, nbexec): + """Test button counts.""" + import mne + from ipywidgets import Button + mne.viz.set_3d_backend('notebook') + rend = mne.viz.create_3d_figure(size=(100, 100), scene=False) + fig = rend.scene() + mne.viz.set_3d_title(fig, 'Notebook testing') + mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, 0, 0]) + assert fig.display is None + rend.show() + total_number_of_buttons = sum( + '_field' not in k for k in rend.actions.keys()) + number_of_buttons = 0 + for action in rend.actions.values(): + if isinstance(action, Button): + action.click() + number_of_buttons += 1 + assert number_of_buttons == total_number_of_buttons + assert fig.display is not None diff --git a/mne/viz/_brain/view.py b/mne/viz/_brain/view.py index 4c1447524da..c8eb1e7a258 100644 --- a/mne/viz/_brain/view.py +++ b/mne/viz/_brain/view.py @@ -6,7 +6,7 @@ # # License: Simplified BSD -ORIGIN = (0., 0., 0.) +ORIGIN = 'auto' _lh_views_dict = { 'lateral': dict(azimuth=180., elevation=90., focalpoint=ORIGIN), @@ -38,11 +38,13 @@ lh_views_dict = _lh_views_dict.copy() for k, v in _lh_views_dict.items(): lh_views_dict[k[:3]] = v - lh_views_dict['flat'] = dict(azimuth=250, elevation=0, focalpoint=ORIGIN) + lh_views_dict['flat'] = dict( + azimuth=0, elevation=0, focalpoint=ORIGIN, roll=0) rh_views_dict = _rh_views_dict.copy() for k, v in _rh_views_dict.items(): rh_views_dict[k[:3]] = v - rh_views_dict['flat'] = dict(azimuth=-70, elevation=0, focalpoint=ORIGIN) + rh_views_dict['flat'] = dict( + azimuth=0, elevation=0, focalpoint=ORIGIN, roll=0) views_dicts = dict(lh=lh_views_dict, vol=lh_views_dict, both=lh_views_dict, rh=rh_views_dict) diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 1e2c11ae73f..31694f80249 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -45,7 +45,8 @@ import numpy as np from matplotlib.figure import Figure from .epochs import plot_epochs_image -from .ica import _create_properties_layout +from .ica import (_create_properties_layout, _fast_plot_ica_properties, + _prepare_data_ica_properties) from .utils import (plt_show, plot_sensors, _setup_plot_projector, _events_off, _set_window_title, _merge_annotations, DraggableLine, _get_color_list, logger, _validate_if_list_of_axes, @@ -58,6 +59,12 @@ _DATA_CH_TYPES_SPLIT, _DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES, _FNIRS_CH_TYPES_SPLIT) +# CONSTANTS (inches) +ANNOTATION_FIG_PAD = 0.1 +ANNOTATION_FIG_MIN_H = 2.9 # fixed part, not including radio buttons/labels +ANNOTATION_FIG_W = 5.0 +ANNOTATION_FIG_CHECKBOX_COLUMN_W = 0.5 + class MNEFigParams: """Container object for MNE figure parameters.""" @@ -471,8 +478,7 @@ def __init__(self, inst, figsize, ica=None, xlabel='Time (s)', **kwargs): self.mne.n_times / self.mne.info['sfreq']) # VLINE vline_color = (0., 0.75, 0.) 
- vline_kwargs = dict(visible=False, animated=True, - zorder=self.mne.zorder['vline']) + vline_kwargs = dict(visible=False, zorder=self.mne.zorder['vline']) if self.mne.is_epochs: x = np.arange(self.mne.n_epochs) vline = ax_main.vlines( @@ -562,10 +568,7 @@ def _resize(self, event): self.mne.zen_w *= old_width / new_width self.mne.zen_h *= old_height / new_height self.mne.fig_size_px = (new_width, new_height) - # for blitting self.canvas.draw_idle() - self.canvas.flush_events() - self.mne.bg = self.canvas.copy_from_bbox(self.bbox) def _hover(self, event): """Handle motion event when annotating.""" @@ -623,7 +626,8 @@ def _keypress(self, event): else: last_time = self.mne.inst.times[-1] # scroll up/down - if key in ('down', 'up'): + if key in ('down', 'up', 'shift+down', 'shift+up'): + key = key.split('+')[-1] direction = -1 if key == 'up' else 1 # butterfly case if self.mne.butterfly: @@ -708,7 +712,7 @@ def _keypress(self, event): self._update_hscroll() if key == 'end' and self.mne.vline_visible: # prevent flicker self._show_vline(None) - self._redraw() + self._redraw(annotations=True) elif key == '?': # help window self._toggle_help_fig(event) elif key == 'a': # annotation mode @@ -722,6 +726,8 @@ def _keypress(self, event): self._toggle_epoch_histogram() elif key == 'j' and len(self.mne.projs): # SSP window self._toggle_proj_fig() + elif key == 'J' and len(self.mne.projs): + self._toggle_proj_checkbox(event, toggle_all=True) elif key == 'p': # toggle draggable annotations self._toggle_draggable_annotations(event) if self.mne.fig_annotation is not None: @@ -764,6 +770,7 @@ def _buttonpress(self, event): self._toggle_bad_channel(idx) return self._show_vline(event.xdata) # butterfly / not on data trace + self._redraw(update_data=False, annotations=False) return # click in vertical scrollbar elif event.inaxes == self.mne.ax_vscroll: @@ -774,7 +781,7 @@ def _buttonpress(self, event): # click in horizontal scrollbar elif event.inaxes == self.mne.ax_hscroll: if self._check_update_hscroll_clicked(event): - self._redraw() + self._redraw(annotations=True) # click on proj button elif event.inaxes == self.mne.ax_proj: self._toggle_proj_fig(event) @@ -788,12 +795,16 @@ def _buttonpress(self, event): start = _sync_onset(inst, inst.annotations.onset) end = start + inst.annotations.duration ann_idx = np.where((xdata > start) & (xdata < end))[0] - inst.annotations.delete(ann_idx) # only first one deleted + for idx in sorted(ann_idx)[::-1]: + # only remove visible annotation spans + descr = inst.annotations[idx]['description'] + if self.mne.visible_annotations[descr]: + inst.annotations.delete(idx) self._remove_annotation_hover_line() self._draw_annotations() self.canvas.draw_idle() - elif event.inaxes == ax_main: # hide green line - self._blit_vline(False) + elif event.inaxes == ax_main: + self._toggle_vline(False) def _pick(self, event): """Handle matplotlib pick events.""" @@ -852,13 +863,22 @@ def _create_ch_location_fig(self, pick): fig.lasso.style_sensors(inds) plt_show(fig=fig) - def _create_ica_properties_fig(self, pick): + def _create_ica_properties_fig(self, idx): """Show ICA properties for the selected component.""" - ch_name = self.mne.ch_names[pick] + ch_name = self.mne.ch_names[idx] + if ch_name not in self.mne.ica._ica_names: # for EOG chans: do nothing + return + pick = self.mne.ica._ica_names.index(ch_name) fig = self._new_child_figure(figsize=(7, 6), fig_name=None, window_title=f'{ch_name} properties') fig, axes = _create_properties_layout(fig=fig) - 
self.mne.ica.plot_properties(self.mne.ica_inst, picks=pick, axes=axes) + if not hasattr(self.mne, 'data_ica_properties'): + # Precompute epoch sources only once + self.mne.data_ica_properties = _prepare_data_ica_properties( + self.mne.ica_inst, self.mne.ica) + _fast_plot_ica_properties( + self.mne.ica, self.mne.ica_inst, picks=pick, axes=axes, + precomputed_data=self.mne.data_ica_properties) def _create_epoch_image_fig(self, pick): """Show epochs image for the selected channel.""" @@ -973,8 +993,8 @@ def _get_help_text(self): has_proj = bool(len(self.mne.projs)) # adapt keys to different platforms is_mac = platform.system() == 'Darwin' - dur_keys = ('⌘ + ←', '⌘ + →') if is_mac else ('Home', 'End') - ch_keys = ('⌘ + ↑', '⌘ + ↓') if is_mac else ('Page up', 'Page down') + dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End') + ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down') # adapt descriptions to different instance types ch_cmp = 'component' if is_ica else 'channel' ch_epo = 'epoch' if is_epo else 'channel' @@ -1017,10 +1037,11 @@ def _get_help_text(self): ('a', 'Toggle annotation mode' if is_raw else None), ('h', 'Toggle peak-to-peak histogram' if is_epo else None), ('j', 'Toggle SSP projector window' if has_proj else None), + ('shift+j', 'Toggle all SSPs'), ('p', 'Toggle draggable annotations' if is_raw else None), ('s', 'Toggle scalebars' if not is_ica else None), ('z', 'Toggle scrollbars'), - ('F11', 'Toggle fullscreen'), + ('F11', 'Toggle fullscreen' if not is_mac else None), ('?', 'Open this help window'), ('esc', 'Close focused figure or dialog window'), ('_MOUSE INTERACTION', ' '), @@ -1044,25 +1065,30 @@ def _create_annotation_fig(self): from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable # make figure labels = np.array(sorted(set(self.mne.inst.annotations.description))) - width, var_height, fixed_height, pad = \ - self._compute_annotation_figsize(len(labels)) - figsize = (width, var_height + fixed_height) + radio_button_h = self._compute_annotation_figsize(len(labels)) + figsize = (ANNOTATION_FIG_W, ANNOTATION_FIG_MIN_H + radio_button_h) fig = self._new_child_figure(figsize=figsize, FigureClass=MNEAnnotationFigure, fig_name='fig_annotation', window_title='Annotations') # make main axes - left = fig._inch_to_rel(pad) - bottom = fig._inch_to_rel(pad, horiz=False) + left = fig._inch_to_rel(ANNOTATION_FIG_PAD) + bottom = fig._inch_to_rel(ANNOTATION_FIG_PAD, horiz=False) width = 1 - 2 * left height = 1 - 2 * bottom fig.mne.radio_ax = fig.add_axes((left, bottom, width, height), frame_on=False, aspect='equal') div = make_axes_locatable(fig.mne.radio_ax) - self._update_annotation_fig() # populate w/ radio buttons & labels + # append show/hide checkboxes at right + fig.mne.show_hide_ax = div.append_axes( + position='right', size=Fixed(ANNOTATION_FIG_CHECKBOX_COLUMN_W), + pad=Fixed(ANNOTATION_FIG_PAD), aspect='equal', + sharey=fig.mne.radio_ax) + # populate w/ radio buttons & labels + self._update_annotation_fig() # append instructions at top instructions_ax = div.append_axes(position='top', size=Fixed(1), - pad=Fixed(5 * pad)) + pad=Fixed(5 * ANNOTATION_FIG_PAD)) # XXX when we support a newer matplotlib (something >3.0) the # instructions can have inline bold formatting: # instructions = '\n'.join( @@ -1070,41 +1096,47 @@ def _create_annotation_fig(self): # r'$\mathbf{Right‐click~on~plot~annotation:}$ delete annotation', # r'$\mathbf{Type~in~annotation~window:}$ modify new label name', # r'$\mathbf{Enter~(or~click~button):}$ add new label to 
list', - # r'$\mathbf{Esc:}$ exit annotation mode & close window']) + # r'$\mathbf{Esc:}$ exit annotation mode & close this window']) instructions = '\n'.join( ['Left click & drag on plot: create/modify annotation', - 'Right click on plot annotation: delete annotation', - 'Type in annotation window: modify new label name', + 'Right click on annotation highlight: delete annotation', + 'Type in this window: modify new label name', 'Enter (or click button): add new label to list', - 'Esc: exit annotation mode & close window']) + 'Esc: exit annotation mode & close this dialog window']) instructions_ax.text(0, 1, instructions, va='top', ha='left', + linespacing=1.7, usetex=False) # force use of MPL mathtext parser instructions_ax.set_axis_off() # append text entry axes at bottom - text_entry_ax = div.append_axes(position='bottom', size=Fixed(3 * pad), - pad=Fixed(pad)) + text_entry_ax = div.append_axes(position='bottom', + size=Fixed(3 * ANNOTATION_FIG_PAD), + pad=Fixed(ANNOTATION_FIG_PAD)) text_entry_ax.text(0.4, 0.5, 'New label:', va='center', ha='right', weight='bold') fig.label = text_entry_ax.text(0.5, 0.5, 'BAD_', va='center', ha='left') text_entry_ax.set_axis_off() # append button at bottom - button_ax = div.append_axes(position='bottom', size=Fixed(3 * pad), - pad=Fixed(pad)) + button_ax = div.append_axes(position='bottom', + size=Fixed(3 * ANNOTATION_FIG_PAD), + pad=Fixed(ANNOTATION_FIG_PAD)) fig.button = Button(button_ax, 'Add new label') fig.button.on_clicked(self._add_annotation_label) plt_show(fig=fig) # add "draggable" checkbox - drag_ax_height = 3 * pad + drag_ax_height = 3 * ANNOTATION_FIG_PAD drag_ax = div.append_axes('bottom', size=Fixed(drag_ax_height), - pad=Fixed(pad), aspect='equal') + pad=Fixed(ANNOTATION_FIG_PAD), + aspect='equal') checkbox = CheckButtons(drag_ax, labels=('Draggable edges?',), actives=(self.mne.draggable_annotations,)) checkbox.on_clicked(self._toggle_draggable_annotations) fig.mne.drag_checkbox = checkbox # reposition & resize axes width_in, height_in = fig.get_size_inches() - width_ax = fig._inch_to_rel(width_in - 2 * pad) + width_ax = fig._inch_to_rel(width_in + - ANNOTATION_FIG_CHECKBOX_COLUMN_W + - 3 * ANNOTATION_FIG_PAD) aspect = width_ax / fig._inch_to_rel(drag_ax_height) drag_ax.set_xlim(0, aspect) drag_ax.set_axis_off() @@ -1123,7 +1155,8 @@ def _create_annotation_fig(self): # setup interactivity in plot window col = ('#ff0000' if len(fig.mne.radio_ax.buttons.circles) < 1 else fig.mne.radio_ax.buttons.circles[0].get_edgecolor()) - # TODO: we would like useblit=True here, but MPL #9660 prevents it + # TODO: we would like useblit=True here, but it behaves oddly when the + # first span is dragged (subsequent spans seem to work OK) selector = SpanSelector(self.mne.ax_main, self._select_annotation_span, 'horizontal', minspan=0.1, useblit=False, rectprops=dict(alpha=0.5, facecolor=col)) @@ -1131,30 +1164,43 @@ def _create_annotation_fig(self): self.mne._callback_ids['motion_notify_event'] = \ self.canvas.mpl_connect('motion_notify_event', self._hover) + def _toggle_visible_annotations(self, event): + """Enable/disable display of annotations on a per-label basis.""" + checkboxes = self.mne.show_hide_annotation_checkboxes + labels = [t.get_text() for t in checkboxes.labels] + actives = checkboxes.get_status() + self.mne.visible_annotations = dict(zip(labels, actives)) + self._redraw(update_data=False, annotations=True) + def _toggle_draggable_annotations(self, event): """Enable/disable draggable annotation edges.""" self.mne.draggable_annotations = not 
self.mne.draggable_annotations + def _get_annotation_labels(self): + """Get the unique labels in the raw object and added in the UI.""" + return sorted(set(self.mne.inst.annotations.description) | + set(self.mne.new_annotation_labels)) + def _update_annotation_fig(self): """Draw or redraw the radio buttons and annotation labels.""" - from matplotlib.widgets import RadioButtons + from matplotlib.widgets import RadioButtons, CheckButtons # define shorthand variables fig = self.mne.fig_annotation ax = fig.mne.radio_ax - # get all the labels - labels = list(set(self.mne.inst.annotations.description)) - labels = np.union1d(labels, self.mne.new_annotation_labels) + labels = self._get_annotation_labels() # compute new figsize - width, var_height, fixed_height, pad = \ - self._compute_annotation_figsize(len(labels)) - fig.set_size_inches(width, var_height + fixed_height, forward=True) + radio_button_h = self._compute_annotation_figsize(len(labels)) + fig.set_size_inches(ANNOTATION_FIG_W, + ANNOTATION_FIG_MIN_H + radio_button_h, + forward=True) # populate center axes with labels & radio buttons ax.clear() title = 'Existing labels:' if len(labels) else 'No existing labels' ax.set_title(title, size=None, loc='left') ax.buttons = RadioButtons(ax, labels) # adjust xlim to keep equal aspect & full width (keep circles round) - aspect = (width - 2 * pad) / var_height + aspect = (ANNOTATION_FIG_W - ANNOTATION_FIG_CHECKBOX_COLUMN_W + - 3 * ANNOTATION_FIG_PAD) / radio_button_h ax.set_xlim((0, aspect)) # style the buttons & adjust spacing radius = 0.15 @@ -1177,6 +1223,45 @@ def _update_annotation_fig(self): ax.buttons.on_clicked(fig._radiopress) ax.buttons.connect_event('button_press_event', fig._click_override) + # now do the show/hide checkboxes + show_hide_ax = fig.mne.show_hide_ax + show_hide_ax.clear() + show_hide_ax.set_axis_off() + aspect = ANNOTATION_FIG_CHECKBOX_COLUMN_W / radio_button_h + show_hide_ax.set(xlim=(0, aspect), ylim=(0, 1)) + # ensure new labels have checkbox values + check_values = {label: False for label in labels} + check_values.update(self.mne.visible_annotations) # existing checks + actives = [check_values[label] for label in labels] + # regenerate checkboxes + checkboxes = CheckButtons(ax=fig.mne.show_hide_ax, + labels=labels, + actives=actives) + checkboxes.on_clicked(self._toggle_visible_annotations) + # add title, hide labels + show_hide_ax.set_title('show/\nhide ', size=None, loc='right') + for label in checkboxes.labels: + label.set_visible(False) + # fix aspect and right-align + if len(labels) == 1: + bounds = (0.05, 0.375, 0.25, 0.25) # undo MPL special case + checkboxes.rectangles[0].set_bounds(bounds) + for line, step in zip(checkboxes.lines[0], (1, -1)): + line.set_xdata((bounds[0], bounds[0] + bounds[2])) + line.set_ydata((bounds[1], bounds[1] + bounds[3])[::step]) + for rect in checkboxes.rectangles: + rect.set_transform(show_hide_ax.transData) + bbox = rect.get_bbox() + bounds = (aspect, bbox.ymin, -bbox.width, bbox.height) + rect.set_bounds(bounds) + rect.set_clip_on(False) + for line in np.array(checkboxes.lines).ravel(): + line.set_transform(show_hide_ax.transData) + line.set_xdata(aspect + 0.05 - np.array(line.get_xdata())) + # store state + self.mne.visible_annotations = check_values + self.mne.show_hide_annotation_checkboxes = checkboxes + def _toggle_annotation_fig(self): """Show/hide the annotation dialog window.""" if self.mne.fig_annotation is None: @@ -1194,7 +1279,7 @@ def _compute_annotation_figsize(self, n_labels): 0.1 top margin 1.0 instructions 0.5 
padding below instructions - --- (variable-height axis for label list) + --- (variable-height axis for label list, returned by this method) 0.1 padding above text entry 0.3 text entry 0.1 padding above button @@ -1205,11 +1290,7 @@ def _compute_annotation_figsize(self, n_labels): ------------------------------------------ 2.9 total fixed height """ - pad = 0.1 - width = 4.5 - var_height = max(pad, 0.7 * n_labels) - fixed_height = 2.9 - return (width, var_height, fixed_height, pad) + return max(ANNOTATION_FIG_PAD, 0.7 * n_labels) def _add_annotation_label(self, event): """Add new annotation description.""" @@ -1227,19 +1308,15 @@ def _add_annotation_label(self, event): self.mne.fig_annotation.label.set_text('BAD_') def _setup_annotation_colors(self): - """Set up colors for annotations.""" - raw = self.mne.inst + """Set up colors for annotations; init some annotation vars.""" segment_colors = getattr(self.mne, 'annotation_segment_colors', dict()) - # sort the segments by start time - ann_order = raw.annotations.onset.argsort(axis=0) - descriptions = raw.annotations.description[ann_order] - color_keys = np.union1d(descriptions, self.mne.new_annotation_labels) + labels = self._get_annotation_labels() colors, red = _get_color_list(annotations=True) color_cycle = cycle(colors) for key, color in segment_colors.items(): - if color != red and key in color_keys: + if color != red and key in labels: next(color_cycle) - for idx, key in enumerate(color_keys): + for idx, key in enumerate(labels): if key in segment_colors: continue elif key.lower().startswith('bad') or \ @@ -1248,6 +1325,9 @@ def _setup_annotation_colors(self): else: segment_colors[key] = next(color_cycle) self.mne.annotation_segment_colors = segment_colors + # init a couple other annotation-related variables + self.mne.visible_annotations = {label: True for label in labels} + self.mne.show_hide_annotation_checkboxes = None def _select_annotation_span(self, vmin, vmax): """Handle annotation span selector.""" @@ -1258,8 +1338,10 @@ def _select_annotation_span(self, vmin, vmax): active_idx = labels.index(buttons.value_selected) _merge_annotations(onset, onset + duration, labels[active_idx], self.mne.inst.annotations) - self._draw_annotations() - self.canvas.draw_idle() + # if adding a span with an annotation label that is hidden, show it + if not self.mne.visible_annotations[buttons.value_selected]: + self.mne.show_hide_annotation_checkboxes.set_active(active_idx) + self._redraw(update_data=False, annotations=True) def _remove_annotation_hover_line(self): """Remove annotation line from the plot and reactivate selector.""" @@ -1298,14 +1380,14 @@ def _modify_annotation(self, old_x, new_x): def _clear_annotations(self): """Clear all annotations from the figure.""" - for annot in self.mne.annotations[::-1]: - self.mne.ax_main.collections.remove(annot) + for annot in list(self.mne.annotations): + annot.remove() self.mne.annotations.remove(annot) - for annot in self.mne.hscroll_annotations[::-1]: - self.mne.ax_hscroll.collections.remove(annot) + for annot in list(self.mne.hscroll_annotations): + annot.remove() self.mne.hscroll_annotations.remove(annot) - for text in self.mne.annotation_texts[::-1]: - self.mne.ax_main.texts.remove(text) + for text in list(self.mne.annotation_texts): + text.remove() self.mne.annotation_texts.remove(text) def _draw_annotations(self): @@ -1321,20 +1403,21 @@ def _draw_annotations(self): segment_color = self.mne.annotation_segment_colors[descr] kwargs = dict(color=segment_color, alpha=0.3, 
zorder=self.mne.zorder['ann']) - # draw all segments on ax_hscroll - annot = self.mne.ax_hscroll.fill_betweenx((0, 1), start, end, - **kwargs) - self.mne.hscroll_annotations.append(annot) - # draw only visible segments on ax_main - visible_segment = np.clip([start, end], times[0], times[-1]) - if np.diff(visible_segment) > 0: - annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs) - self.mne.annotations.append(annot) - xy = (visible_segment.mean(), ylim[1]) - text = ax.annotate(descr, xy, xytext=(0, 9), - textcoords='offset points', ha='center', - va='baseline', color=segment_color) - self.mne.annotation_texts.append(text) + if self.mne.visible_annotations[descr]: + # draw all segments on ax_hscroll + annot = self.mne.ax_hscroll.fill_betweenx((0, 1), start, end, + **kwargs) + self.mne.hscroll_annotations.append(annot) + # draw only visible segments on ax_main + visible_segment = np.clip([start, end], times[0], times[-1]) + if np.diff(visible_segment) > 0: + annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs) + self.mne.annotations.append(annot) + xy = (visible_segment.mean(), ylim[1]) + text = ax.annotate(descr, xy, xytext=(0, 9), + textcoords='offset points', ha='center', + va='baseline', color=segment_color) + self.mne.annotation_texts.append(text) def _update_annotation_segments(self): """Update the array of annotation start/end times.""" @@ -1499,6 +1582,10 @@ def _create_proj_fig(self): fig = self._new_child_figure(figsize=(width, height), fig_name='fig_proj', window_title='SSP projection vectors') + # pass through some proj fig keypresses to the parent + fig.canvas.mpl_connect( + 'key_press_event', + lambda ev: self._keypress(ev) if ev.key in 'jJ' else None) # make axes offset = (1 / 6 / height) position = (0, offset, 1, 0.8 - offset) @@ -1548,16 +1635,17 @@ def _toggle_proj_checkbox(self, event, toggle_all=False): new_state = (np.full_like(on, not all(on)) if toggle_all else np.array(fig.mne.proj_checkboxes.get_status())) # update Xs when toggling all - if toggle_all: + if fig is not None: + if toggle_all: + with _events_off(fig.mne.proj_checkboxes): + for ix in np.where(on != new_state)[0]: + fig.mne.proj_checkboxes.set_active(ix) + # don't allow disabling already-applied projs with _events_off(fig.mne.proj_checkboxes): - for ix in np.where(on != new_state)[0]: - fig.mne.proj_checkboxes.set_active(ix) - # don't allow disabling already-applied projs - with _events_off(fig.mne.proj_checkboxes): - for ix in np.where(applied)[0]: - if not new_state[ix]: - fig.mne.proj_checkboxes.set_active(ix) - new_state[applied] = True + for ix in np.where(applied)[0]: + if not new_state[ix]: + fig.mne.proj_checkboxes.set_active(ix) + new_state[applied] = True # update the data if necessary if not np.array_equal(on, new_state): self.mne.projs_on = new_state @@ -1619,8 +1707,17 @@ def _toggle_bad_epoch(self, event): # SCROLLBARS # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # + def _update_zen_mode_offsets(self): + """Compute difference between main axes edges and scrollbar edges.""" + self.mne.fig_size_px = self._get_size_px() + self.mne.zen_w = (self.mne.ax_vscroll.get_position().xmax - + self.mne.ax_main.get_position().xmax) + self.mne.zen_h = (self.mne.ax_main.get_position().ymin - + self.mne.ax_hscroll.get_position().ymin) + def _toggle_scrollbars(self): """Show or hide scrollbars (A.K.A. zen mode).""" + self._update_zen_mode_offsets() # grow/shrink main axes to take up space from (or make room for) # scrollbars. 
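# Illustrative aside, not applied by this diff: _update_zen_mode_offsets()
# above caches, in figure-relative units, zen_w (distance from the right edge
# of the main axes to the right edge of the vertical scrollbar) and zen_h
# (distance from the bottom of the horizontal scrollbar up to the main axes).
# Toggling then just shifts the subplot margins by those cached offsets; for
# example, with zen_w = 0.05 and zen_h = 0.10, hiding the scrollbars would
# move margins['right'] from 0.93 to 0.98 and margins['bottom'] from 0.15 to
# 0.05 (the hypothetical starting margins here are only for illustration).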
We can't use ax.set_position() because axes are # locatable, so we use subplots_adjust @@ -1630,7 +1727,6 @@ def _toggle_scrollbars(self): # if should_show, bottom margin moves up; right margin moves left margins['bottom'] += (1 if should_show else -1) * self.mne.zen_h margins['right'] += (-1 if should_show else 1) * self.mne.zen_w - # squeeze a bit more because we don't need space for xlabel now self.subplots_adjust(**margins) # handle x-axis label self.mne.zen_xlabel.set_visible(not should_show) @@ -1705,9 +1801,9 @@ def _show_scalebars(self): def _hide_scalebars(self): """Remove channel scale bars.""" for bar in self.mne.scalebars.values(): - self.mne.ax_main.lines.remove(bar) + bar.remove() for text in self.mne.scalebar_texts.values(): - self.mne.ax_main.texts.remove(text) + text.remove() self.mne.scalebars = dict() self.mne.scalebar_texts = dict() @@ -1720,7 +1816,7 @@ def _toggle_scalebars(self, event): self._show_scalebars() # toggle self.mne.scalebars_visible = not self.mne.scalebars_visible - self.canvas.draw_idle() + self._redraw(update_data=False) def _draw_one_scalebar(self, x, y, ch_type): """Draw a scalebar.""" @@ -1773,8 +1869,6 @@ def _toggle_butterfly(self): self._update_picks() self._update_trace_offsets() self._redraw(annotations=True) - if self.mne.vline_visible: - self._blit_vline(True) if self.mne.fig_selection is not None: self.mne.fig_selection._style_radio_buttons_butterfly() @@ -1836,7 +1930,9 @@ def _update_data(self): starts = np.maximum(starts[mask], start) - start stops = np.minimum(stops[mask], stop) - start for _start, _stop in zip(starts, stops): - _picks = np.where(np.in1d(picks, self.mne.picks_data)) + _picks = np.where(np.in1d(picks, self.mne.picks_data))[0] + if len(_picks) == 0: + break this_data = data[_picks, _start:_stop] if isinstance(self.mne.filter_coefs, np.ndarray): # FIR this_data = _overlap_add_filter( @@ -1937,7 +2033,7 @@ def _draw_traces(self): # remove extra traces if needed extra_traces = self.mne.traces[n_picks:] for trace in extra_traces: - self.mne.ax_main.lines.remove(trace) + trace.remove() self.mne.traces = self.mne.traces[:n_picks] # check for bad epochs @@ -1949,8 +2045,7 @@ def _draw_traces(self): visible_bad_epochs = epoch_nums[ np.in1d(epoch_nums, self.mne.bad_epochs).nonzero()] while len(self.mne.epoch_traces): - _trace = self.mne.epoch_traces.pop(-1) - self.mne.ax_main.lines.remove(_trace) + self.mne.epoch_traces.pop(-1).remove() # handle custom epoch colors (for autoreject integration) if self.mne.epoch_colors is None: # shape: n_traces × RGBA → n_traces × n_epochs × RGBA @@ -2040,14 +2135,13 @@ def _redraw(self, update_data=True, annotations=False): """Redraw (convenience method for frequently grouped actions).""" if update_data: self._update_data() + if self.mne.vline_visible and self.mne.is_epochs: + # prevent flickering + _ = self._recompute_epochs_vlines(None) self._draw_traces() if annotations and not self.mne.is_epochs: self._draw_annotations() self.canvas.draw_idle() - self.canvas.flush_events() - self.mne.bg = self.canvas.copy_from_bbox(self.bbox) - if self.mne.vline_visible: - self._blit_vline(True) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # EVENT LINES AND MARKER LINES @@ -2076,8 +2170,7 @@ def _draw_event_lines(self): self.mne.event_lines = event_lines # create event labels while len(self.mne.event_texts): - text = self.mne.event_texts.pop() - self.mne.ax_main.texts.remove(text) + self.mne.event_texts.pop().remove() for _t, _n, _c in zip(this_event_times, this_event_nums, 
colors): label = self.mne.event_id_rev.get(_n, _n) this_text = self.mne.ax_main.annotate( @@ -2086,41 +2179,45 @@ def _draw_event_lines(self): textcoords='offset points', fontsize=8) self.mne.event_texts.append(this_text) + def _recompute_epochs_vlines(self, xdata): + """Recompute vline x-coords for epochs plots (after scrolling, etc).""" + # special case: changed view duration w/ "home" or "end" key + # (no click event, hence no xdata) + if xdata is None: + xdata = np.array(self.mne.vline.get_segments())[0, 0, 0] + # compute the (continuous) times for the lines on each epoch + epoch_dur = np.diff(self.mne.boundary_times[:2])[0] + rel_time = xdata % epoch_dur + abs_time = self.mne.times[0] + xs = np.arange(self.mne.n_epochs) * epoch_dur + abs_time + rel_time + segs = np.array(self.mne.vline.get_segments()) + # recreate segs from scratch in case view duration changed + # (i.e., handle case when n_segments != n_epochs) + segs = np.tile([[0.], [1.]], (len(xs), 1, 2)) # y values + segs[..., 0] = np.tile(xs[:, None], 2) # x values + self.mne.vline.set_segments(segs) + return rel_time + def _show_vline(self, xdata): - """Show the vertical line.""" + """Show the vertical line(s).""" if self.mne.is_epochs: # special case: changed view duration w/ "home" or "end" key # (no click event, hence no xdata) - if xdata is None: - xdata = np.array(self.mne.vline.get_segments())[0, 0, 0] - # compute the (continuous) times for the lines on each epoch - epoch_dur = np.diff(self.mne.boundary_times[:2])[0] - rel_time = xdata % epoch_dur - abs_time = self.mne.times[0] - xs = np.arange(self.mne.n_epochs) * epoch_dur + abs_time + rel_time - segs = np.array(self.mne.vline.get_segments()) - # handle changed view duration (n_segments != n_epochs) - if segs.shape[0] != len(xs): - segs = np.tile([[0.], [1.]], (len(xs), 1, 2)) # y values - segs[..., 0] = np.tile(xs[:, None], 2) - self.mne.vline.set_segments(segs) + rel_time = self._recompute_epochs_vlines(xdata) xdata = rel_time + self.mne.inst.times[0] # for the text else: self.mne.vline.set_xdata(xdata) self.mne.vline_hscroll.set_xdata(xdata) - self.mne.vline_text.set_text(f'{xdata:0.2f} ') - self._blit_vline(True) + self.mne.vline_text.set_text(f'{xdata:0.2f} s ') + self._toggle_vline(True) - def _blit_vline(self, visible): - """Restore or hide the vline after data change.""" - self.canvas.restore_region(self.mne.bg) + def _toggle_vline(self, visible): + """Show or hide the vertical line(s).""" for artist in (self.mne.vline, self.mne.vline_hscroll, self.mne.vline_text): if artist is not None: artist.set_visible(visible) self.draw_artist(artist) - self.canvas.blit() - self.canvas.flush_events() self.mne.vline_visible = visible @@ -2169,6 +2266,8 @@ def _figure(toolbar=True, FigureClass=MNEFigure, **kwargs): fig = figure(FigureClass=FigureClass, **kwargs) if title is not None: _set_window_title(fig, title) + # add event callbacks + fig._add_default_callbacks() return fig @@ -2180,17 +2279,12 @@ def _browse_figure(inst, **kwargs): figsize=figsize, **kwargs) # initialize zen mode (can't do in __init__ due to get_position() calls) fig.canvas.draw() - fig.mne.fig_size_px = fig._get_size_px() - fig.mne.zen_w = (fig.mne.ax_vscroll.get_position().xmax - - fig.mne.ax_main.get_position().xmax) - fig.mne.zen_h = (fig.mne.ax_main.get_position().ymin - - fig.mne.ax_hscroll.get_position().ymin) + fig._update_zen_mode_offsets() + fig._resize(None) # needed for MPL >=3.4 # if scrollbars are supposed to start hidden, set to True and then toggle if not fig.mne.scrollbars_visible: 
fig.mne.scrollbars_visible = True fig._toggle_scrollbars() - # add event callbacks - fig._add_default_callbacks() return fig @@ -2213,18 +2307,16 @@ def _line_figure(inst, axes=None, picks=None, **kwargs): fig = axes[0].get_figure() else: figsize = kwargs.pop('figsize', (10, 2.5 * n_axes + 1)) - fig = _figure(inst=inst, toolbar=False, FigureClass=MNELineFigure, + fig = _figure(inst=inst, toolbar=True, FigureClass=MNELineFigure, figsize=figsize, n_axes=n_axes, **kwargs) fig.mne.fig_size_px = fig._get_size_px() # can't do in __init__ axes = fig.mne.ax_list - # add event callbacks - fig._add_default_callbacks() return fig, axes def _psd_figure(inst, proj, picks, axes, area_mode, tmin, tmax, fmin, fmax, n_jobs, color, area_alpha, dB, estimate, average, - spatial_colors, xscale, line_alpha, sphere, **kwargs): + spatial_colors, xscale, line_alpha, sphere, window, **kwargs): """Instantiate a new power spectral density figure.""" from .. import BaseEpochs from ..io import BaseRaw @@ -2236,7 +2328,7 @@ def _psd_figure(inst, proj, picks, axes, area_mode, tmin, tmax, fmin, fmax, if kw in kwargs: psd_kwargs[kw] = kwargs.pop(kw) if isinstance(inst, BaseRaw): - psd_func = psd_welch + psd_func = partial(psd_welch, window=window) elif isinstance(inst, BaseEpochs): psd_func = psd_multitaper else: diff --git a/mne/viz/backends/base_renderer.py b/mne/viz/backends/_abstract.py similarity index 63% rename from mne/viz/backends/base_renderer.py rename to mne/viz/backends/_abstract.py index 377243f8827..879e34786d9 100644 --- a/mne/viz/backends/base_renderer.py +++ b/mne/viz/backends/_abstract.py @@ -1,16 +1,17 @@ -"""Core visualization operations.""" +"""ABCs.""" -# Authors: Alexandre Gramfort +# Authors: Guillaume Favelier -# Oleh Kozynets -# Guillaume Favelier # # License: Simplified BSD -from abc import ABCMeta, abstractclassmethod +import warnings +from abc import ABC, abstractmethod, abstractclassmethod +from ..utils import tight_layout +from ...fixes import nullcontext -class _BaseRenderer(metaclass=ABCMeta): +class _AbstractRenderer(ABC): @abstractclassmethod def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.), name=None, show=False, shape=(1, 1)): @@ -443,3 +444,358 @@ def remove_mesh(self, mesh_data): The mesh to remove. 
""" pass + + +class _AbstractToolBar(ABC): + @abstractmethod + def _tool_bar_load_icons(self): + pass + + @abstractmethod + def _tool_bar_initialize(self, name="default", window=None): + pass + + @abstractmethod + def _tool_bar_add_button(self, name, desc, func, icon_name=None, + shortcut=None): + pass + + @abstractmethod + def _tool_bar_update_button_icon(self, name, icon_name): + pass + + @abstractmethod + def _tool_bar_add_text(self, name, value, placeholder): + pass + + @abstractmethod + def _tool_bar_add_spacer(self): + pass + + @abstractmethod + def _tool_bar_add_file_button(self, name, desc, func, shortcut=None): + pass + + @abstractmethod + def _tool_bar_set_theme(self, theme): + pass + + +class _AbstractDock(ABC): + @abstractmethod + def _dock_initialize(self, window=None): + pass + + @abstractmethod + def _dock_finalize(self): + pass + + @abstractmethod + def _dock_show(self): + pass + + @abstractmethod + def _dock_hide(self): + pass + + @abstractmethod + def _dock_add_stretch(self, layout): + pass + + @abstractmethod + def _dock_add_layout(self, vertical=True): + pass + + @abstractmethod + def _dock_add_label(self, value, align=False, layout=None): + pass + + @abstractmethod + def _dock_add_button(self, name, callback, layout=None): + pass + + @abstractmethod + def _dock_named_layout(self, name, layout, compact): + pass + + @abstractmethod + def _dock_add_slider(self, name, value, rng, callback, + compact=True, double=False, layout=None): + pass + + @abstractmethod + def _dock_add_spin_box(self, name, value, rng, callback, + compact=True, double=True, layout=None): + pass + + @abstractmethod + def _dock_add_combo_box(self, name, value, rng, + callback, compact=True, layout=None): + pass + + @abstractmethod + def _dock_add_group_box(self, name, layout=None): + pass + + +class _AbstractMenuBar(ABC): + @abstractmethod + def _menu_initialize(self, window=None): + pass + + @abstractmethod + def _menu_add_submenu(self, name, desc): + pass + + @abstractmethod + def _menu_add_button(self, menu_name, name, desc, func): + pass + + +class _AbstractStatusBar(ABC): + @abstractmethod + def _status_bar_initialize(self, window=None): + pass + + @abstractmethod + def _status_bar_add_label(self, value, stretch=0): + pass + + @abstractmethod + def _status_bar_add_progress_bar(self, stretch=0): + pass + + @abstractmethod + def _status_bar_update(self): + pass + + +class _AbstractPlayback(ABC): + @abstractmethod + def _playback_initialize(self, func, timeout): + pass + + +class _AbstractLayout(ABC): + @abstractmethod + def _layout_initialize(self, max_width): + pass + + @abstractmethod + def _layout_add_widget(self, layout, widget, stretch=0): + pass + + +class _AbstractWidget(ABC): + def __init__(self, widget): + self._widget = widget + + @property + def widget(self): + return self._widget + + @abstractmethod + def set_value(self, value): + pass + + @abstractmethod + def get_value(self): + pass + + @abstractmethod + def set_range(self, rng): + pass + + @abstractmethod + def show(self): + pass + + @abstractmethod + def hide(self): + pass + + @abstractmethod + def update(self, repaint=True): + pass + + +class _AbstractMplInterface(ABC): + @abstractmethod + def _mpl_initialize(): + pass + + +class _AbstractMplCanvas(ABC): + def __init__(self, width, height, dpi): + """Initialize the MplCanvas.""" + from matplotlib import rc_context + from matplotlib.figure import Figure + # prefer constrained layout here but live with tight_layout otherwise + context = nullcontext + self._extra_events = ('resize',) + 
try: + context = rc_context({'figure.constrained_layout.use': True}) + self._extra_events = () + except KeyError: + pass + with context: + self.fig = Figure(figsize=(width, height), dpi=dpi) + self.axes = self.fig.add_subplot(111) + self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)') + self.manager = None + + def _connect(self): + for event in ('button_press', 'motion_notify') + self._extra_events: + self.canvas.mpl_connect( + event + '_event', getattr(self, 'on_' + event)) + + def plot(self, x, y, label, **kwargs): + """Plot a curve.""" + line, = self.axes.plot( + x, y, label=label, **kwargs) + self.update_plot() + return line + + def plot_time_line(self, x, label, **kwargs): + """Plot the vertical line.""" + line = self.axes.axvline(x, label=label, **kwargs) + self.update_plot() + return line + + def update_plot(self): + """Update the plot.""" + with warnings.catch_warnings(record=True): + warnings.filterwarnings('ignore', 'constrained_layout') + self.canvas.draw() + + def set_color(self, bg_color, fg_color): + """Set the widget colors.""" + self.axes.set_facecolor(bg_color) + self.axes.xaxis.label.set_color(fg_color) + self.axes.yaxis.label.set_color(fg_color) + self.axes.spines['top'].set_color(fg_color) + self.axes.spines['bottom'].set_color(fg_color) + self.axes.spines['left'].set_color(fg_color) + self.axes.spines['right'].set_color(fg_color) + self.axes.tick_params(axis='x', colors=fg_color) + self.axes.tick_params(axis='y', colors=fg_color) + self.fig.patch.set_facecolor(bg_color) + + def show(self): + """Show the canvas.""" + if self.manager is None: + self.canvas.show() + else: + self.manager.show() + + def close(self): + """Close the canvas.""" + self.canvas.close() + + def clear(self): + """Clear internal variables.""" + self.close() + self.axes.clear() + self.fig.clear() + self.canvas = None + self.manager = None + + def on_resize(self, event): + """Handle resize events.""" + tight_layout(fig=self.axes.figure) + + +class _AbstractBrainMplCanvas(_AbstractMplCanvas): + def __init__(self, brain, width, height, dpi): + """Initialize the MplCanvas.""" + super().__init__(width, height, dpi) + self.brain = brain + self.time_func = brain.callbacks["time"] + + def update_plot(self): + """Update the plot.""" + leg = self.axes.legend( + prop={'family': 'monospace', 'size': 'small'}, + framealpha=0.5, handlelength=1., + facecolor=self.brain._bg_color) + for text in leg.get_texts(): + text.set_color(self.brain._fg_color) + super().update_plot() + + def on_button_press(self, event): + """Handle button presses.""" + # left click (and maybe drag) in progress in axes + if (event.inaxes != self.axes or + event.button != 1): + return + self.time_func( + event.xdata, update_widget=True, time_as_index=False) + + on_motion_notify = on_button_press # for now they can be the same + + def clear(self): + """Clear internal variables.""" + super().clear() + self.brain = None + + +class _AbstractWindow(ABC): + def _window_initialize(self): + self._window = None + self._interactor = None + self._mplcanvas = None + self._show_traces = None + self._separate_canvas = None + self._interactor_fraction = None + + @abstractmethod + def _window_close_connect(self, func): + pass + + @abstractmethod + def _window_get_dpi(self): + pass + + @abstractmethod + def _window_get_size(self): + pass + + def _window_get_mplcanvas_size(self, fraction): + ratio = (1 - fraction) / fraction + dpi = self._window_get_dpi() + w, h = self._window_get_size() + h /= ratio + return (w / dpi, h / dpi) + + @abstractmethod + def 
_window_get_simple_canvas(self, width, height, dpi): + pass + + @abstractmethod + def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces, + separate_canvas): + pass + + @abstractmethod + def _window_adjust_mplcanvas_layout(self): + pass + + @abstractmethod + def _window_get_cursor(self): + pass + + @abstractmethod + def _window_set_cursor(self, cursor): + pass + + @abstractmethod + def _window_new_cursor(self, name): + pass + + @abstractmethod + def _window_ensure_minimum_sizes(self): + pass + + @abstractmethod + def _window_set_theme(self, theme): + pass diff --git a/mne/viz/backends/_notebook.py b/mne/viz/backends/_notebook.py index 7a8702e7b47..af1aea447eb 100644 --- a/mne/viz/backends/_notebook.py +++ b/mne/viz/backends/_notebook.py @@ -1,167 +1,366 @@ +"""Notebook implementation of _Renderer and GUI.""" + # Authors: Guillaume Favelier # # License: Simplified BSD -import matplotlib.pyplot as plt from contextlib import contextmanager +from IPython.display import display +from ipywidgets import (Button, Dropdown, FloatSlider, FloatText, HBox, + IntSlider, IntText, Text, VBox, IntProgress) + from ...fixes import nullcontext -from ._pyvista import _Renderer as _PyVistaRenderer -from ._pyvista import \ - _close_all, _set_3d_view, _set_3d_title # noqa: F401 analysis:ignore +from ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar, + _AbstractStatusBar, _AbstractLayout, _AbstractWidget, + _AbstractWindow, _AbstractMplCanvas, _AbstractPlayback, + _AbstractBrainMplCanvas, _AbstractMplInterface) +from ._pyvista import _PyVistaRenderer, _close_all, _set_3d_view, _set_3d_title # noqa: F401,E501, analysis:ignore -class _Renderer(_PyVistaRenderer): - def __init__(self, *args, **kwargs): - from IPython import get_ipython - ipython = get_ipython() - ipython.magic('matplotlib widget') - kwargs["notebook"] = True - super().__init__(*args, **kwargs) +class _IpyLayout(_AbstractLayout): + def _layout_initialize(self, max_width): + self._layout_max_width = max_width - def show(self): - self.figure.display = _NotebookInteractor(self) - return self.scene() + def _layout_add_widget(self, layout, widget, stretch=0): + widget.layout.margin = "2px 0px 2px 0px" + widget.layout.min_width = "0px" + children = list(layout.children) + children.append(widget) + layout.children = tuple(children) + # Fix columns + if self._layout_max_width is not None and isinstance(widget, HBox): + children = widget.children + width = int(self._layout_max_width / len(children)) + for child in children: + child.layout.width = f"{width}px" -class _NotebookInteractor(object): - def __init__(self, renderer): - from IPython import display - from ipywidgets import HBox, VBox - self.dpi = 90 - self.sliders = dict() - self.controllers = dict() - self.renderer = renderer - self.plotter = self.renderer.plotter - with self.disabled_interactivity(): - self.fig, self.dh = self.screenshot() - self.configure_controllers() - controllers = VBox(list(self.controllers.values())) - layout = HBox([self.fig.canvas, controllers]) - display.display(layout) +class _IpyDock(_AbstractDock, _IpyLayout): + def _dock_initialize(self, window=None): + self._dock_width = 300 + self._dock = self._dock_layout = VBox() + self._dock.layout.width = f"{self._dock_width}px" + self._layout_initialize(self._dock_width) - @contextmanager - def disabled_interactivity(self): - state = plt.isinteractive() - plt.ioff() - try: - yield - finally: - if state: - plt.ion() - else: - plt.ioff() - - def screenshot(self): - width, height = 
self.renderer.figure.store['window_size'] - - fig = plt.figure() - fig.figsize = (width / self.dpi, height / self.dpi) - fig.dpi = self.dpi - fig.canvas.toolbar_visible = False - fig.canvas.header_visible = False - fig.canvas.resizable = False - fig.canvas.callbacks.callbacks.clear() - ax = plt.Axes(fig, [0., 0., 1., 1.]) - ax.set_axis_off() - fig.add_axes(ax) - - dh = ax.imshow(self.plotter.screenshot()) - return fig, dh - - def update(self): - self.dh.set_data(self.plotter.screenshot()) - self.fig.canvas.draw() - - def configure_controllers(self): - from ipywidgets import (interactive, Label, VBox, FloatSlider, - IntSlider, Checkbox) - # continuous update - self.continuous_update_button = Checkbox( - value=False, - description='Continuous update', - disabled=False, - indent=False, + def _dock_finalize(self): + pass + + def _dock_show(self): + self._dock_layout.layout.visibility = "visible" + + def _dock_hide(self): + self._dock_layout.layout.visibility = "hidden" + + def _dock_add_stretch(self, layout): + pass + + def _dock_add_layout(self, vertical=True): + return VBox() if vertical else HBox() + + def _dock_add_label(self, value, align=False, layout=None): + layout = self._dock_layout if layout is None else layout + widget = Text(value=value, disabled=True) + self._layout_add_widget(layout, widget) + return _IpyWidget(widget) + + def _dock_add_button(self, name, callback, layout=None): + widget = Button(description=name) + widget.on_click(lambda x: callback()) + self._layout_add_widget(layout, widget) + return _IpyWidget(widget) + + def _dock_named_layout(self, name, layout, compact): + layout = self._dock_layout if layout is None else layout + if name is not None: + hlayout = self._dock_add_layout(not compact) + self._dock_add_label( + value=name, align=not compact, layout=hlayout) + self._layout_add_widget(layout, hlayout) + layout = hlayout + return layout + + def _dock_add_slider(self, name, value, rng, callback, + compact=True, double=False, layout=None): + layout = self._dock_named_layout(name, layout, compact) + klass = FloatSlider if double else IntSlider + widget = klass( + value=value, + min=rng[0], + max=rng[1], + readout=False, ) - self.controllers["continuous_update"] = interactive( - self.set_continuous_update, - value=self.continuous_update_button + widget.observe(_generate_callback(callback), names='value') + self._layout_add_widget(layout, widget) + return _IpyWidget(widget) + + def _dock_add_spin_box(self, name, value, rng, callback, + compact=True, double=True, layout=None): + layout = self._dock_named_layout(name, layout, compact) + klass = FloatText if double else IntText + widget = klass( + value=value, + min=rng[0], + max=rng[1], + readout=False, ) - # subplot - number_of_plots = len(self.plotter.renderers) - if number_of_plots > 1: - self.sliders["subplot"] = IntSlider( - value=number_of_plots - 1, - min=0, - max=number_of_plots - 1, - step=1, - continuous_update=False - ) - self.controllers["subplot"] = VBox([ - Label(value='Select the subplot'), - interactive( - self.set_subplot, - index=self.sliders["subplot"], - ) - ]) - # azimuth - default_azimuth = self.plotter.renderer._azimuth - self.sliders["azimuth"] = FloatSlider( - value=default_azimuth, - min=-180., - max=180., - step=10., - continuous_update=False + widget.observe(_generate_callback(callback), names='value') + self._layout_add_widget(layout, widget) + return _IpyWidget(widget) + + def _dock_add_combo_box(self, name, value, rng, + callback, compact=True, layout=None): + layout = 
self._dock_named_layout(name, layout, compact) + widget = Dropdown( + value=value, + options=rng, ) - # elevation - default_elevation = self.plotter.renderer._elevation - self.sliders["elevation"] = FloatSlider( - value=default_elevation, - min=-180., - max=180., - step=10., - continuous_update=False + widget.observe(_generate_callback(callback), names='value') + self._layout_add_widget(layout, widget) + return _IpyWidget(widget) + + def _dock_add_group_box(self, name, layout=None): + layout = self._dock_layout if layout is None else layout + hlayout = VBox() + self._layout_add_widget(layout, hlayout) + return hlayout + + +def _generate_callback(callback, to_float=False): + def func(data): + value = data["new"] if "new" in data else data["old"] + callback(float(value) if to_float else value) + return func + + +class _IpyToolBar(_AbstractToolBar, _IpyLayout): + def _tool_bar_load_icons(self): + self.icons = dict() + self.icons["help"] = "question" + self.icons["play"] = None + self.icons["pause"] = None + self.icons["reset"] = "history" + self.icons["scale"] = "magic" + self.icons["clear"] = "trash" + self.icons["movie"] = "video-camera" + self.icons["restore"] = "replay" + self.icons["screenshot"] = "camera" + self.icons["visibility_on"] = "eye" + self.icons["visibility_off"] = "eye" + + def _tool_bar_initialize(self, name="default", window=None): + self.actions = dict() + self._tool_bar = self._tool_bar_layout = HBox() + self._layout_initialize(None) + + def _tool_bar_add_button(self, name, desc, func, icon_name=None, + shortcut=None): + icon_name = name if icon_name is None else icon_name + icon = self.icons[icon_name] + if icon is None: + return + widget = Button(tooltip=desc, icon=icon) + widget.on_click(lambda x: func()) + self._layout_add_widget(self._tool_bar_layout, widget) + self.actions[name] = widget + + def _tool_bar_update_button_icon(self, name, icon_name): + self.actions[name].icon = self.icons[icon_name] + + def _tool_bar_add_text(self, name, value, placeholder): + widget = Text(value=value, placeholder=placeholder) + self._layout_add_widget(self._tool_bar_layout, widget) + self.actions[name] = widget + + def _tool_bar_add_spacer(self): + pass + + def _tool_bar_add_file_button(self, name, desc, func, shortcut=None): + def callback(): + fname = self.actions[f"{name}_field"].value + func(None if len(fname) == 0 else fname) + self._tool_bar_add_text( + name=f"{name}_field", + value=None, + placeholder="Type a file name", ) - # distance - eps = 1e-5 - default_distance = self.plotter.renderer._distance - self.sliders["distance"] = FloatSlider( - value=default_distance, - min=eps, - max=2. 
* default_distance - eps, - step=default_distance / 10., - continuous_update=False + self._tool_bar_add_button( + name=name, + desc=desc, + func=callback, ) - # camera - self.controllers["camera"] = VBox([ - Label(value='Camera settings'), - interactive( - self.set_camera, - azimuth=self.sliders["azimuth"], - elevation=self.sliders["elevation"], - distance=self.sliders["distance"], - ) - ]) - - def set_camera(self, azimuth, elevation, distance): - focalpoint = self.plotter.camera.GetFocalPoint() - self.renderer.set_camera(azimuth, elevation, - distance, focalpoint) - self.update() - - def set_subplot(self, index): - row, col = self.plotter.index_to_loc(index) - self.renderer.subplot(row, col) - figure = self.renderer.figure - default_azimuth = figure.plotter.renderer._azimuth - default_elevation = figure.plotter.renderer._elevation - default_distance = figure.plotter.renderer._distance - self.sliders["azimuth"].value = default_azimuth - self.sliders["elevation"].value = default_elevation - self.sliders["distance"].value = default_distance - - def set_continuous_update(self, value): - for slider in self.sliders.values(): - slider.continuous_update = value + + def _tool_bar_set_theme(self, theme): + pass + + +class _IpyMenuBar(_AbstractMenuBar): + def _menu_initialize(self, window=None): + pass + + def _menu_add_submenu(self, name, desc): + pass + + def _menu_add_button(self, menu_name, name, desc, func): + pass + + +class _IpyStatusBar(_AbstractStatusBar, _IpyLayout): + def _status_bar_initialize(self, window=None): + self._status_bar = self._status_bar_layout = HBox() + self._layout_initialize(None) + + def _status_bar_add_label(self, value, stretch=0): + widget = Text(value=value, disabled=True) + self._layout_add_widget(self._status_bar_layout, widget) + return _IpyWidget(widget) + + def _status_bar_add_progress_bar(self, stretch=0): + widget = IntProgress() + self._layout_add_widget(self._status_bar_layout, widget) + return _IpyWidget(widget) + + def _status_bar_update(self): + pass + + +class _IpyPlayback(_AbstractPlayback): + def _playback_initialize(self, func, timeout): + pass + + +class _IpyMplInterface(_AbstractMplInterface): + def _mpl_initialize(self): + from matplotlib.backends.backend_nbagg import (FigureCanvasNbAgg, + FigureManager) + self.canvas = FigureCanvasNbAgg(self.fig) + self.manager = FigureManager(self.canvas, 0) + + +class _IpyMplCanvas(_AbstractMplCanvas, _IpyMplInterface): + def __init__(self, width, height, dpi): + super().__init__(width, height, dpi) + self._mpl_initialize() + + +class _IpyBrainMplCanvas(_AbstractBrainMplCanvas, _IpyMplInterface): + def __init__(self, brain, width, height, dpi): + super().__init__(brain, width, height, dpi) + self._mpl_initialize() + self._connect() + + +class _IpyWindow(_AbstractWindow): + def _window_close_connect(self, func): + pass + + def _window_get_dpi(self): + return 96 + + def _window_get_size(self): + return self.figure.plotter.window_size + + def _window_get_simple_canvas(self, width, height, dpi): + return _IpyMplCanvas(width, height, dpi) + + def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces, + separate_canvas): + w, h = self._window_get_mplcanvas_size(interactor_fraction) + self._interactor_fraction = interactor_fraction + self._show_traces = show_traces + self._separate_canvas = separate_canvas + self._mplcanvas = _IpyBrainMplCanvas( + brain, w, h, self._window_get_dpi()) + return self._mplcanvas + + def _window_adjust_mplcanvas_layout(self): + pass + + def _window_get_cursor(self): + pass + 
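# Illustrative aside, not applied by this diff: the ipywidgets-based dock and
# toolbar controls above are wired through _generate_callback(), which adapts
# a plain callback(value) function to the dict-based observe() API (handlers
# receive a change dict with 'old' and 'new' entries).  A minimal stand-alone
# version of the same wiring, assuming ipywidgets is installed:
from ipywidgets import IntSlider

def _example_on_value(value):
    print('slider moved to', value)

_example_slider = IntSlider(value=3, min=0, max=10)
_example_slider.observe(lambda change: _example_on_value(change['new']),
                        names='value')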
+ def _window_set_cursor(self, cursor): + pass + + def _window_new_cursor(self, name): + pass + + @contextmanager + def _window_ensure_minimum_sizes(self): + yield + + def _window_set_theme(self, theme): + pass + + +class _IpyWidget(_AbstractWidget): + def set_value(self, value): + self._widget.value = value + + def get_value(self): + return self._widget.value + + def set_range(self, rng): + self._widget.min = rng[0] + self._widget.max = rng[1] + + def show(self): + self._widget.layout.visibility = "visible" + + def hide(self): + self._widget.layout.visibility = "hidden" + + def update(self, repaint=True): + pass + + +class _Renderer(_PyVistaRenderer, _IpyDock, _IpyToolBar, _IpyMenuBar, + _IpyStatusBar, _IpyWindow, _IpyPlayback): + def __init__(self, *args, **kwargs): + self._dock = None + self._tool_bar = None + self._status_bar = None + kwargs["notebook"] = True + super().__init__(*args, **kwargs) + + def _update(self): + if self.figure.display is not None: + self.figure.display.update_canvas() + + def _create_default_tool_bar(self): + self._tool_bar_load_icons() + self._tool_bar_initialize() + self._tool_bar_add_file_button( + name="screenshot", + desc="Take a screenshot", + func=self.screenshot, + ) + + def show(self): + # default tool bar + if self._tool_bar is None: + self._create_default_tool_bar() + display(self._tool_bar) + # viewer + try: + # pyvista<0.30.0 + viewer = self.plotter.show( + use_ipyvtk=True, return_viewer=True) + except RuntimeError: + # pyvista>=0.30.0 + viewer = self.plotter.show( + jupyter_backend="ipyvtk_simple", return_viewer=True) + viewer.layout.width = None # unlock the fixed layout + # main widget + if self._dock is None: + main_widget = viewer + else: + main_widget = HBox([self._dock, viewer]) + display(main_widget) + self.figure.display = viewer + # status bar + if self._status_bar is not None: + display(self._status_bar) + return self.scene() _testing_context = nullcontext diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index 39abd16977b..24aa8475da9 100644 --- a/mne/viz/backends/_pysurfer_mayavi.py +++ b/mne/viz/backends/_pysurfer_mayavi.py @@ -23,8 +23,9 @@ from mayavi.core.ui.mayavi_scene import MayaviScene from tvtk.pyface.tvtk_scene import TVTKScene -from .base_renderer import _BaseRenderer -from ._utils import _check_color, ALLOWED_QUIVER_MODES +from ._abstract import _AbstractRenderer +from ._utils import _check_color, _alpha_blend_background, ALLOWED_QUIVER_MODES +from ..utils import _save_ndarray_img from ...surface import _normalize_vectors from ...utils import (_import_mlab, _validate_type, SilenceStdout, copy_base_doc_to_subclass_doc, _check_option) @@ -53,7 +54,7 @@ def visible(self, state): @copy_base_doc_to_subclass_doc -class _Renderer(_BaseRenderer): +class _Renderer(_AbstractRenderer): """Class managing rendering scene. 
Attributes @@ -234,7 +235,7 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, backface_culling=False, colormap=None, vmin=None, vmax=None, - line_width=2., name=None): + line_width=2., name=None, solid_transform=None): _check_option('mode', mode, ALLOWED_QUIVER_MODES) color = _check_color(color) with warnings.catch_warnings(record=True): # traits @@ -244,12 +245,15 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, scale_mode=scale_mode, resolution=resolution, scalars=scalars, opacity=opacity, figure=self.fig) - elif mode in ('cone', 'sphere'): + elif mode in ('cone', 'sphere', 'oct'): + use_mode = 'sphere' if mode == 'oct' else mode quiv = self.mlab.quiver3d(x, y, z, u, v, w, color=color, - mode=mode, scale_factor=scale, + mode=use_mode, scale_factor=scale, opacity=opacity, figure=self.fig) if mode == 'sphere': quiv.glyph.glyph_source.glyph_source.center = 0., 0., 0. + elif mode == 'oct': + _oct_glyph(quiv.glyph.glyph_source, solid_transform) else: assert mode == 'cylinder', mode # should be guaranteed above quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode, @@ -298,12 +302,10 @@ def scalarbar(self, source, color="white", title=None, n_labels=4, ctable = lut.table.to_array() cbar_lut = tvtk.LookupTable() cbar_lut.deep_copy(lut) - alphas = ctable[:, -1][:, np.newaxis] / 255. - use_lut = ctable.copy() - use_lut[:, -1] = 255. - vals = (use_lut * alphas) + bgcolor * (1 - alphas) + vals = _alpha_blend_background(ctable, bgcolor) cbar_lut.table.from_array(vals) cmap.scalar_bar.lookup_table = cbar_lut + return bar def show(self): if self.fig is not None: @@ -313,7 +315,8 @@ def close(self): _close_3d_figure(figure=self.fig) def set_camera(self, azimuth=None, elevation=None, distance=None, - focalpoint=None, roll=None, reset_camera=None): + focalpoint=None, roll=None, reset_camera=None, + rigid=None): _set_3d_view(figure=self.fig, azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint, roll=roll) @@ -481,12 +484,7 @@ def _check_3d_figure(figure): def _save_figure(img, filename): - from matplotlib.backends.backend_agg import FigureCanvasAgg - from matplotlib.figure import Figure - fig = Figure(frameon=False) - FigureCanvasAgg(fig) - fig.figimage(img, resize=True) - fig.savefig(filename) + _save_ndarray_img(filename, img) def _close_3d_figure(figure): @@ -524,3 +522,32 @@ def _testing_context(interactive): yield finally: mlab.options.backend = orig_backend + + +def _oct_glyph(glyph_source, transform): + from tvtk.api import tvtk + from tvtk.common import configure_input + from traits.api import Array + gs = tvtk.PlatonicSolidSource() + + # Workaround for: + # File "mayavi/components/glyph_source.py", line 231, in _glyph_position_changed # noqa: E501 + # g.center = 0.0, 0.0, 0.0 + # traits.trait_errors.TraitError: Cannot set the undefined 'center' attribute of a 'TransformPolyDataFilter' object. 
# noqa: E501 + class SafeTransformPolyDataFilter(tvtk.TransformPolyDataFilter): + center = Array(shape=(3,), value=np.zeros(3)) + + gs.solid_type = 'octahedron' + if transform is not None: + # glyph: mayavi.modules.vectors.Vectors + # glyph.glyph: vtkGlyph3D + # glyph.glyph.glyph: mayavi.components.glyph.Glyph + assert transform.shape == (4, 4) + tr = tvtk.Transform() + tr.set_matrix(transform.ravel()) + trp = SafeTransformPolyDataFilter() + configure_input(trp, gs) + trp.transform = tr + trp.update() + gs = trp + glyph_source.glyph_source = gs diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index d6b24e0d790..8975ef3fe47 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -20,11 +20,12 @@ import numpy as np import vtk -from .base_renderer import _BaseRenderer -from ._utils import _get_colormap_from_array, ALLOWED_QUIVER_MODES +from ._abstract import _AbstractRenderer +from ._utils import (_get_colormap_from_array, _alpha_blend_background, + ALLOWED_QUIVER_MODES, _init_qt_resources) from ...fixes import _get_args +from ...transforms import apply_trans from ...utils import copy_base_doc_to_subclass_doc, _check_option -from ...externals.decorator import decorator with warnings.catch_warnings(): @@ -35,7 +36,6 @@ from pyvistaqt import BackgroundPlotter # noqa except ImportError: from pyvista import BackgroundPlotter - from pyvista.utilities import try_callback from pyvista.plotting.plotting import _ALL_PLOTTERS VTK9 = LooseVersion(getattr(vtk, 'VTK_VERSION', '9.0')) >= LooseVersion('9.0') @@ -44,9 +44,8 @@ class _Figure(object): - def __init__(self, plotter=None, - plotter_class=None, - display=None, + def __init__(self, + plotter=None, show=False, title='PyVista Scene', size=(600, 600), @@ -56,47 +55,47 @@ def __init__(self, plotter=None, off_screen=False, notebook=False): self.plotter = plotter - self.plotter_class = plotter_class - self.display = display + self.display = None self.background_color = background_color self.smooth_shading = smooth_shading self.notebook = notebook self.store = dict() - self.store['show'] = show - self.store['title'] = title self.store['window_size'] = size self.store['shape'] = shape self.store['off_screen'] = off_screen self.store['border'] = False - self.store['auto_update'] = False # multi_samples > 1 is broken on macOS + Intel Iris + volume rendering self.store['multi_samples'] = 1 if sys.platform == 'darwin' else 4 + if not self.notebook: + self.store['show'] = show + self.store['title'] = title + self.store['auto_update'] = False + self.store['menu_bar'] = False + self.store['toolbar'] = False + + self._nrows, self._ncols = self.store['shape'] + self._azimuth = self._elevation = None + def build(self): - if self.plotter_class is None: - self.plotter_class = BackgroundPlotter if self.notebook: - self.plotter_class = Plotter - - if self.plotter_class is Plotter: - self.store.pop('show', None) - self.store.pop('title', None) - self.store.pop('auto_update', None) + plotter_class = Plotter + else: + plotter_class = BackgroundPlotter if self.plotter is None: - if self.plotter_class is BackgroundPlotter: + if not self.notebook: from PyQt5.QtWidgets import QApplication app = QApplication.instance() if app is None: app = QApplication(["MNE"]) self.store['app'] = app - plotter = self.plotter_class(**self.store) + plotter = plotter_class(**self.store) plotter.background_color = self.background_color self.plotter = plotter - if self.plotter_class is BackgroundPlotter and \ - hasattr(BackgroundPlotter, 'set_icon'): 
- _init_resources() + if not self.notebook and hasattr(plotter_class, 'set_icon'): + _init_qt_resources() _process_events(plotter) plotter.set_icon(":/mne-icon.png") _process_events(self.plotter) @@ -130,24 +129,8 @@ def visible(self, state): self.pts.SetVisibility(state) -def _enable_aa(figure, plotter): - """Enable it everywhere except Azure.""" - # XXX for some reason doing this on Azure causes access violations: - # ##[error]Cmd.exe exited with code '-1073741819' - # So for now don't use it there. Maybe has to do with setting these - # before the window has actually been made "active"...? - # For Mayavi we have an "on activated" event or so, we should look into - # using this for Azure at some point, too. - if os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true': - return - if figure.is_active(): - if sys.platform != 'darwin': - plotter.enable_anti_aliasing() - plotter.ren_win.LineSmoothingOn() - - @copy_base_doc_to_subclass_doc -class _Renderer(_BaseRenderer): +class _PyVistaRenderer(_AbstractRenderer): """Class managing rendering scene. Attributes @@ -168,7 +151,6 @@ def __init__(self, fig=None, size=(600, 600), bgcolor='black', smooth_shading=smooth_shading) self.font_family = "arial" self.tube_n_sides = 20 - self.shape = shape antialias = _get_3d_option('antialias') self.antialias = antialias and not MNE_3D_BACKEND_TESTING if isinstance(fig, int): @@ -196,13 +178,8 @@ def __init__(self, fig=None, size=(600, 600), bgcolor='black', self.figure.smooth_shading = False with _disabled_depth_peeling(): self.plotter = self.figure.build() - self.plotter.hide_axes() - if hasattr(self.plotter, "default_camera_tool_bar"): - self.plotter.default_camera_tool_bar.close() - if hasattr(self.plotter, "saved_cameras_tool_bar"): - self.plotter.saved_cameras_tool_bar.close() - if self.antialias: - _enable_aa(self.figure, self.plotter) + self._hide_axes() + self._enable_aa() # FIX: https://github.com/pyvista/pyvistaqt/pull/68 if LooseVersion(pyvista.__version__) >= '0.27.0': @@ -211,81 +188,66 @@ def __init__(self, fig=None, size=(600, 600), bgcolor='black', self.update_lighting() - @contextmanager - def ensure_minimum_sizes(self): - sz = self.figure.store['window_size'] - # plotter: pyvista.plotting.qt_plotting.BackgroundPlotter - # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa - # plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow # noqa - # plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget # noqa - # plotter.ren_win: vtkXOpenGLRenderWindow - self.plotter.interactor.setMinimumSize(*sz) - try: - yield # show - finally: - # 1. Process events - _process_events(self.plotter) - _process_events(self.plotter) - # 2. Get the window size that accommodates the size - sz = self.plotter.app_window.size() - # 3. Call app_window.setBaseSize and resize (in pyvistaqt) - self.plotter.window_size = (sz.width(), sz.height()) - # 4. Undo the min size setting and process events - self.plotter.interactor.setMinimumSize(0, 0) - _process_events(self.plotter) - _process_events(self.plotter) - # 5. Resize the window (again!) 
to the correct size - # (not sure why, but this is required on macOS at least) - self.plotter.window_size = (sz.width(), sz.height()) - _process_events(self.plotter) - _process_events(self.plotter) + @property + def _all_plotters(self): + return [self.figure.plotter] + + @property + def _all_renderers(self): + return self.figure.plotter.renderers + + def _hide_axes(self): + for renderer in self._all_renderers: + renderer.hide_axes() + + def _update(self): + for plotter in self._all_plotters: + plotter.update() + + def _index_to_loc(self, idx): + _ncols = self.figure._ncols + row = idx // _ncols + col = idx % _ncols + return (row, col) + + def _loc_to_index(self, loc): + _ncols = self.figure._ncols + return loc[0] * _ncols + loc[1] def subplot(self, x, y): - x = np.max([0, np.min([x, self.shape[0] - 1])]) - y = np.max([0, np.min([y, self.shape[1] - 1])]) + x = np.max([0, np.min([x, self.figure._nrows - 1])]) + y = np.max([0, np.min([y, self.figure._ncols - 1])]) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) self.plotter.subplot(x, y) - if self.antialias: - _enable_aa(self.figure, self.plotter) def scene(self): return self.figure - def _orient_lights(self): - lights = list(self.plotter.renderer.GetLights()) - lights.pop(0) # unused headlight - lights[0].SetPosition(_to_pos(45.0, -45.0)) - lights[1].SetPosition(_to_pos(-30.0, 60.0)) - lights[2].SetPosition(_to_pos(-30.0, -60.0)) - def update_lighting(self): # Inspired from Mayavi's version of Raymond Maple 3-lights illumination - lights = list(self.plotter.renderer.GetLights()) - headlight = lights.pop(0) - headlight.SetSwitch(False) - for i in range(len(lights)): - if i < 3: - lights[i].SetSwitch(True) - lights[i].SetIntensity(1.0) - lights[i].SetColor(1.0, 1.0, 1.0) - else: - lights[i].SetSwitch(False) - lights[i].SetPosition(_to_pos(0.0, 0.0)) - lights[i].SetIntensity(1.0) - lights[i].SetColor(1.0, 1.0, 1.0) - - lights[0].SetPosition(_to_pos(45.0, 45.0)) - lights[1].SetPosition(_to_pos(-30.0, -60.0)) - lights[1].SetIntensity(0.6) - lights[2].SetPosition(_to_pos(-30.0, 60.0)) - lights[2].SetIntensity(0.5) + for renderer in self._all_renderers: + lights = list(renderer.GetLights()) + headlight = lights.pop(0) + headlight.SetSwitch(False) + # below and centered, left and above, right and above + az_el_in = ((0, -45, 0.7), (-60, 30, 0.7), (60, 30, 0.7)) + for li, light in enumerate(lights): + if li < len(az_el_in): + light.SetSwitch(True) + light.SetPosition(_to_pos(*az_el_in[li][:2])) + light.SetIntensity(az_el_in[li][2]) + else: + light.SetSwitch(False) + light.SetPosition(_to_pos(0.0, 0.0)) + light.SetIntensity(0.0) + light.SetColor(1.0, 1.0, 1.0) def set_interaction(self, interaction): if not hasattr(self.plotter, "iren") or self.plotter.iren is None: return if interaction == "rubber_band_2d": - for renderer in self.plotter.renderers: + for renderer in self._all_renderers: renderer.enable_parallel_projection() if hasattr(self.plotter, 'enable_rubber_band_2d_style'): self.plotter.enable_rubber_band_2d_style() @@ -293,7 +255,7 @@ def set_interaction(self, interaction): style = vtk.vtkInteractorStyleRubberBand2D() self.plotter.interactor.SetInteractorStyle(style) else: - for renderer in self.plotter.renderers: + for renderer in self._all_renderers: renderer.disable_parallel_projection() getattr(self.plotter, f'enable_{interaction}_style')() @@ -486,7 +448,9 @@ def tube(self, origin, destination, radius=0.001, color='white', def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, 
glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, - backface_culling=False, line_width=2., name=None): + backface_culling=False, line_width=2., name=None, + glyph_width=None, glyph_depth=None, + solid_transform=None): _check_option('mode', mode, ALLOWED_QUIVER_MODES) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) @@ -517,6 +481,7 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, ) mesh = pyvista.wrap(alg.GetOutput()) else: + tr = None if mode == 'cone': glyph = vtk.vtkConeSource() glyph.SetCenter(0.5, 0, 0) @@ -524,6 +489,9 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, elif mode == 'cylinder': glyph = vtk.vtkCylinderSource() glyph.SetRadius(0.15) + elif mode == 'oct': + glyph = vtk.vtkPlatonicSolidSource() + glyph.SetSolidTypeToOctahedron() else: assert mode == 'sphere', mode # guaranteed above glyph = vtk.vtkSphereSource() @@ -534,10 +502,17 @@ def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph.SetCenter(glyph_center) if glyph_resolution is not None: glyph.SetResolution(glyph_resolution) - # fix orientation - glyph.Update() tr = vtk.vtkTransform() tr.RotateWXYZ(90, 0, 0, 1) + elif mode == 'oct': + if solid_transform is not None: + assert solid_transform.shape == (4, 4) + tr = vtk.vtkTransform() + tr.SetMatrix( + solid_transform.astype(np.float64).ravel()) + if tr is not None: + # fix orientation + glyph.Update() trp = vtk.vtkTransformPolyDataFilter() trp.SetInputData(glyph.GetOutput()) trp.SetTransform(tr) @@ -596,31 +571,34 @@ def text3d(self, x, y, z, text, scale, color='white'): def scalarbar(self, source, color="white", title=None, n_labels=4, bgcolor=None, **extra_kwargs): + if isinstance(source, vtk.vtkMapper): + mapper = source + elif isinstance(source, vtk.vtkActor): + mapper = source.GetMapper() + else: + mapper = None with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) kwargs = dict(color=color, title=title, n_labels=n_labels, use_opacity=False, n_colors=256, position_x=0.15, position_y=0.05, width=0.7, shadow=False, bold=True, label_font_size=22, font_family=self.font_family, - background_color=bgcolor) + background_color=bgcolor, mapper=mapper) kwargs.update(extra_kwargs) - self.plotter.add_scalar_bar(**kwargs) + return self.plotter.add_scalar_bar(**kwargs) def show(self): - self.figure.display = self.plotter.show() - if hasattr(self.plotter, "app_window"): - with self.ensure_minimum_sizes(): - self.plotter.app_window.show() - return self.scene() + self.plotter.show() def close(self): _close_3d_figure(figure=self.figure) def set_camera(self, azimuth=None, elevation=None, distance=None, - focalpoint=None, roll=None, reset_camera=True): + focalpoint='auto', roll=None, reset_camera=True, + rigid=None): _set_3d_view(self.figure, azimuth=azimuth, elevation=elevation, distance=distance, focalpoint=focalpoint, roll=roll, - reset_camera=reset_camera) + reset_camera=reset_camera, rigid=rigid) def reset_camera(self): self.plotter.reset_camera() @@ -639,20 +617,211 @@ def project(self, xyz, ch_names): def enable_depth_peeling(self): if not self.figure.store['off_screen']: - for renderer in self.plotter.renderers: + for renderer in self._all_renderers: renderer.enable_depth_peeling() + def _enable_aa(self): + """Enable it everywhere except Azure.""" + if not self.antialias: + return + # XXX for some reason doing this on Azure causes access violations: + # ##[error]Cmd.exe 
exited with code '-1073741819' + # So for now don't use it there. Maybe has to do with setting these + # before the window has actually been made "active"...? + # For Mayavi we have an "on activated" event or so, we should look into + # using this for Azure at some point, too. + if os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true': + return + if self.figure.is_active(): + if sys.platform != 'darwin': + for renderer in self._all_renderers: + renderer.enable_anti_aliasing() + for plotter in self._all_plotters: + plotter.ren_win.LineSmoothingOn() + def remove_mesh(self, mesh_data): actor, _ = mesh_data self.plotter.remove_actor(actor) + @contextmanager + def _disabled_interaction(self): + if not self.plotter.renderer.GetInteractive(): + yield + else: + self.plotter.disable() + try: + yield + finally: + self.plotter.enable() + + def _actor(self, mapper=None): + actor = vtk.vtkActor() + if mapper is not None: + actor.SetMapper(mapper) + return actor + + def _process_events(self): + for plotter in self._all_plotters: + _process_events(plotter) -def _create_actor(mapper=None): - """Create a vtkActor.""" - actor = vtk.vtkActor() - if mapper is not None: - actor.SetMapper(mapper) - return actor + def _update_picking_callback(self, + on_mouse_move, + on_button_press, + on_button_release, + on_pick): + try: + # pyvista<0.30.0 + add_obs = self.plotter.iren.AddObserver + except AttributeError: + # pyvista>=0.30.0 + add_obs = self.plotter.iren.add_observer + add_obs(vtk.vtkCommand.RenderEvent, on_mouse_move) + add_obs(vtk.vtkCommand.LeftButtonPressEvent, on_button_press) + add_obs(vtk.vtkCommand.EndInteractionEvent, on_button_release) + self.plotter.picker = vtk.vtkCellPicker() + self.plotter.picker.AddObserver( + vtk.vtkCommand.EndPickEvent, + on_pick + ) + self.plotter.picker.SetVolumeOpacityIsovalue(0.) + + def _set_mesh_scalars(self, mesh, scalars, name): + # Catch: FutureWarning: Conversion of the second argument of + # issubdtype from `complex` to `np.complexfloating` is deprecated. + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + mesh.point_arrays[name] = scalars + + def _set_colormap_range(self, actor, ctable, scalar_bar, rng=None, + background_color=None): + from vtk.util.numpy_support import numpy_to_vtk + if rng is not None: + mapper = actor.GetMapper() + mapper.SetScalarRange(*rng) + lut = mapper.GetLookupTable() + lut.SetTable(numpy_to_vtk(ctable)) + if scalar_bar is not None: + lut = scalar_bar.GetLookupTable() + if background_color is not None: + background_color = np.array(background_color) * 255 + ctable = _alpha_blend_background(ctable, background_color) + lut.SetTable(numpy_to_vtk(ctable, + array_type=vtk.VTK_UNSIGNED_CHAR)) + lut.SetRange(*rng) + + def _set_volume_range(self, volume, ctable, alpha, scalar_bar, rng): + import vtk + from vtk.util.numpy_support import numpy_to_vtk + color_tf = vtk.vtkColorTransferFunction() + opacity_tf = vtk.vtkPiecewiseFunction() + for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable): + color_tf.AddRGBPoint(loc, *(color[:-1] / 255.)) + opacity_tf.AddPoint(loc, color[-1] * alpha / 255.) 
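# A minimal sketch, assuming VTK and NumPy are installed, of how an
# (N, 4) uint8 RGBA colortable becomes a VTK lookup table via
# numpy_to_vtk -- the step _set_colormap_range() above performs for both
# the mapper and the scalar-bar LUTs. All names and values below are
# made up for illustration only.
import numpy as np
import vtk
from vtk.util.numpy_support import numpy_to_vtk

ctable = np.zeros((256, 4), np.uint8)
ctable[:, 0] = np.arange(256)   # red ramp
ctable[:, 3] = 255              # fully opaque
lut = vtk.vtkLookupTable()
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
lut.SetRange(0., 1.)            # scalar range the 256 colors map onto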
+ color_tf.ClampingOn() + opacity_tf.ClampingOn() + volume.GetProperty().SetColor(color_tf) + volume.GetProperty().SetScalarOpacity(opacity_tf) + if scalar_bar is not None: + lut = vtk.vtkLookupTable() + lut.SetRange(*rng) + lut.SetTable(numpy_to_vtk(ctable)) + scalar_bar.SetLookupTable(lut) + + def _sphere(self, center, color, radius): + sphere = vtk.vtkSphereSource() + sphere.SetThetaResolution(8) + sphere.SetPhiResolution(8) + sphere.SetRadius(radius) + sphere.SetCenter(center) + sphere.Update() + mesh = pyvista.wrap(sphere.GetOutput()) + actor = _add_mesh( + self.plotter, + mesh=mesh, + color=color + ) + return actor, mesh + + def _volume(self, dimensions, origin, spacing, scalars, + surface_alpha, resolution, blending, center): + # Now we can actually construct the visualization + grid = pyvista.UniformGrid() + grid.dimensions = dimensions + 1 # inject data on the cells + grid.origin = origin + grid.spacing = spacing + grid.cell_arrays['values'] = scalars + + # Add contour of enclosed volume (use GetOutput instead of + # GetOutputPort below to avoid updating) + grid_alg = vtk.vtkCellDataToPointData() + grid_alg.SetInputDataObject(grid) + grid_alg.SetPassCellData(False) + grid_alg.Update() + + if surface_alpha > 0: + grid_surface = vtk.vtkMarchingContourFilter() + grid_surface.ComputeNormalsOn() + grid_surface.ComputeScalarsOff() + grid_surface.SetInputData(grid_alg.GetOutput()) + grid_surface.SetValue(0, 0.1) + grid_surface.Update() + grid_mesh = vtk.vtkPolyDataMapper() + grid_mesh.SetInputData(grid_surface.GetOutput()) + else: + grid_mesh = None + + mapper = vtk.vtkSmartVolumeMapper() + if resolution is None: # native + mapper.SetScalarModeToUseCellData() + mapper.SetInputDataObject(grid) + else: + upsampler = vtk.vtkImageReslice() + upsampler.SetInterpolationModeToLinear() # default anyway + upsampler.SetOutputSpacing(*([resolution] * 3)) + upsampler.SetInputConnection(grid_alg.GetOutputPort()) + mapper.SetInputConnection(upsampler.GetOutputPort()) + # Additive, AverageIntensity, and Composite might also be reasonable + remap = dict(composite='Composite', mip='MaximumIntensity') + getattr(mapper, f'SetBlendModeTo{remap[blending]}')() + volume_pos = vtk.vtkVolume() + volume_pos.SetMapper(mapper) + dist = grid.length / (np.mean(grid.dimensions) - 1) + volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist) + if center is not None and blending == 'mip': + # We need to create a minimum intensity projection for the neg half + mapper_neg = vtk.vtkSmartVolumeMapper() + if resolution is None: # native + mapper_neg.SetScalarModeToUseCellData() + mapper_neg.SetInputDataObject(grid) + else: + mapper_neg.SetInputConnection(upsampler.GetOutputPort()) + mapper_neg.SetBlendModeToMinimumIntensity() + volume_neg = vtk.vtkVolume() + volume_neg.SetMapper(mapper_neg) + volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist) + else: + volume_neg = None + return grid, grid_mesh, volume_pos, volume_neg + + def _silhouette(self, mesh, color=None, line_width=None, alpha=None, + decimate=None): + mesh = mesh.decimate(decimate) if decimate is not None else mesh + silhouette_filter = vtk.vtkPolyDataSilhouette() + silhouette_filter.SetInputData(mesh) + silhouette_filter.SetCamera(self.plotter.renderer.GetActiveCamera()) + silhouette_filter.SetEnableFeatureAngle(0) + silhouette_mapper = vtk.vtkPolyDataMapper() + silhouette_mapper.SetInputConnection( + silhouette_filter.GetOutputPort()) + _, prop = self.plotter.add_actor( + silhouette_mapper, reset_camera=False, name=None, + culling=False, pickable=False) 
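# A minimal sketch, assuming VTK is installed, of the blending dispatch
# used by _volume() above: the string option is remapped to a concrete
# VTK setter name and invoked through getattr, so blending='mip' ends up
# calling SetBlendModeToMaximumIntensity() on the mapper. The 'blending'
# value below is a hypothetical user choice.
import vtk

mapper = vtk.vtkSmartVolumeMapper()
remap = dict(composite='Composite', mip='MaximumIntensity')
blending = 'mip'
getattr(mapper, f'SetBlendModeTo{remap[blending]}')()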
+ if color is not None: + prop.SetColor(*color) + if alpha is not None: + prop.SetOpacity(alpha) + if line_width is not None: + prop.SetLineWidth(line_width) def _compute_normals(mesh): @@ -696,7 +865,7 @@ def _rad2deg(rad): return rad * 180. / np.pi -def _to_pos(elevation, azimuth): +def _to_pos(azimuth, elevation): theta = azimuth * np.pi / 180.0 phi = (90.0 - elevation) * np.pi / 180.0 x = np.sin(theta) * np.sin(phi) @@ -713,22 +882,15 @@ def _mat_to_array(vtk_mat): def _3d_to_2d(plotter, xyz): - size = plotter.window_size - xyz = np.column_stack([xyz, np.ones(xyz.shape[0])]) - - # Transform points into 'unnormalized' view coordinates - comb_trans_mat = _get_world_to_view_matrix(plotter) - view_coords = np.dot(comb_trans_mat, xyz.T).T - - # Divide through by the fourth element for normalized view coords - norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1)) - - # Transform from normalized view coordinates to display coordinates. - view_to_disp_mat = _get_view_to_display_matrix(size) - xy = np.dot(view_to_disp_mat, norm_view_coords.T).T - - # Pull the first two columns since they're meaningful for 2d plotting - xy = xy[:, :2] + # https://vtk.org/Wiki/VTK/Examples/Cxx/Utilities/Coordinate + import vtk + coordinate = vtk.vtkCoordinate() + coordinate.SetCoordinateSystemToWorld() + xy = list() + for coord in xyz: + coordinate.SetValue(*coord) + xy.append(coordinate.GetComputedLocalDisplayValue(plotter.renderer)) + xy = np.array(xy, float).reshape(-1, 2) # in case it's empty return xy @@ -766,17 +928,33 @@ def _get_camera_direction(focalpoint, position): r = np.sqrt(x * x + y * y + z * z) theta = np.arccos(z / r) phi = np.arctan2(y, x) - return r, theta, phi, focalpoint + return r, theta, phi -def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None, - reset_camera=True): +def _set_3d_view(figure, azimuth=None, elevation=None, focalpoint='auto', + distance=None, roll=None, reset_camera=True, rigid=None): + rigid = np.eye(4) if rigid is None else rigid position = np.array(figure.plotter.camera_position[0]) + bounds = np.array(figure.plotter.renderer.ComputeVisiblePropBounds()) if reset_camera: figure.plotter.reset_camera() - if focalpoint is None: + + # focalpoint: if 'auto', we use the center of mass of the visible + # bounds, if None, we use the existing camera focal point otherwise + # we use the values given by the user + if isinstance(focalpoint, str): + _check_option('focalpoint', focalpoint, ('auto',), + extra='when a string') + focalpoint = (bounds[1::2] + bounds[::2]) * 0.5 + elif focalpoint is None: focalpoint = np.array(figure.plotter.camera_position[1]) - r, theta, phi, fp = _get_camera_direction(focalpoint, position) + else: + focalpoint = np.asarray(focalpoint) + + # work in the transformed space + position = apply_trans(rigid, position) + focalpoint = apply_trans(rigid, focalpoint) + _, theta, phi = _get_camera_direction(focalpoint, position) if azimuth is not None: phi = _deg2rad(azimuth) @@ -784,37 +962,37 @@ def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None, theta = _deg2rad(elevation) # set the distance - renderer = figure.plotter.renderer - bounds = np.array(renderer.ComputeVisiblePropBounds()) if distance is None: distance = max(bounds[1::2] - bounds[::2]) * 2.0 - if focalpoint is not None: - focalpoint = np.asarray(focalpoint) - else: - focalpoint = (bounds[1::2] + bounds[::2]) * 0.5 - # Now calculate the view_up vector of the camera. 
If the view up is # close to the 'z' axis, the view plane normal is parallel to the # camera which is unacceptable, so we use a different view up. if elevation is None or 5. <= abs(elevation) <= 175.: view_up = [0, 0, 1] else: - view_up = [np.sin(phi), np.cos(phi), 0] + view_up = [0, 1, 0] position = [ distance * np.cos(phi) * np.sin(theta), distance * np.sin(phi) * np.sin(theta), distance * np.cos(theta)] + + figure._azimuth = _rad2deg(phi) + figure._elevation = _rad2deg(theta) + + # restore to the original frame + rigid = np.linalg.inv(rigid) + position = apply_trans(rigid, position) + focalpoint = apply_trans(rigid, focalpoint) + view_up = apply_trans(rigid, view_up, move=False) figure.plotter.camera_position = [ position, focalpoint, view_up] + # We need to add the requested roll to the roll dictated by the + # transformed view_up if roll is not None: - figure.plotter.camera.SetRoll(roll) + figure.plotter.camera.SetRoll(figure.plotter.camera.GetRoll() + roll) - figure.plotter.renderer._azimuth = azimuth - figure.plotter.renderer._elevation = elevation - figure.plotter.renderer._distance = distance - figure.plotter.renderer._roll = roll figure.plotter.update() _process_events(figure.plotter) @@ -861,108 +1039,10 @@ def _process_events(plotter): plotter.app.processEvents() -def _set_colormap_range(actor, ctable, scalar_bar, rng=None): - from vtk.util.numpy_support import numpy_to_vtk - mapper = actor.GetMapper() - lut = mapper.GetLookupTable() - # Catch: FutureWarning: Conversion of the second argument of - # issubdtype from `complex` to `np.complexfloating` is deprecated. - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=FutureWarning) - lut.SetTable(numpy_to_vtk(ctable)) - if rng is not None: - mapper.SetScalarRange(rng[0], rng[1]) - lut.SetRange(rng[0], rng[1]) - if scalar_bar is not None: - scalar_bar.SetLookupTable(actor.GetMapper().GetLookupTable()) - - -def _set_volume_range(volume, ctable, alpha, scalar_bar, rng): - import vtk - from vtk.util.numpy_support import numpy_to_vtk - color_tf = vtk.vtkColorTransferFunction() - opacity_tf = vtk.vtkPiecewiseFunction() - for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable): - color_tf.AddRGBPoint(loc, *color[:-1]) - opacity_tf.AddPoint(loc, color[-1] * alpha / 255. / (len(ctable) - 1)) - color_tf.ClampingOn() - opacity_tf.ClampingOn() - volume.GetProperty().SetColor(color_tf) - volume.GetProperty().SetScalarOpacity(opacity_tf) - if scalar_bar is not None: - lut = vtk.vtkLookupTable() - lut.SetRange(*rng) - lut.SetTable(numpy_to_vtk(ctable)) - scalar_bar.SetLookupTable(lut) - - -def _set_mesh_scalars(mesh, scalars, name): - # Catch: FutureWarning: Conversion of the second argument of - # issubdtype from `complex` to `np.complexfloating` is deprecated. 
- with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=FutureWarning) - mesh.point_arrays[name] = scalars - - -def _update_slider_callback(slider, callback, event_type): - _check_option('event_type', event_type, ['start', 'end', 'always']) - - def _the_callback(widget, event): - value = widget.GetRepresentation().GetValue() - if hasattr(callback, '__call__'): - try_callback(callback, value) - return - - if event_type == 'start': - event = vtk.vtkCommand.StartInteractionEvent - elif event_type == 'end': - event = vtk.vtkCommand.EndInteractionEvent - else: - assert event_type == 'always', event_type - event = vtk.vtkCommand.InteractionEvent - - slider.RemoveObserver(event) - slider.AddObserver(event, _the_callback) - - def _add_camera_callback(camera, callback): camera.AddObserver(vtk.vtkCommand.ModifiedEvent, callback) -def _update_picking_callback(plotter, - on_mouse_move, - on_button_press, - on_button_release, - on_pick): - interactor = plotter.iren - interactor.AddObserver( - vtk.vtkCommand.RenderEvent, - on_mouse_move - ) - interactor.AddObserver( - vtk.vtkCommand.LeftButtonPressEvent, - on_button_press - ) - interactor.AddObserver( - vtk.vtkCommand.EndInteractionEvent, - on_button_release - ) - picker = vtk.vtkCellPicker() - picker.AddObserver( - vtk.vtkCommand.EndPickEvent, - on_pick - ) - picker.SetVolumeOpacityIsovalue(0.) - plotter.picker = picker - - -def _remove_picking_callback(interactor, picker): - interactor.RemoveObservers(vtk.vtkCommand.RenderEvent) - interactor.RemoveObservers(vtk.vtkCommand.LeftButtonPressEvent) - interactor.RemoveObservers(vtk.vtkCommand.EndInteractionEvent) - picker.RemoveObservers(vtk.vtkCommand.EndPickEvent) - - def _arrow_glyph(grid, factor): glyph = vtk.vtkGlyphSource2D() glyph.SetGlyphTypeToArrow() @@ -1019,84 +1099,6 @@ def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0, return alg -def _sphere(plotter, center, color, radius): - sphere = vtk.vtkSphereSource() - sphere.SetThetaResolution(8) - sphere.SetPhiResolution(8) - sphere.SetRadius(radius) - sphere.SetCenter(center) - sphere.Update() - mesh = pyvista.wrap(sphere.GetOutput()) - actor = _add_mesh( - plotter, - mesh=mesh, - color=color - ) - return actor, mesh - - -def _volume(dimensions, origin, spacing, scalars, - surface_alpha, resolution, blending, center): - # Now we can actually construct the visualization - grid = pyvista.UniformGrid() - grid.dimensions = dimensions + 1 # inject data on the cells - grid.origin = origin - grid.spacing = spacing - grid.cell_arrays['values'] = scalars - - # Add contour of enclosed volume (use GetOutput instead of - # GetOutputPort below to avoid updating) - grid_alg = vtk.vtkCellDataToPointData() - grid_alg.SetInputDataObject(grid) - grid_alg.SetPassCellData(False) - grid_alg.Update() - - if surface_alpha > 0: - grid_surface = vtk.vtkMarchingContourFilter() - grid_surface.ComputeNormalsOn() - grid_surface.ComputeScalarsOff() - grid_surface.SetInputData(grid_alg.GetOutput()) - grid_surface.SetValue(0, 0.1) - grid_surface.Update() - grid_mesh = vtk.vtkPolyDataMapper() - grid_mesh.SetInputData(grid_surface.GetOutput()) - else: - grid_mesh = None - - mapper = vtk.vtkSmartVolumeMapper() - if resolution is None: # native - mapper.SetScalarModeToUseCellData() - mapper.SetInputDataObject(grid) - else: - upsampler = vtk.vtkImageReslice() - upsampler.SetInterpolationModeToLinear() # default anyway - upsampler.SetOutputSpacing(*([resolution] * 3)) - upsampler.SetInputConnection(grid_alg.GetOutputPort()) - 
mapper.SetInputConnection(upsampler.GetOutputPort()) - # Additive, AverageIntensity, and Composite might also be reasonable - remap = dict(composite='Composite', mip='MaximumIntensity') - getattr(mapper, f'SetBlendModeTo{remap[blending]}')() - volume_pos = vtk.vtkVolume() - volume_pos.SetMapper(mapper) - dist = grid.length / (np.mean(grid.dimensions) - 1) - volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist) - if center is not None and blending == 'mip': - # We need to create a minimum intensity projection for the neg half - mapper_neg = vtk.vtkSmartVolumeMapper() - if resolution is None: # native - mapper_neg.SetScalarModeToUseCellData() - mapper_neg.SetInputDataObject(grid) - else: - mapper_neg.SetInputConnection(upsampler.GetOutputPort()) - mapper_neg.SetBlendModeToMinimumIntensity() - volume_neg = vtk.vtkVolume() - volume_neg.SetMapper(mapper_neg) - volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist) - else: - volume_neg = None - return grid, grid_mesh, volume_pos, volume_neg - - def _require_minimum_version(version_required): from distutils.version import LooseVersion version = LooseVersion(pyvista.__version__) @@ -1106,27 +1108,6 @@ def _require_minimum_version(version_required): version)) -@contextmanager -def _testing_context(interactive): - from . import renderer - orig_offscreen = pyvista.OFF_SCREEN - orig_testing = renderer.MNE_3D_BACKEND_TESTING - orig_interactive = renderer.MNE_3D_BACKEND_INTERACTIVE - renderer.MNE_3D_BACKEND_TESTING = True - if interactive: - pyvista.OFF_SCREEN = False - renderer.MNE_3D_BACKEND_INTERACTIVE = True - else: - pyvista.OFF_SCREEN = True - renderer.MNE_3D_BACKEND_INTERACTIVE = False - try: - yield - finally: - pyvista.OFF_SCREEN = orig_offscreen - renderer.MNE_3D_BACKEND_TESTING = orig_testing - renderer.MNE_3D_BACKEND_INTERACTIVE = orig_interactive - - @contextmanager def _disabled_depth_peeling(): from pyvista import rcParams @@ -1136,30 +1117,3 @@ def _disabled_depth_peeling(): yield finally: rcParams["depth_peeling"]["enabled"] = depth_peeling_enabled - - -@contextmanager -def _disabled_interaction(renderer): - plotter = renderer.plotter - if not plotter.renderer.GetInteractive(): - yield - else: - plotter.disable() - try: - yield - finally: - plotter.enable() - - -@decorator -def run_once(fun, *args, **kwargs): - """Run the function only once.""" - if not hasattr(fun, "_has_run"): - fun._has_run = True - return fun(*args, **kwargs) - - -@run_once -def _init_resources(): - from ...icons import resources - resources.qInitResources() diff --git a/mne/viz/backends/_qt.py b/mne/viz/backends/_qt.py new file mode 100644 index 00000000000..fb8a0674f20 --- /dev/null +++ b/mne/viz/backends/_qt.py @@ -0,0 +1,559 @@ +"""Qt implementation of _Renderer and GUI.""" + +# Authors: Guillaume Favelier +# Eric Larson +# +# License: Simplified BSD + +from contextlib import contextmanager + +import pyvista +try: + from pyvista.plotting.qt_plotting import FileDialog +except ImportError: + from pyvistaqt.plotting import FileDialog + +from PyQt5.QtCore import Qt, pyqtSignal, QLocale +from PyQt5.QtGui import QIcon, QImage, QPixmap, QCursor +from PyQt5.QtWidgets import (QComboBox, QDockWidget, QDoubleSpinBox, QGroupBox, + QHBoxLayout, QLabel, QToolButton, QMenuBar, + QSlider, QSpinBox, QVBoxLayout, QWidget, + QSizePolicy, QScrollArea, QStyle, QProgressBar, + QStyleOptionSlider, QLayout) + +from ._pyvista import _PyVistaRenderer +from ._pyvista import (_close_all, _close_3d_figure, _check_3d_figure, # noqa: F401,E501 analysis:ignore + _set_3d_view, 
_set_3d_title, _take_3d_screenshot) # noqa: F401,E501 analysis:ignore +from ._abstract import (_AbstractDock, _AbstractToolBar, _AbstractMenuBar, + _AbstractStatusBar, _AbstractLayout, _AbstractWidget, + _AbstractWindow, _AbstractMplCanvas, _AbstractPlayback, + _AbstractBrainMplCanvas, _AbstractMplInterface) +from ._utils import _init_qt_resources, _qt_disable_paint +from ..utils import logger + + +class _QtLayout(_AbstractLayout): + def _layout_initialize(self, max_width): + pass + + def _layout_add_widget(self, layout, widget, stretch=0): + if isinstance(widget, QLayout): + layout.addLayout(widget) + else: + layout.addWidget(widget, stretch) + + +class _QtDock(_AbstractDock, _QtLayout): + def _dock_initialize(self, window=None): + window = self._window if window is None else window + self._dock, self._dock_layout = _create_dock_widget( + self._window, "Controls", Qt.LeftDockWidgetArea) + window.setCorner(Qt.BottomLeftCorner, Qt.LeftDockWidgetArea) + + def _dock_finalize(self): + self._dock.setMinimumSize(self._dock.sizeHint().width(), 0) + self._dock_add_stretch(self._dock_layout) + + def _dock_show(self): + self._dock.show() + + def _dock_hide(self): + self._dock.hide() + + def _dock_add_stretch(self, layout): + layout.addStretch() + + def _dock_add_layout(self, vertical=True): + layout = QVBoxLayout() if vertical else QHBoxLayout() + return layout + + def _dock_add_label(self, value, align=False, layout=None): + layout = self._dock_layout if layout is None else layout + widget = QLabel() + if align: + widget.setAlignment(Qt.AlignCenter) + widget.setText(value) + self._layout_add_widget(layout, widget) + return _QtWidget(widget) + + def _dock_add_button(self, name, callback, layout=None): + layout = self._dock_layout if layout is None else layout + # If we want one with text instead of an icon, we should use + # QPushButton(name) + widget = QToolButton() + widget.clicked.connect(callback) + widget.setText(name) + self._layout_add_widget(layout, widget) + return _QtWidget(widget) + + def _dock_named_layout(self, name, layout, compact): + layout = self._dock_layout if layout is None else layout + if name is not None: + hlayout = self._dock_add_layout(not compact) + self._dock_add_label( + value=name, align=not compact, layout=hlayout) + self._layout_add_widget(layout, hlayout) + layout = hlayout + return layout + + def _dock_add_slider(self, name, value, rng, callback, + compact=True, double=False, layout=None): + layout = self._dock_named_layout(name, layout, compact) + slider_class = QFloatSlider if double else QSlider + cast = float if double else int + widget = slider_class(Qt.Horizontal) + widget.setMinimum(cast(rng[0])) + widget.setMaximum(cast(rng[1])) + widget.setValue(cast(value)) + widget.valueChanged.connect(callback) + self._layout_add_widget(layout, widget) + return _QtWidget(widget) + + def _dock_add_spin_box(self, name, value, rng, callback, + compact=True, double=True, layout=None): + layout = self._dock_named_layout(name, layout, compact) + value = value if double else int(value) + widget = QDoubleSpinBox() if double else QSpinBox() + widget.setAlignment(Qt.AlignCenter) + widget.setMinimum(rng[0]) + widget.setMaximum(rng[1]) + inc = (rng[1] - rng[0]) / 20. 
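# For example (illustrative values only): an integer spin box with
# rng=(0, 5) gives inc = 0.25 here, which the next line rounds and then
# clamps to a minimum step of 1; a float spin box with rng=(0., 1.)
# keeps inc = 0.05.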
+ inc = max(int(round(inc)), 1) if not double else inc + widget.setKeyboardTracking(False) + widget.setSingleStep(inc) + widget.setValue(value) + widget.valueChanged.connect(callback) + self._layout_add_widget(layout, widget) + return _QtWidget(widget) + + def _dock_add_combo_box(self, name, value, rng, + callback, compact=True, layout=None): + layout = self._dock_named_layout(name, layout, compact) + widget = QComboBox() + widget.addItems(rng) + widget.setCurrentText(value) + widget.currentTextChanged.connect(callback) + widget.setSizeAdjustPolicy(QComboBox.AdjustToContents) + self._layout_add_widget(layout, widget) + return _QtWidget(widget) + + def _dock_add_group_box(self, name, layout=None): + layout = self._dock_layout if layout is None else layout + hlayout = QVBoxLayout() + widget = QGroupBox(name) + widget.setLayout(hlayout) + self._layout_add_widget(layout, widget) + return hlayout + + +class QFloatSlider(QSlider): + """Slider that handles float values.""" + + valueChanged = pyqtSignal(float) + + def __init__(self, ori, parent=None): + """Initialize the slider.""" + super().__init__(ori, parent) + self._opt = QStyleOptionSlider() + self.initStyleOption(self._opt) + self._gr = self.style().subControlRect( + QStyle.CC_Slider, self._opt, QStyle.SC_SliderGroove, self) + self._sr = self.style().subControlRect( + QStyle.CC_Slider, self._opt, QStyle.SC_SliderHandle, self) + self._precision = 10000 + super().valueChanged.connect(self._convert) + + def _convert(self, value): + self.valueChanged.emit(value / self._precision) + + def minimum(self): + """Get the minimum.""" + return super().minimum() / self._precision + + def setMinimum(self, value): + """Set the minimum.""" + super().setMinimum(int(value * self._precision)) + + def maximum(self): + """Get the maximum.""" + return super().maximum() / self._precision + + def setMaximum(self, value): + """Set the maximum.""" + super().setMaximum(int(value * self._precision)) + + def value(self): + """Get the current value.""" + return super().value() / self._precision + + def setValue(self, value): + """Set the current value.""" + super().setValue(int(value * self._precision)) + + # Adapted from: + # https://stackoverflow.com/questions/52689047/moving-qslider-to-mouse-click-position # noqa: E501 + def mousePressEvent(self, event): + """Add snap-to-location handling.""" + opt = QStyleOptionSlider() + self.initStyleOption(opt) + sr = self.style().subControlRect( + QStyle.CC_Slider, opt, QStyle.SC_SliderHandle, self) + if (event.button() != Qt.LeftButton or sr.contains(event.pos())): + super().mousePressEvent(event) + return + if self.orientation() == Qt.Vertical: + half = (0.5 * sr.height()) + 0.5 + max_ = self.height() + pos = max_ - event.y() + else: + half = (0.5 * sr.width()) + 0.5 + max_ = self.width() + pos = event.x() + max_ = max_ - 2 * half + pos = min(max(pos - half, 0), max_) / max_ + val = self.minimum() + (self.maximum() - self.minimum()) * pos + val = (self.maximum() - val) if self.invertedAppearance() else val + self.setValue(val) + event.accept() + # Process afterward so it's seen as a drag + super().mousePressEvent(event) + + +class _QtToolBar(_AbstractToolBar, _QtLayout): + def _tool_bar_load_icons(self): + _init_qt_resources() + self.icons = dict() + self.icons["help"] = QIcon(":/help.svg") + self.icons["play"] = QIcon(":/play.svg") + self.icons["pause"] = QIcon(":/pause.svg") + self.icons["reset"] = QIcon(":/reset.svg") + self.icons["scale"] = QIcon(":/scale.svg") + self.icons["clear"] = QIcon(":/clear.svg") + 
self.icons["movie"] = QIcon(":/movie.svg") + self.icons["restore"] = QIcon(":/restore.svg") + self.icons["screenshot"] = QIcon(":/screenshot.svg") + self.icons["visibility_on"] = QIcon(":/visibility_on.svg") + self.icons["visibility_off"] = QIcon(":/visibility_off.svg") + + def _tool_bar_initialize(self, name="default", window=None): + self.actions = dict() + window = self._window if window is None else window + self._tool_bar = window.addToolBar(name) + self._tool_bar_layout = self._tool_bar.layout() + + def _tool_bar_add_button(self, name, desc, func, icon_name=None, + shortcut=None): + icon_name = name if icon_name is None else icon_name + icon = self.icons[icon_name] + self.actions[name] = self._tool_bar.addAction(icon, desc, func) + if shortcut is not None: + self.actions[name].setShortcut(shortcut) + + def _tool_bar_update_button_icon(self, name, icon_name): + self.actions[name].setIcon(self.icons[icon_name]) + + def _tool_bar_add_text(self, name, value, placeholder): + pass + + def _tool_bar_add_spacer(self): + spacer = QWidget() + spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) + self._tool_bar.addWidget(spacer) + + def _tool_bar_add_file_button(self, name, desc, func, shortcut=None): + def callback(): + return FileDialog( + self.plotter.app_window, + callback=func, + ) + + self._tool_bar_add_button( + name=name, + desc=desc, + func=callback, + shortcut=shortcut, + ) + + def _tool_bar_set_theme(self, theme): + if theme == 'auto': + theme = _detect_theme() + + if theme == 'dark': + for icon_key in self.icons: + icon = self.icons[icon_key] + image = icon.pixmap(80).toImage() + image.invertPixels(mode=QImage.InvertRgb) + self.icons[icon_key] = QIcon(QPixmap.fromImage(image)) + + +class _QtMenuBar(_AbstractMenuBar): + def _menu_initialize(self, window=None): + self._menus = dict() + self._menu_actions = dict() + self._menu_bar = QMenuBar() + self._menu_bar.setNativeMenuBar(False) + window = self._window if window is None else window + window.setMenuBar(self._menu_bar) + + def _menu_add_submenu(self, name, desc): + self._menus[name] = self._menu_bar.addMenu(desc) + + def _menu_add_button(self, menu_name, name, desc, func): + menu = self._menus[menu_name] + self._menu_actions[name] = menu.addAction(desc, func) + + +class _QtStatusBar(_AbstractStatusBar, _QtLayout): + def _status_bar_initialize(self, window=None): + window = self._window if window is None else window + self._status_bar = window.statusBar() + self._status_bar_layout = self._status_bar.layout() + + def _status_bar_add_label(self, value, stretch=0): + widget = QLabel(value) + self._layout_add_widget(self._status_bar_layout, widget, stretch) + return _QtWidget(widget) + + def _status_bar_add_progress_bar(self, stretch=0): + widget = QProgressBar() + self._layout_add_widget(self._status_bar_layout, widget, stretch) + return _QtWidget(widget) + + def _status_bar_update(self): + self._status_bar_layout.update() + + +class _QtPlayback(_AbstractPlayback): + def _playback_initialize(self, func, timeout): + self.figure.plotter.add_callback(func, timeout) + + +class _QtMplInterface(_AbstractMplInterface): + def _mpl_initialize(self): + from PyQt5 import QtWidgets + from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg + self.canvas = FigureCanvasQTAgg(self.fig) + FigureCanvasQTAgg.setSizePolicy( + self.canvas, + QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding + ) + FigureCanvasQTAgg.updateGeometry(self.canvas) + + +class _QtMplCanvas(_AbstractMplCanvas, _QtMplInterface): + def 
__init__(self, width, height, dpi): + super().__init__(width, height, dpi) + self._mpl_initialize() + + +class _QtBrainMplCanvas(_AbstractBrainMplCanvas, _QtMplInterface): + def __init__(self, brain, width, height, dpi): + super().__init__(brain, width, height, dpi) + self._mpl_initialize() + if brain.separate_canvas: + self.canvas.setParent(None) + else: + self.canvas.setParent(brain._renderer._window) + self._connect() + + +class _QtWindow(_AbstractWindow): + def _window_initialize(self): + super()._window_initialize() + self._interactor = self.figure.plotter.interactor + self._window = self.figure.plotter.app_window + self._window.setLocale(QLocale(QLocale.Language.English)) + + def _window_close_connect(self, func): + self._window.signal_close.connect(func) + + def _window_get_dpi(self): + return self._window.windowHandle().screen().logicalDotsPerInch() + + def _window_get_size(self): + w = self._interactor.geometry().width() + h = self._interactor.geometry().height() + return (w, h) + + def _window_get_simple_canvas(self, width, height, dpi): + return _QtMplCanvas(width, height, dpi) + + def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces, + separate_canvas): + w, h = self._window_get_mplcanvas_size(interactor_fraction) + self._interactor_fraction = interactor_fraction + self._show_traces = show_traces + self._separate_canvas = separate_canvas + self._mplcanvas = _QtBrainMplCanvas( + brain, w, h, self._window_get_dpi()) + return self._mplcanvas + + def _window_adjust_mplcanvas_layout(self): + canvas = self._mplcanvas.canvas + dock, dock_layout = _create_dock_widget( + self._window, "Traces", Qt.BottomDockWidgetArea) + dock_layout.addWidget(canvas) + + def _window_get_cursor(self): + return self._interactor.cursor() + + def _window_set_cursor(self, cursor): + self._interactor.setCursor(cursor) + + def _window_new_cursor(self, name): + return QCursor(getattr(Qt, name)) + + @contextmanager + def _window_ensure_minimum_sizes(self): + sz = self.figure.store['window_size'] + adjust_mpl = (self._show_traces and not self._separate_canvas) + # plotter: pyvista.plotting.qt_plotting.BackgroundPlotter + # plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa + # plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow # noqa + # plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget # noqa + # plotter.ren_win: vtkXOpenGLRenderWindow + self._interactor.setMinimumSize(*sz) + if adjust_mpl: + mpl_h = int(round((sz[1] * self._interactor_fraction) / + (1 - self._interactor_fraction))) + self._mplcanvas.canvas.setMinimumSize(sz[0], mpl_h) + try: + yield # show + finally: + # 1. Process events + self._process_events() + self._process_events() + # 2. Get the window and interactor sizes that work + win_sz = self._window.size() + ren_sz = self._interactor.size() + # 3. Undo the min size setting and process events + self._interactor.setMinimumSize(0, 0) + if adjust_mpl: + self._mplcanvas.canvas.setMinimumSize(0, 0) + self._process_events() + self._process_events() + # 4. 
Resize the window and interactor to the correct size + # (not sure why, but this is required on macOS at least) + self._interactor.window_size = (win_sz.width(), win_sz.height()) + self._interactor.resize(ren_sz.width(), ren_sz.height()) + self._process_events() + self._process_events() + + def _window_set_theme(self, theme): + if theme == 'auto': + theme = _detect_theme() + + if theme == 'dark': + try: + import qdarkstyle + except ModuleNotFoundError: + logger.info('For Dark-Mode "qdarkstyle" has to be installed! ' + 'You can install it with `pip install qdarkstyle`') + stylesheet = None + else: + stylesheet = qdarkstyle.load_stylesheet() + elif theme != 'light': + with open(theme, 'r') as file: + stylesheet = file.read() + else: + stylesheet = None + + self._window.setStyleSheet(stylesheet) + + +class _QtWidget(_AbstractWidget): + def set_value(self, value): + if hasattr(self._widget, "setValue"): + self._widget.setValue(value) + elif hasattr(self._widget, "setCurrentText"): + self._widget.setCurrentText(value) + else: + assert hasattr(self._widget, "setText") + self._widget.setText(value) + + def get_value(self): + if hasattr(self._widget, "value"): + return self._widget.value() + elif hasattr(self._widget, "currentText"): + return self._widget.currentText() + elif hasattr(self._widget, "text"): + return self._widget.text() + + def set_range(self, rng): + self._widget.setRange(rng[0], rng[1]) + + def show(self): + self._widget.show() + + def hide(self): + self._widget.hide() + + def update(self, repaint=True): + self._widget.update() + if repaint: + self._widget.repaint() + + +class _Renderer(_PyVistaRenderer, _QtDock, _QtToolBar, _QtMenuBar, + _QtStatusBar, _QtWindow, _QtPlayback): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._window_initialize() + + def show(self): + super().show() + with _qt_disable_paint(self.plotter): + with self._window_ensure_minimum_sizes(): + self.plotter.app_window.show() + self._update() + for plotter in self._all_plotters: + plotter._render() + self._process_events() + + +def _create_dock_widget(window, name, area): + dock = QDockWidget() + scroll = QScrollArea(dock) + dock.setWidget(scroll) + widget = QWidget(scroll) + scroll.setWidget(widget) + scroll.setWidgetResizable(True) + dock.setAllowedAreas(area) + dock.setTitleBarWidget(QLabel(name)) + window.addDockWidget(area, dock) + dock_layout = QVBoxLayout() + widget.setLayout(dock_layout) + # Fix resize grip size + # https://stackoverflow.com/a/65050468/2175965 + dock.setStyleSheet("QDockWidget { margin: 4px; }") + return dock, dock_layout + + +def _detect_theme(): + try: + import darkdetect + return darkdetect.theme().lower() + except Exception: + return 'light' + + +@contextmanager +def _testing_context(interactive): + from . 
import renderer + orig_offscreen = pyvista.OFF_SCREEN + orig_testing = renderer.MNE_3D_BACKEND_TESTING + orig_interactive = renderer.MNE_3D_BACKEND_INTERACTIVE + renderer.MNE_3D_BACKEND_TESTING = True + if interactive: + pyvista.OFF_SCREEN = False + renderer.MNE_3D_BACKEND_INTERACTIVE = True + else: + pyvista.OFF_SCREEN = True + renderer.MNE_3D_BACKEND_INTERACTIVE = False + try: + yield + finally: + pyvista.OFF_SCREEN = orig_offscreen + renderer.MNE_3D_BACKEND_TESTING = orig_testing + renderer.MNE_3D_BACKEND_INTERACTIVE = orig_interactive diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 4c271ead23f..c95ae881486 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -7,15 +7,18 @@ # # License: Simplified BSD +from contextlib import contextmanager import numpy as np import collections.abc +from ...externals.decorator import decorator VALID_3D_BACKENDS = ( 'pyvista', # default 3d backend 'mayavi', 'notebook', ) -ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere') +ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere', + 'oct') def _get_colormap_from_array(colormap=None, normalized_colormap=False, @@ -55,3 +58,34 @@ def _check_color(color): raise TypeError("Expected type is `str` or iterable but " "{} was given.".format(type(color))) return color + + +def _alpha_blend_background(ctable, background_color): + alphas = ctable[:, -1][:, np.newaxis] / 255. + use_table = ctable.copy() + use_table[:, -1] = 255. + return (use_table * alphas) + background_color * (1 - alphas) + + +@decorator +def run_once(fun, *args, **kwargs): + """Run the function only once.""" + if not hasattr(fun, "_has_run"): + fun._has_run = True + return fun(*args, **kwargs) + + +@run_once +def _init_qt_resources(): + from ...icons import resources + resources.qInitResources() + + +@contextmanager +def _qt_disable_paint(widget): + paintEvent = widget.paintEvent + widget.paintEvent = lambda *args, **kwargs: None + try: + yield + finally: + widget.paintEvent = paintEvent diff --git a/mne/viz/backends/renderer.py b/mne/viz/backends/renderer.py index 680a0574a55..461fa26f2b5 100644 --- a/mne/viz/backends/renderer.py +++ b/mne/viz/backends/renderer.py @@ -7,11 +7,14 @@ # # License: Simplified BSD +import sys +import os from contextlib import contextmanager import importlib from ._utils import VALID_3D_BACKENDS -from ...utils import logger, verbose, get_config, _check_option +from ...utils import (logger, verbose, get_config, _check_option, + _require_version) MNE_3D_BACKEND = None MNE_3D_BACKEND_TESTING = False @@ -20,7 +23,7 @@ _backend_name_map = dict( mayavi='._pysurfer_mayavi', - pyvista='._pyvista', + pyvista='._qt', notebook='._notebook', ) backend = None @@ -60,51 +63,46 @@ def set_3d_backend(backend_name, verbose=None): .. 
table:: :widths: auto - +--------------------------------------+--------+---------+ - | 3D function: | mayavi | pyvista | - +======================================+========+=========+ - | :func:`plot_vector_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_alignment` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_sparse_source_estimates` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_evoked_field` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`plot_sensors_connectivity` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`snapshot_brain_montage` | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | :func:`link_brains` | | ✓ | - +--------------------------------------+--------+---------+ - +--------------------------------------+--------+---------+ - | **3D feature:** | - +--------------------------------------+--------+---------+ - | Large data | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Opacity/transparency | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Support geometric glyph | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Jupyter notebook | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Interactivity in Jupyter notebook | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Smooth shading | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Subplotting | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Save offline movie | ✓ | ✓ | - +--------------------------------------+--------+---------+ - | Point picking | | ✓ | - +--------------------------------------+--------+---------+ - - .. note:: - In the case of `plot_vector_source_estimates` with PyVista, the glyph - size is not consistent with Mayavi, it is also possible that a dark - filter is visible on the mesh when depth peeling is not available. 
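# A minimal usage sketch for the backend selection documented here,
# assuming the PyVista and notebook backends' dependencies are installed:
import mne

mne.viz.set_3d_backend('pyvista')
print(mne.viz.get_3d_backend())  # -> 'pyvista'
with mne.viz.use_3d_backend('notebook'):
    pass  # 3D plots created in this block use the notebook backend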
+ +--------------------------------------+--------+---------+----------+ + | **3D function:** | mayavi | pyvista | notebook | + +======================================+========+=========+==========+ + | :func:`plot_vector_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_alignment` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_sparse_source_estimates` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_evoked_field` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`plot_sensors_connectivity` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`snapshot_brain_montage` | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | :func:`link_brains` | | ✓ | | + +--------------------------------------+--------+---------+----------+ + +--------------------------------------+--------+---------+----------+ + | **Feature:** | + +--------------------------------------+--------+---------+----------+ + | Large data | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Opacity/transparency | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Support geometric glyph | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Smooth shading | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Subplotting | ✓ | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in Jupyter Notebook | ✓ | | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in JupyterLab | ✓ | | ✓ | + +--------------------------------------+--------+---------+----------+ + | Inline plot in Google Colab | | | | + +--------------------------------------+--------+---------+----------+ + | Toolbar | | ✓ | ✓ | + +--------------------------------------+--------+---------+----------+ """ global MNE_3D_BACKEND try: @@ -116,6 +114,10 @@ def set_3d_backend(backend_name, verbose=None): _reload_backend(backend_name) MNE_3D_BACKEND = backend_name + # Qt5 macOS 11 compatibility + if sys.platform == 'darwin' and 'QT_MAC_WANTS_LAYER' not in os.environ: + os.environ['QT_MAC_WANTS_LAYER'] = '1' + def get_3d_backend(): """Return the backend currently used. @@ -160,11 +162,14 @@ def _get_3d_backend(): @contextmanager def use_3d_backend(backend_name): - """Create a viz context. + """Create a 3d visualization context using the designated backend. + + See :func:`mne.viz.set_3d_backend` for more details on the available + 3d backends and their capabilities. Parameters ---------- - backend_name : str + backend_name : {'mayavi', 'pyvista', 'notebook'} The 3d backend to use in the context. 
""" old_backend = _get_3d_backend() @@ -294,6 +299,7 @@ def get_brain_class(): """ if get_3d_backend() == "mayavi": from surfer import Brain + _require_version('surfer', 'stc.plot', '0.9') else: # PyVista from ...viz._brain import Brain return Brain diff --git a/mne/viz/backends/tests/test_renderer.py b/mne/viz/backends/tests/test_renderer.py index 8c5299d0ab6..d707694816a 100644 --- a/mne/viz/backends/tests/test_renderer.py +++ b/mne/viz/backends/tests/test_renderer.py @@ -157,7 +157,8 @@ def test_3d_backend(renderer): scalars=np.array([[1.0, 1.0]])) # scalar bar - rend.scalarbar(source=tube, title="Scalar Bar") + rend.scalarbar(source=tube, title="Scalar Bar", + bgcolor=[1, 1, 1]) # use text rend.text2d(x_window=txt_x, y_window=txt_y, text=txt_text, diff --git a/mne/viz/conftest.py b/mne/viz/conftest.py new file mode 100644 index 00000000000..6576bc5ee26 --- /dev/null +++ b/mne/viz/conftest.py @@ -0,0 +1,129 @@ +# Authors: Robert Luke +# Eric Larson +# Alexandre Gramfort +# +# License: BSD (3-clause) + +import inspect +from textwrap import dedent + +import pytest +import numpy as np +import os.path as op + +from mne import create_info, EvokedArray, events_from_annotations, Epochs +from mne.channels import make_standard_montage +from mne.datasets.testing import data_path, _pytest_param +from mne.preprocessing.nirs import optical_density, beer_lambert_law +from mne.io import read_raw_nirx +from mne.utils import Bunch + + +@pytest.fixture() +def fnirs_evoked(): + """Create an fnirs evoked structure.""" + montage = make_standard_montage('biosemi16') + ch_names = montage.ch_names + ch_types = ['eeg'] * 16 + info = create_info(ch_names=ch_names, sfreq=20, ch_types=ch_types) + evoked_data = np.random.randn(16, 30) + evoked = EvokedArray(evoked_data, info=info, tmin=-0.2, nave=4) + evoked.set_montage(montage) + evoked.set_channel_types({'Fp1': 'hbo', 'Fp2': 'hbo', 'F4': 'hbo', + 'Fz': 'hbo'}, verbose='error') + return evoked + + +@pytest.fixture(params=[_pytest_param()]) +def fnirs_epochs(): + """Create an fnirs epoch structure.""" + fname = op.join(data_path(download=False), + 'NIRx', 'nirscout', 'nirx_15_2_recording_w_overlap') + raw_intensity = read_raw_nirx(fname, preload=False) + raw_od = optical_density(raw_intensity) + raw_haemo = beer_lambert_law(raw_od) + evts, _ = events_from_annotations(raw_haemo, event_id={'1.0': 1}) + evts_dct = {'A': 1} + tn, tx = -1, 2 + epochs = Epochs(raw_haemo, evts, event_id=evts_dct, tmin=tn, tmax=tx) + return epochs + + +# Create one nbclient and reuse it +@pytest.fixture(scope='session') +def _nbclient(): + try: + import nbformat + from jupyter_client import AsyncKernelManager + from nbclient import NotebookClient + from ipywidgets import Button # noqa + import ipyvtk_simple # noqa + except Exception as exc: + return pytest.skip(f'Skipping Notebook test: {exc}') + km = AsyncKernelManager(config=None) + nb = nbformat.reads(""" +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata":{}, + "outputs": [], + "source":[] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version":3}, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}""", as_version=4) + client = NotebookClient(nb, km=km) + yield client + client._cleanup_kernel() + + +@pytest.fixture(scope='function') +def nbexec(_nbclient): + """Execute Python code in a notebook.""" 
+ # Adapted/simplified from nbclient/client.py (BSD 3-clause) + _nbclient._cleanup_kernel() + + def execute(code, reset=False): + _nbclient.reset_execution_trackers() + with _nbclient.setup_kernel(): + assert _nbclient.kc is not None + cell = Bunch(cell_type='code', metadata={}, source=dedent(code)) + _nbclient.execute_cell(cell, 0, execution_count=0) + _nbclient.set_widgets_metadata() + + yield execute + + +def pytest_runtest_call(item): + """Run notebook code written in Python.""" + if 'nbexec' in getattr(item, 'fixturenames', ()): + nbexec = item.funcargs['nbexec'] + code = inspect.getsource(getattr(item.module, item.name.split('[')[0])) + code = code.splitlines() + ci = 0 + for ci, c in enumerate(code): + if c.startswith(' '): # actual content + break + code = '\n'.join(code[ci:]) + + def run(nbexec=nbexec, code=code): + nbexec(code) + + item.runtest = run + return diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index 7373405e618..7e8fd15c978 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -219,7 +219,7 @@ def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None, ts_args['show_sensors'] = False vlines = [0] if (epochs.times[0] < 0 < epochs.times[-1]) else [] ts_defaults = dict(colors={'cond': 'k'}, title='', show=False, - truncate_yaxis='auto', truncate_xaxis=False, + truncate_yaxis=False, truncate_xaxis=False, vlines=vlines, legend=False) ts_defaults.update(**ts_args) ts_args = ts_defaults.copy() @@ -349,16 +349,10 @@ def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None, ch_type = this_group_dict['ch_type'] if not manual_ylims: args = auto_ylims[ch_type] - func = max if 'invert_y' in ts_args: args = args[::-1] - func = min ax.set_ylim(*args) - yticks = np.array(ax.get_yticks()) - top_tick = func(yticks) - ax.spines['left'].set_bounds(top_tick, args[0]) plt_show(show) - # impose deterministic order of returned objects return_order = np.array(sorted(group_by)) are_ch_types = np.in1d(return_order, _VALID_CHANNEL_TYPES) @@ -501,6 +495,8 @@ def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None, title=None, evoked=False, ts_args=None, combine=None, combine_given=False, norm=False): """Plot epochs image. Helper function for plot_epochs_image.""" + from matplotlib.ticker import AutoLocator + if cmap is None: cmap = 'Reds' if norm else 'RdBu_r' @@ -513,7 +509,7 @@ def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None, # draw the image cmap = _setup_cmap(cmap, norm=norm) n_epochs = len(image) - extent = [1e3 * tmin, 1e3 * tmax, 0, n_epochs] + extent = [tmin, tmax, 0, n_epochs] im = ax_im.imshow(image, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto', origin='lower', interpolation='nearest', extent=extent) @@ -521,15 +517,16 @@ def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None, if style_axes: ax_im.set_title(title) ax_im.set_ylabel('Epochs') + if not evoked: + ax_im.set_xlabel('Time (s)') ax_im.axis('auto') ax_im.axis('tight') ax_im.axvline(0, color='k', linewidth=1, linestyle='--') if overlay_times is not None: - ax_im.plot(1e3 * overlay_times, 0.5 + np.arange(n_epochs), 'k', + ax_im.plot(overlay_times, 0.5 + np.arange(n_epochs), 'k', linewidth=2) - ax_im.set_xlim(1e3 * tmin, 1e3 * tmax) - + ax_im.set_xlim(tmin, tmax) # draw the evoked if evoked: from . 
import plot_compare_evokeds @@ -538,8 +535,14 @@ def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None, plot_compare_evokeds({'cond': list(epochs.iter_evoked(copy=False))}, picks=_picks, axes=ax['evoked'], combine=pass_combine, **ts_args) - ax['evoked'].set_xlim(tmin, tmax) # don't multiply by 1e3 here - ax_im.set_xticks([]) + ax['evoked'].set_xlim(tmin, tmax) + ax['evoked'].lines[0].set_clip_on(True) + ax['evoked'].collections[0].set_clip_on(True) + ax['evoked'].get_shared_x_axes().join(ax['evoked'], ax_im) + # fix the axes for proper updating during interactivity + loc = ax_im.xaxis.get_major_locator() + ax['evoked'].xaxis.set_major_locator(loc) + ax['evoked'].yaxis.set_major_locator(AutoLocator()) # draw the colorbar if colorbar: @@ -571,8 +574,12 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj', plot. Default is zero (always plot). n_max_plot : int Maximum number of channels to show stats for. - subject : str - The subject name to use in the title of the plot. + subject : str | None + The subject name to use in the title of the plot. If ``None``, do not + display a subject name. + + .. versionchanged:: 0.23 + Added support for ``None``. color : tuple | str Color to use for the bars. width : float @@ -599,7 +606,10 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj', counts = np.array(list(scores.values())) # init figure, handle easy case (no drops) fig, ax = plt.subplots() - ax.set_title('{}: {:.1f}%'.format(subject, percent)) + title = f'{percent:.1f}% of all epochs rejected' + if subject is not None: + title = f'{subject}: {title}' + ax.set_title(title) if len(ch_names) == 0: ax.text(0.5, 0.5, 'No drops', ha='center', fontsize=14) return fig @@ -624,7 +634,7 @@ def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj', @fill_doc def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20, - title=None, events=None, event_colors=None, event_color=None, + title=None, events=None, event_color=None, order=None, show=True, block=False, decim='auto', noise_cov=None, butterfly=False, show_scrollbars=True, epoch_colors=None, event_id=None, group_by='type'): @@ -658,19 +668,15 @@ def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20, title : str | None The title of the window. If None, epochs name will be displayed. Defaults to None. - events : None, array, shape (n_events, 3) - Events to show with vertical bars. If events are provided, the epoch - numbers are not shown to prevent overlap. You can toggle epoch - numbering through options (press 'o' key). You can use - `~mne.viz.plot_events` as a legend for the colors. By default, the - coloring scheme is the same. + events : None | array, shape (n_events, 3) + Events to show with vertical bars. You can use `~mne.viz.plot_events` + as a legend for the colors. By default, the coloring scheme is the + same. Defaults to ``None``. .. warning:: If the epochs have been resampled, the events no longer align with the data. .. versionadded:: 0.14.0 - event_colors : None - Deprecated. Use ``event_color`` instead. %(event_color)s Defaults to ``None``. 
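# --- Reviewer sketch, not part of the patch: minimal usage of the updated
# ``plot_drop_log`` behaviour shown above. Passing ``subject=None`` (new in
# 0.23 per this hunk) drops the subject prefix, so the title reads
# "<percent>% of all epochs rejected". ``epochs`` is assumed to be an
# existing mne.Epochs instance with some dropped epochs.
fig_named = epochs.plot_drop_log(subject='sub-01')  # "sub-01: X% of all epochs rejected"
fig_anon = epochs.plot_drop_log(subject=None)       # "X% of all epochs rejected"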
order : array of str | None @@ -802,15 +808,6 @@ def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20, else: event_nums = None event_times = None - if event_colors is not None: - depr_msg = ('event_colors is deprecated and will be replaced by ' - 'event_color in 0.23.') - if event_color is None: - event_color = event_colors - else: - depr_msg += (' Since you passed values for both event_colors and ' - 'event_color, event_colors will be ignored.') - warn(depr_msg, DeprecationWarning) event_color_dict = _make_event_color_dict(event_color, events, event_id) # determine trace order @@ -895,10 +892,6 @@ def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20, fig._update_data() fig._draw_traces() - # for blitting - fig.canvas.flush_events() - fig.mne.bg = fig.canvas.copy_from_bbox(fig.bbox) - plt_show(show, block=block) return fig @@ -965,12 +958,15 @@ def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None, from ._figure import _psd_figure # generate figure + # epochs always use multitaper, not Welch, so no need to allow "window" + # param above fig = _psd_figure( inst=epochs, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB, average=average, estimate=estimate, area_mode=area_mode, line_alpha=line_alpha, area_alpha=area_alpha, color=color, spatial_colors=spatial_colors, n_jobs=n_jobs, bandwidth=bandwidth, - adaptive=adaptive, low_bias=low_bias, normalization=normalization) + adaptive=adaptive, low_bias=low_bias, normalization=normalization, + window='hamming') plt_show(show) return fig diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 1e2bddeae10..8be5b08c6e5 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -266,13 +266,20 @@ def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline, if axes is not None and proj == 'interactive': raise RuntimeError('Currently only single axis figures are supported' ' for interactive SSP selection.') - if isinstance(gfp, str) and gfp != 'only': - raise ValueError('gfp must be boolean or "only". Got %s' % gfp) + + _check_option('gfp', gfp, [True, False, 'only']) scalings = _handle_default('scalings', scalings) titles = _handle_default('titles', titles) units = _handle_default('units', units) + if plot_type == "image": + if ylim is not None and not isinstance(ylim, dict): + # The user called Evoked.plot_image() or plot_evoked_image(), the + # clim parameters of those functions end up to be the ylim here. + raise ValueError("`clim` must be a dict. " + "E.g. clim = dict(eeg=[-20, 20])") + picks = _picks_to_idx(info, picks, none='all', exclude=()) if len(picks) != len(set(picks)): raise ValueError("`picks` are not unique. Please remove duplicates.") @@ -428,7 +435,7 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, # Set amplitude scaling D = this_scaling * data[idx, :] _check_if_nan(D) - gfp_only = (isinstance(gfp, str) and gfp == 'only') + gfp_only = gfp == 'only' if not gfp_only: chs = [info['chs'][i] for i in idx] locs3d = np.array([ch['loc'][:3] for ch in chs]) @@ -473,10 +480,17 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, linewidth=0.5)[0]) line_list[-1].set_pickradius(3.) 
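# --- Reviewer sketch, not part of the patch: the new validation added to
# ``_plot_evoked`` for image plots means ``clim`` must be a dict keyed by
# channel type; a bare sequence now raises a ValueError. ``evoked`` is
# assumed to be an existing mne.Evoked instance with EEG channels.
evoked.plot_image(clim=dict(eeg=[-20, 20]))  # OK: per-channel-type limits
evoked.plot_image(clim=[-20, 20])            # ValueError: "`clim` must be a dict."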
- if gfp: # 'only' or boolean True + if gfp: + if gfp in [True, 'only']: + if this_type == 'eeg': + this_gfp = D.std(axis=0, ddof=0) + label = 'GFP' + else: + this_gfp = np.linalg.norm(D, axis=0) / np.sqrt(len(D)) + label = 'RMS' + gfp_color = 3 * (0.,) if spatial_colors is True else (0., 1., 0.) - this_gfp = np.sqrt((D * D).mean(axis=0)) this_ylim = ax.get_ylim() if (ylim is None or this_type not in ylim.keys()) else ylim[this_type] if gfp_only: @@ -490,7 +504,7 @@ def _plot_lines(data, info, picks, fig, axes, spatial_colors, unit, units, zorder=3, alpha=line_alpha)[0]) ax.text(times[0] + 0.01 * (times[-1] - times[0]), this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0], - 'GFP', zorder=4, color=gfp_color, + label, zorder=4, color=gfp_color, path_effects=gfp_path_effects) for ii, line in zip(idx, line_list): if ii in bad_ch_idx: @@ -674,8 +688,23 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True, the same length as the number of channel types. If instance of Axes, there must be only one channel type plotted. gfp : bool | 'only' - Plot GFP in green if True or "only". If "only", then the individual - channel traces will not be shown. + Plot the global field power (GFP) or the root mean square (RMS) of the + data. For MEG data, this will plot the RMS. For EEG, it plots GFP, + i.e. the standard deviation of the signal across channels. The GFP is + equivalent to the RMS of an average-referenced signal. + + - ``True`` + Plot GFP or RMS (for EEG and MEG, respectively) and traces for all + channels. + - ``'only'`` + Plot GFP or RMS (for EEG and MEG, respectively), and omit the + traces for individual channels. + + The color of the GFP/RMS trace will be green if + ``spatial_colors=False``, and black otherwise. + + .. versionchanged:: 0.23 + Plot GFP for EEG instead of RMS. Label RMS traces correctly as such. window_title : str | None The title to put at the top of the figure. spatial_colors : bool @@ -741,11 +770,12 @@ def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True, time_unit=time_unit, sphere=sphere) -def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, - border='none', ylim=None, scalings=None, title=None, - proj=False, vline=[0.0], fig_background=None, +def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, + color=None, border='none', ylim=None, scalings=None, + title=None, proj=False, vline=[0.0], fig_background=None, merge_grads=False, legend=True, axes=None, - background_color='w', noise_cov=None, show=True): + background_color='w', noise_cov=None, exclude='bads', + show=True): """Plot 2D topography of evoked responses. Clicking on the plot of an individual sensor opens a new figure showing @@ -811,6 +841,9 @@ def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, Can be a string to load a covariance from disk. .. versionadded:: 0.16.0 + exclude : list of str | 'bads' + Channels names to exclude from the plot. If 'bads', the + bad channels are excluded. By default, exclude is set to 'bads'. show : bool Show figure if True. @@ -850,8 +883,8 @@ def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, axis_facecolor=axis_facecolor, font_color=font_color, merge_channels=merge_grads, - legend=legend, axes=axes, show=show, - noise_cov=noise_cov) + legend=legend, axes=axes, exclude=exclude, + show=show, noise_cov=noise_cov) @fill_doc @@ -1014,9 +1047,10 @@ def plot_evoked_white(evoked, noise_cov, show=True, rank=None, time_unit='s', """Plot whitened evoked response. 
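# --- Reviewer sketch, not part of the patch: the reworked ``gfp`` option
# from the hunk above. For EEG the overlay is now the true GFP (std across
# channels); for MEG it is an RMS trace labelled "RMS"; invalid values are
# rejected by ``_check_option``. ``evoked`` is assumed to contain both EEG
# and MEG channels.
evoked.plot(gfp=True)     # channel traces plus GFP (EEG) / RMS (MEG) overlay
evoked.plot(gfp='only')   # only the GFP / RMS traces
evoked.plot(gfp='foo')    # now raises ValueError via _check_option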
Plots the whitened evoked response and the whitened GFP as described in - [1]_. This function is especially useful for investigating noise - covariance properties to determine if data are properly whitened (e.g., - achieving expected values in line with model assumptions, see Notes below). + :footcite:`EngemannGramfort2015`. This function is especially useful for + investigating noise covariance properties to determine if data are + properly whitened (e.g., achieving expected values in line with model + assumptions, see Notes below). Parameters ---------- @@ -1451,7 +1485,9 @@ def plot_evoked_joint(evoked, times="peaks", title='', picks=None, else: locator = None - topomap_args_pass = topomap_args.copy() + topomap_args_pass = (dict(extrapolate='local') if ch_type == 'seeg' + else dict()) + topomap_args_pass.update(topomap_args) topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt') topomap_args_pass['contours'] = contours evoked.plot_topomap(times=times_sec, axes=map_ax, show=False, @@ -1915,6 +1951,23 @@ def _title_helper_pce(title, picked_types, picks, ch_names, combine): return title +def _ascii_minus_to_unicode(s): + """Replace ASCII-encoded "minus-hyphen" characters with Unicode minus. + + Aux function for ``plot_compare_evokeds`` to prettify ``Evoked.comment``. + """ + if s is None: + return + + # replace ASCII minus operators with Unicode minus characters + s = s.replace(' - ', ' − ') + # replace leading minus operator if present + if s.startswith('-'): + s = f'−{s[1:]}' + + return s + + @fill_doc def plot_compare_evokeds(evokeds, picks=None, colors=None, linestyles=None, styles=None, cmap=None, @@ -2139,7 +2192,10 @@ def plot_compare_evokeds(evokeds, picks=None, colors=None, if isinstance(evokeds, (list, tuple)): evokeds_copy = evokeds.copy() evokeds = dict() - comments = [getattr(_evk, 'comment', None) for _evk in evokeds_copy] + + comments = [_ascii_minus_to_unicode(getattr(_evk, 'comment', None)) + for _evk in evokeds_copy] + for idx, (comment, _evoked) in enumerate(zip(comments, evokeds_copy)): key = str(idx + 1) if comment: # only update key if comment is non-empty @@ -2193,12 +2249,14 @@ def plot_compare_evokeds(evokeds, picks=None, colors=None, if show_sensors is None: show_sensors = (len(picks) == 1) + _validate_type(combine, types=(None, 'callable', str), item_name='combine') # cannot combine a single channel if (len(picks) < 2) and combine is not None: warn('Only {} channel in "picks"; cannot combine by method "{}".' 
.format(len(picks), combine)) # `combine` defaults to GFP unless picked a single channel or axes='topo' - if combine is None and len(picks) > 1 and axes != 'topo': + do_topo = isinstance(axes, str) and axes == 'topo' + if combine is None and len(picks) > 1 and not do_topo: combine = 'gfp' # convert `combine` into callable (if None or str) combine_func = _make_combine_callable(combine) @@ -2208,7 +2266,6 @@ def plot_compare_evokeds(evokeds, picks=None, colors=None, ch_names=ch_names, combine=combine) # setup axes - do_topo = (axes == 'topo') if do_topo: show_sensors = False if len(picks) > 70: diff --git a/mne/viz/ica.py b/mne/viz/ica.py index 852190bb745..7f0a6dd3c55 100644 --- a/mne/viz/ica.py +++ b/mne/viz/ica.py @@ -22,7 +22,7 @@ from ..io.meas_info import create_info from ..io.pick import pick_types, _picks_to_idx from ..time_frequency.psd import psd_multitaper -from ..utils import _reject_data_segments +from ..utils import _reject_data_segments, verbose @fill_doc @@ -44,11 +44,11 @@ def plot_ica_sources(ica, inst, picks=None, start=None, inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked The object to plot the sources from. %(picks_base)s all sources in the order as fitted. - start : int - X-axis start index. If None, from the beginning. - stop : int - X-axis stop index. If None, next 20 are shown, in case of evoked to the - end. + start : int | None + X-axis start index. If None (default), from the beginning. + stop : int | None + X-axis stop index. If None (default), next 20 are shown, in case of + evoked to the end. title : str | None The window title. If None a default is provided. show : bool @@ -250,11 +250,11 @@ def _get_psd_label_and_std(this_psd, dB, ica, num_std): return psd_ylabel, psds_mean, spectrum_std -@fill_doc +@verbose def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, plot_std=True, topomap_args=None, image_args=None, psd_args=None, figsize=None, show=True, reject='auto', - reject_by_annotation=True): + reject_by_annotation=True, *, verbose=None): """Display component properties. Properties include the topography, epochs image, ERP/ERF, power @@ -308,6 +308,7 @@ def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, %(reject_by_annotation_raw)s .. versionadded:: 0.21.0 + %(verbose)s Returns ------- @@ -318,14 +319,26 @@ def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, ----- .. 
versionadded:: 0.13 """ - from ..io.base import BaseRaw - from ..epochs import BaseEpochs + return _fast_plot_ica_properties(ica, inst, picks=picks, axes=axes, dB=dB, + plot_std=plot_std, + topomap_args=topomap_args, + image_args=image_args, psd_args=psd_args, + figsize=figsize, show=show, + reject=reject, + reject_by_annotation=reject_by_annotation, + verbose=verbose, precomputed_data=None) + + +def _fast_plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, + plot_std=True, topomap_args=None, + image_args=None, psd_args=None, figsize=None, + show=True, reject='auto', precomputed_data=None, + reject_by_annotation=True, *, verbose=None): + """Display component properties.""" from ..preprocessing import ICA - from ..io import RawArray # input checks and defaults # ------------------------- - _validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs") _validate_type(ica, ICA, "ica", "ICA") _validate_type(plot_std, (bool, 'numeric'), 'plot_std') if isinstance(plot_std, bool): @@ -366,62 +379,13 @@ def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True, # calculations # ------------ - - if isinstance(inst, BaseRaw): - # when auto, delegate reject to the ica - if reject == 'auto': - reject = getattr(ica, 'reject_', None) - else: - pass - - if reject is None: - inst_rejected = inst - drop_inds = None - else: - data = inst.get_data() - data, drop_inds = _reject_data_segments(data, ica.reject_, - flat=None, decim=None, - info=inst.info, - tstep=2.0) - inst_rejected = RawArray(data, inst.info) - - # break up continuous signal into segments - from ..epochs import make_fixed_length_epochs - inst_rejected = make_fixed_length_epochs( - inst_rejected, - duration=2, - preload=True, - reject_by_annotation=reject_by_annotation, - proj=False, - verbose=False) - inst = make_fixed_length_epochs( - inst, - duration=2, - preload=True, - reject_by_annotation=reject_by_annotation, - proj=False, - verbose=False) - kind = "Segment" + if isinstance(precomputed_data, tuple): + kind, dropped_indices, epochs_src, data = precomputed_data else: - drop_inds = None - inst_rejected = inst - kind = "Epochs" - - epochs_src = ica.get_sources(inst_rejected) - data = epochs_src.get_data() - + kind, dropped_indices, epochs_src, data = _prepare_data_ica_properties( + inst, ica, reject_by_annotation, reject) ica_data = np.swapaxes(data[:, picks, :], 0, 1) - - # getting dropped epochs indexes - if drop_inds is not None: - dropped_indices = [(d[0] // len(inst.times)) + 1 - for d in drop_inds] - else: - dropped_indices = [] - - # getting ica sources from inst - dropped_src = ica.get_sources(inst).get_data() - dropped_src = np.swapaxes(dropped_src[:, picks, :], 0, 1) + dropped_src = ica_data # spectrum Nyquist = inst.info['sfreq'] / 2. @@ -479,6 +443,80 @@ def set_title_and_labels(ax, title, xlab, ylab): return all_fig +def _prepare_data_ica_properties(inst, ica, reject_by_annotation=True, + reject='auto'): + """Prepare Epochs sources to plot ICA properties. + + Parameters + ---------- + ica : instance of mne.preprocessing.ICA + The ICA solution. + inst : instance of Epochs or Raw + The data to use in plotting properties. + reject_by_annotation : bool, optional + [description], by default True + reject : str, optional + [description], by default 'auto' + + Returns + ------- + kind : str + "Segment" for BaseRaw and "Epochs" for BaseEpochs + dropped_indices : list + Dropped epochs indexes. + epochs_src : instance of Epochs + Segmented data of ICA sources. 
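# --- Reviewer sketch, not part of the patch: ``plot_ica_properties`` (and
# the ``ICA.plot_properties`` method that wraps it) now accepts a
# keyword-only ``verbose`` argument, as exercised by the updated tests later
# in this diff. ``ica`` and ``raw`` are assumed to be a fitted ICA and a
# preloaded Raw.
figs = ica.plot_properties(raw, picks=[0], verbose='debug', show=False)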
+ data : array of shape (n_epochs, n_ica_sources, n_times) + A view on epochs ICA sources data. + """ + from ..io.base import BaseRaw + from ..io import RawArray + from ..epochs import BaseEpochs + + _validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs") + if isinstance(inst, BaseRaw): + # when auto, delegate reject to the ica + from ..epochs import make_fixed_length_epochs + if reject == 'auto': + reject = getattr(ica, 'reject_', None) + if reject is None: + drop_inds = None + dropped_indices = [] + # break up continuous signal into segments + epochs_src = make_fixed_length_epochs( + ica.get_sources(inst), + duration=2, + preload=True, + reject_by_annotation=reject_by_annotation, + proj=False, + verbose=False) + else: + data = inst.get_data() + data, drop_inds = _reject_data_segments(data, ica.reject_, + flat=None, decim=None, + info=inst.info, + tstep=2.0) + inst_rejected = RawArray(data, inst.info) + # break up continuous signal into segments + epochs_src = make_fixed_length_epochs( + ica.get_sources(inst_rejected), + duration=2, + preload=True, + reject_by_annotation=reject_by_annotation, + proj=False, + verbose=False) + # getting dropped epochs indexes + dropped_indices = [(d[0] // len(epochs_src.times)) + 1 + for d in drop_inds] + kind = "Segment" + else: + drop_inds = None + epochs_src = ica.get_sources(inst) + dropped_indices = [] + kind = "Epochs" + return kind, dropped_indices, epochs_src, epochs_src.get_data() + + def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica, labels=None): """Plot average over epochs in ICA space. @@ -731,10 +769,10 @@ def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None, The components marked for exclusion. If None (default), ICA.exclude will be used. %(picks_base)s all channels that were included during fitting. - start : int - X-axis start index. If None from the beginning. - stop : int - X-axis stop index. If None to the end. + start : int | None + X-axis start index. If None (default) from the beginning. + stop : int | None + X-axis stop index. If None (default) to 3.0s. title : str The figure title. 
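# --- Reviewer sketch, not part of the patch: how the two new private helpers
# introduced here fit together. ``_prepare_data_ica_properties`` segments the
# sources once, and its return tuple can be fed to
# ``_fast_plot_ica_properties`` as ``precomputed_data`` so that plotting many
# components does not recompute the sources. Private API; ``raw`` and a
# fitted ``ica`` are assumed to exist.
from mne.viz.ica import (_fast_plot_ica_properties,
                         _prepare_data_ica_properties)

precomputed = _prepare_data_ica_properties(raw, ica,
                                           reject_by_annotation=True,
                                           reject='auto')
figs = _fast_plot_ica_properties(ica, raw, picks=[0, 1], show=False,
                                 precomputed_data=precomputed)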
show : bool @@ -940,11 +978,12 @@ def _plot_sources(ica, inst, picks, exclude, start, stop, show, title, block, (picks, ica.n_components_ + np.arange(len(extra_picks)))) ch_order = np.arange(len(picks)) n_channels = min([20, len(picks)]) + ch_names_picked = [ch_names[x] for x in picks] # create info - info = create_info([ch_names[x] for x in picks], sfreq, ch_types=ch_types) + info = create_info(ch_names_picked, sfreq, ch_types=ch_types) info['meas_date'] = inst.info['meas_date'] - info['bads'] = [ch_names[x] for x in exclude] + info['bads'] = [ch_names[x] for x in exclude if x in picks] if is_raw: inst_array = RawArray(data, info, inst.first_samp) inst_array.set_annotations(inst.annotations) @@ -983,7 +1022,7 @@ def _plot_sources(ica, inst, picks, exclude, start, stop, show, title, block, ica_inst=inst, info=info, # channels and channel order - ch_names=np.array(ch_names), + ch_names=np.array(ch_names_picked), ch_types=np.array(ch_types), ch_order=ch_order, picks=picks, @@ -1041,9 +1080,5 @@ def _plot_sources(ica, inst, picks, exclude, start, stop, show, title, block, fig._update_annotation_segments() fig._draw_annotations() - # for blitting - fig.canvas.flush_events() - fig.mne.bg = fig.canvas.copy_from_bbox(fig.bbox) - plt_show(show, block=block) return fig diff --git a/mne/viz/misc.py b/mne/viz/misc.py index 7912006c6d3..a9e9b4b8f51 100644 --- a/mne/viz/misc.py +++ b/mne/viz/misc.py @@ -21,7 +21,6 @@ from collections import defaultdict import numpy as np -from scipy import linalg from ..defaults import DEFAULTS from ..fixes import _get_img_fdata @@ -39,7 +38,8 @@ _mask_to_onsets_offsets, _pl, _on_missing) from ..io.pick import _picks_by_type from ..filter import estimate_ringing_samples -from .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show +from .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show, + _figure_agg) def _index_info_cov(info, cov, exclude): @@ -116,9 +116,10 @@ def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True, .. versionchanged:: 0.19 Approximate ranks for each channel type are shown with red dashed lines. 
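# --- Reviewer sketch, not part of the patch: with the ``_plot_sources`` fix
# above, excluded components are only marked as "bad" when they are also in
# ``picks``, so plotting a subset that omits an excluded component no longer
# fails. ``raw`` and a fitted ``ica`` are assumed to exist.
ica.exclude = [0]
fig = ica.plot_sources(raw, picks=[1])  # component 0 excluded but not picked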
""" - from ..cov import Covariance import matplotlib.pyplot as plt from matplotlib.colors import Normalize + from scipy import linalg + from ..cov import Covariance info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude) del cov, exclude @@ -305,7 +306,7 @@ def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None, def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal', slices=None, show=True, show_indices=False, - show_orientation=False, img_output=False): + show_orientation=False, img_output=False, width=512): """Plot BEM contours on anatomical slices.""" import matplotlib.pyplot as plt from matplotlib import patheffects @@ -365,17 +366,21 @@ def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal', if img_output: n_col = n_axes = 1 - fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0)) + dpi = 96 + # 2x standard MRI resolution is probably good enough for the + # traces + w = width / dpi + figsize = (w, w / data.shape[x] * data.shape[y]) + fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k') + ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k') axs = [ax] * len(slices) - - w = fig.get_size_inches()[0] - fig.set_size_inches([w, w / data.shape[x] * data.shape[y]]) plt.close(fig) else: n_col = 4 fig, axs, _, _ = _prepare_trellis(len(slices), n_col) + fig.set_facecolor('k') + dpi = fig.get_dpi() n_axes = len(axs) - fig.set_facecolor('k') bounds = np.concatenate( [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float slicer = [slice(None)] * 3 @@ -438,13 +443,13 @@ def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal', if img_output: output = BytesIO() fig.savefig(output, bbox_inches='tight', - pad_inches=0, format='png') + pad_inches=0, format='png', dpi=dpi) out.append(base64.b64encode(output.getvalue()).decode('ascii')) fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0., hspace=0.) 
plt_show(show, fig=fig) - return out + return out, flip_z def plot_bem(subject=None, subjects_dir=None, orientation='coronal', @@ -555,7 +560,7 @@ def plot_bem(subject=None, subjects_dir=None, orientation='coronal', # Plot the contours return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices, - show, show_indices, show_orientation) + show, show_indices, show_orientation)[0] def _get_bem_plotting_surfaces(bem_path): @@ -693,7 +698,7 @@ def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None, else: ax.set_ylim([min_event - 1, max_event + 1]) - ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x]) + ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x]) ax.grid(True) diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 36bee9d1920..0392e4776a9 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -354,17 +354,12 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, # plot annotations (if any) fig._setup_annotation_colors() - fig._update_annotation_segments() fig._draw_annotations() # start with projectors dialog open, if requested if show_options: fig._toggle_proj_fig() - # for blitting - fig.canvas.flush_events() - fig.mne.bg = fig.canvas.copy_from_bbox(fig.bbox) - plt_show(show, block=block) return fig @@ -375,7 +370,8 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, picks=None, ax=None, color='black', xscale='linear', area_mode='std', area_alpha=0.33, dB=True, estimate='auto', show=True, n_jobs=1, average=False, line_alpha=None, - spatial_colors=True, sphere=None, verbose=None): + spatial_colors=True, sphere=None, window='hamming', + verbose=None): """%(plot_psd_doc)s. Parameters @@ -415,6 +411,9 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, %(plot_psd_line_alpha)s %(plot_psd_spatial_colors)s %(topomap_sphere_auto)s + %(window-psd)s + + .. 
versionadded:: 0.22.0 %(verbose)s Returns @@ -436,7 +435,8 @@ def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False, average=average, estimate=estimate, area_mode=area_mode, line_alpha=line_alpha, area_alpha=area_alpha, color=color, spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft, - n_overlap=n_overlap, reject_by_annotation=reject_by_annotation) + n_overlap=n_overlap, reject_by_annotation=reject_by_annotation, + window=window) plt_show(show) return fig @@ -531,8 +531,8 @@ def plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False, def _setup_channel_selections(raw, kind, order): """Get dictionary of channel groupings.""" - from ..selection import (read_selection, _SELECTIONS, _EEG_SELECTIONS, - _divide_to_regions) + from ..channels import (read_vectorview_selection, _SELECTIONS, + _EEG_SELECTIONS, _divide_to_regions) from ..utils import _get_stim_channel _check_option('group_by', kind, ('position', 'selection')) if kind == 'position': @@ -553,7 +553,7 @@ def _setup_channel_selections(raw, kind, order): # loop over regions keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS]) for key in keys: - channels = read_selection(key, info=raw.info) + channels = read_vectorview_selection(key, info=raw.info) picks = pick_channels(raw.ch_names, channels) picks = np.intersect1d(picks, order) if not len(picks): @@ -563,7 +563,7 @@ def _setup_channel_selections(raw, kind, order): misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True, ecg=True, emg=True, ref_meg=False, misc=True, resp=True, chpi=True, exci=True, ias=True, syst=True, - seeg=False, bio=True, ecog=False, fnirs=False, + seeg=False, bio=True, ecog=False, fnirs=False, dbs=False, exclude=()) if len(misc) and np.in1d(misc, order).any(): selections_dict['Misc'] = misc diff --git a/mne/viz/tests/conftest.py b/mne/viz/tests/conftest.py deleted file mode 100644 index e5e6cc06b36..00000000000 --- a/mne/viz/tests/conftest.py +++ /dev/null @@ -1,45 +0,0 @@ -# Authors: Robert Luke -# Eric Larson -# Alexandre Gramfort -# -# License: BSD (3-clause) - -import pytest -import numpy as np -import os.path as op - -from mne import create_info, EvokedArray, events_from_annotations, Epochs -from mne.channels import make_standard_montage -from mne.datasets.testing import data_path, _pytest_param -from mne.preprocessing.nirs import optical_density, beer_lambert_law -from mne.io import read_raw_nirx - - -@pytest.fixture() -def fnirs_evoked(): - """Create an fnirs evoked structure.""" - montage = make_standard_montage('biosemi16') - ch_names = montage.ch_names - ch_types = ['eeg'] * 16 - info = create_info(ch_names=ch_names, sfreq=20, ch_types=ch_types) - evoked_data = np.random.randn(16, 30) - evoked = EvokedArray(evoked_data, info=info, tmin=-0.2, nave=4) - evoked.set_montage(montage) - evoked.set_channel_types({'Fp1': 'hbo', 'Fp2': 'hbo', 'F4': 'hbo', - 'Fz': 'hbo'}, verbose='error') - return evoked - - -@pytest.fixture(params=[_pytest_param()]) -def fnirs_epochs(): - """Create an fnirs epoch structure.""" - fname = op.join(data_path(download=False), - 'NIRx', 'nirscout', 'nirx_15_2_recording_w_overlap') - raw_intensity = read_raw_nirx(fname, preload=False) - raw_od = optical_density(raw_intensity) - raw_haemo = beer_lambert_law(raw_od) - evts, _ = events_from_annotations(raw_haemo, event_id={'1.0': 1}) - evts_dct = {'A': 1} - tn, tx = -1, 2 - epochs = Epochs(raw_haemo, evts, event_id=evts_dct, tmin=tn, tmax=tx) - return epochs diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 
0cc56f326e2..9a16d159ec1 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -7,7 +7,6 @@ # # License: Simplified BSD -from mne.minimum_norm.inverse import apply_inverse import os.path as op from pathlib import Path import sys @@ -29,13 +28,14 @@ from mne.io._digitization import write_dig from mne.io.pick import pick_info from mne.io.constants import FIFF +from mne.minimum_norm import apply_inverse from mne.viz import (plot_sparse_source_estimates, plot_source_estimates, snapshot_brain_montage, plot_head_positions, plot_alignment, plot_sensors_connectivity, plot_brain_colorbar, link_brains, mne_analyze_colormap) from mne.viz._3d import _process_clim, _linearize_map, _get_map_ticks from mne.viz.utils import _fake_click -from mne.utils import (requires_pysurfer, requires_nibabel, traits_test, +from mne.utils import (requires_nibabel, traits_test, catch_logging, run_subprocess, modified_env) from mne.datasets import testing from mne.source_space import read_source_spaces @@ -100,7 +100,6 @@ def test_plot_head_positions(): @testing.requires_testing_data -@requires_pysurfer @traits_test @pytest.mark.slowtest def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): @@ -123,9 +122,11 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc, 'sample', colormap=colormap, background=(1, 1, 0), subjects_dir=subjects_dir, colorbar=True, clim='auto') brain.close() - pytest.raises(TypeError, plot_source_estimates, stc, 'sample', - figure='foo', hemi='both', clim='auto', - subjects_dir=subjects_dir) + del brain + with pytest.raises(TypeError, match='figure must be'): + plot_source_estimates( + stc, 'sample', figure='foo', hemi='both', clim='auto', + subjects_dir=subjects_dir) # now do sparse version vertices = sample_src[0]['vertno'] @@ -164,7 +165,7 @@ def test_plot_evoked_field(renderer): @pytest.mark.slowtest # can be slow on OSX @testing.requires_testing_data @traits_test -def test_plot_alignment(tmpdir, renderer): +def test_plot_alignment(tmpdir, renderer, mixed_fwd_cov_evoked): """Test plotting of -trans.fif files and MEG sensor layouts.""" # generate fiducials file for testing tempdir = str(tmpdir) @@ -208,6 +209,13 @@ def test_plot_alignment(tmpdir, renderer): src=sample_src) sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True, brain='white') + # mixed source space + mixed_src = mixed_fwd_cov_evoked[0]['src'] + assert mixed_src.kind == 'mixed' + plot_alignment(info, meg=['helmet', 'sensors'], dig=True, + coord_frame='head', trans=Path(trans_fname), + subject='sample', mri_fiducials=fiducials_path, + subjects_dir=subjects_dir, src=mixed_src) renderer.backend._close_all() # no-head version renderer.backend._close_all() @@ -279,10 +287,15 @@ def test_plot_alignment(tmpdir, renderer): src=src, dig=True, surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin']) sphere = make_sphere_model('auto', None, evoked.info) # one layer + # if you ask for a brain surface with a 1-layer sphere model it's an error + with pytest.raises(RuntimeError, match='Sphere model does not have'): + fig = plot_alignment(subject='sample', subjects_dir=subjects_dir, + surfaces=['brain'], bem=sphere) + # but you can ask for a specific brain surface, and # no info is permitted fig = plot_alignment(trans=trans_fname, subject='sample', meg=False, coord_frame='mri', subjects_dir=subjects_dir, - surfaces=['brain'], bem=sphere, show_axes=True) + surfaces=['white'], bem=sphere, show_axes=True) renderer.backend._close_all() if renderer._get_3d_backend() == 
'mayavi': import mayavi # noqa: F401 analysis:ignore @@ -300,7 +313,7 @@ def test_plot_alignment(tmpdir, renderer): plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True) # one layer bem with skull surfaces: - with pytest.raises(ValueError, match='sphere conductor model must have'): + with pytest.raises(RuntimeError, match='Sphere model does not.*boundary'): plot_alignment(info=info, trans=trans_fname, subject='sample', subjects_dir=subjects_dir, surfaces=['brain', 'head', 'inner_skull'], bem=sphere) @@ -317,7 +330,7 @@ def test_plot_alignment(tmpdir, renderer): plot_alignment(info=info, trans=trans_fname, subject='sample', subjects_dir=subjects_dir, surfaces=['white', 'pial']) - with pytest.raises(TypeError, match='all entries in surfaces must be'): + with pytest.raises(TypeError, match='surfaces.*must be'): plot_alignment(info=info, trans=trans_fname, subject='sample', subjects_dir=subjects_dir, surfaces=[1]) @@ -371,7 +384,6 @@ def test_plot_alignment(tmpdir, renderer): @pytest.mark.slowtest # can be slow on OSX @testing.requires_testing_data -@requires_pysurfer @traits_test def test_process_clim_plot(renderer_interactive, brain_gc): """Test functionality for determining control points with stc.plot.""" @@ -387,12 +399,21 @@ def test_process_clim_plot(renderer_interactive, brain_gc): stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample') # Test for simple use cases - stc.plot(**kwargs) - stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs) - stc.plot(colormap='hot', clim='auto', **kwargs) - stc.plot(colormap='mne', clim='auto', **kwargs) - stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs) - pytest.raises(TypeError, stc.plot, clim='auto', figure=[0], **kwargs) + brain = stc.plot(**kwargs) + assert brain.data['center'] is None + brain.close() + brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs) + assert brain.data['center'] == 0. + brain.close() + brain = stc.plot(colormap='hot', clim='auto', **kwargs) + brain.close() + brain = stc.plot(colormap='mne', clim='auto', **kwargs) + brain.close() + brain = stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, + **kwargs) + brain.close() + with pytest.raises(TypeError, match='must be a'): + stc.plot(clim='auto', figure=[0], **kwargs) # Test for correct clim values with pytest.raises(ValueError, match='monotonically'): @@ -414,7 +435,8 @@ def test_process_clim_plot(renderer_interactive, brain_gc): # Test handling of degenerate data: thresholded maps stc._data.fill(0.) 
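# --- Reviewer sketch, not part of the patch: the behaviour the updated
# clim test now asserts. With symmetric ``pos_lims`` the returned Brain uses
# a diverging colormap centred at zero; with plain ``lims`` it stays
# uncentred. ``stc`` and ``subjects_dir`` are assumed to exist and a 3D
# backend to be available.
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir,
                 clim=dict(pos_lims=(10, 50, 90)))
assert brain.data['center'] == 0.
brain.close()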
with pytest.warns(RuntimeWarning, match='All data were zero'): - plot_source_estimates(stc, **kwargs) + brain = plot_source_estimates(stc, **kwargs) + brain.close() def _assert_mapdata_equal(a, b): @@ -494,19 +516,18 @@ def test_stc_mpl(): stc_data = np.ones((n_verts * n_time)) stc_data.shape = (n_verts, n_time) stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample') - with pytest.warns(RuntimeWarning, match='not included'): - stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven', - hemi='rh', smoothing_steps=2, subject='sample', - backend='matplotlib', spacing='oct1', initial_time=0.001, - colormap='Reds') - fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor', - hemi='lh', smoothing_steps=2, subject='sample', - backend='matplotlib', spacing='ico2', time_viewer=True, - colormap='mne') - time_viewer = fig.time_viewer - _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5)) # change t - time_viewer.canvas.key_press_event('ctrl+right') - time_viewer.canvas.key_press_event('left') + stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven', + hemi='rh', smoothing_steps=7, subject='sample', + backend='matplotlib', spacing='oct1', initial_time=0.001, + colormap='Reds') + fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor', + hemi='lh', smoothing_steps=7, subject='sample', + backend='matplotlib', spacing='ico2', time_viewer=True, + colormap='mne') + time_viewer = fig.time_viewer + _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5)) # change t + time_viewer.canvas.key_press_event('ctrl+right') + time_viewer.canvas.key_press_event('left') pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir, hemi='both', subject='sample', backend='matplotlib') pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir, @@ -576,16 +597,15 @@ def test_snapshot_brain_montage(renderer): @pytest.mark.slowtest # can be slow on OSX @testing.requires_testing_data -@requires_pysurfer -@traits_test @pytest.mark.parametrize('pick_ori', ('vector', None)) @pytest.mark.parametrize('kind', ('surface', 'volume', 'mixed')) def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked, pick_ori, kind, brain_gc): """Test plotting of scalar and vector source estimates.""" + backend = renderer_interactive._get_3d_backend() + is_pyvista = backend != 'mayavi' invs, evoked = all_src_types_inv_evoked inv = invs[kind] - is_pyvista = renderer_interactive._get_3d_backend() == 'pyvista' with pytest.warns(None): # PCA mag stc = apply_inverse(evoked, inv, pick_ori=pick_ori) stc.data[1] *= -1 # make it signed @@ -599,7 +619,7 @@ def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked, ) if pick_ori != 'vector': kwargs['surface'] = 'white' - kwargs['backend'] = renderer_interactive._get_3d_backend() + kwargs['backend'] = backend # Mayavi can't handle non-surface if kind != 'surface' and not is_pyvista: with pytest.raises(RuntimeError, match='PyVista'): @@ -633,10 +653,14 @@ def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked, # flatmaps (mostly a lot of error checking) these_kwargs = kwargs.copy() - these_kwargs.update(surface='flat', views='auto') + these_kwargs.update(surface='flat', views='auto', hemi='both', + verbose='debug') if kind == 'surface' and pick_ori != 'vector' and is_pyvista: - with pytest.raises(FileNotFoundError, match='flatmap'): - meth(**these_kwargs) # sample does not have them + with catch_logging() as log: + with pytest.raises(FileNotFoundError, match='flatmap'): + meth(**these_kwargs) # 
sample does not have them + log = log.getvalue() + assert 'offset: 0' in log fs_stc = stc.copy() fs_stc.subject = 'fsaverage' # this is wrong, but don't have to care flat_meth = getattr(fs_stc, meth_key) @@ -652,6 +676,7 @@ def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked, else: brain = flat_meth(**these_kwargs) brain.close() + del brain these_kwargs.update(surface='inflated', views='flat') with pytest.raises(ValueError, match='surface="flat".*views="flat"'): flat_meth(**these_kwargs) @@ -696,14 +721,26 @@ def test_plot_sensors_connectivity(renderer): n_channels = len(picks) con = np.random.RandomState(42).randn(n_channels, n_channels) info = raw.info - with pytest.raises(TypeError): - plot_sensors_connectivity(info='foo', con=con, - picks=picks) - with pytest.raises(ValueError): - plot_sensors_connectivity(info=info, con=con[::2, ::2], - picks=picks) - - plot_sensors_connectivity(info=info, con=con, picks=picks) + with pytest.raises(TypeError, match='must be an instance of Info'): + plot_sensors_connectivity(info='foo', con=con, picks=picks) + with pytest.raises(ValueError, match='does not correspond to the size'): + plot_sensors_connectivity(info=info, con=con[::2, ::2], picks=picks) + + fig = plot_sensors_connectivity(info=info, con=con, picks=picks) + if renderer._get_3d_backend() == 'pyvista': + try: + # pyvista<0.30.0 + title = fig.plotter.scalar_bar.GetTitle() + except AttributeError: + # pyvista>=0.30.0 + title = list(fig.plotter.scalar_bars.values())[0].GetTitle() + else: + assert renderer._get_3d_backend() == 'mayavi' + # the last thing we add is the Tube, so we need to go + # vtkDataSource->Stripper->Tube->ModuleManager + mod_man = fig.children[-1].children[0].children[0].children[0] + title = mod_man.scalar_lut_manager.scalar_bar.title + assert title == 'Connectivity' @pytest.mark.parametrize('orientation', ('horizontal', 'vertical')) @@ -736,11 +773,10 @@ def test_brain_colorbar(orientation, diverging, lims): @pytest.mark.slowtest # slow-ish on Travis OSX -@requires_pysurfer @testing.requires_testing_data @traits_test def test_mixed_sources_plot_surface(renderer_interactive): - """Test plot_surface() for mixed source space.""" + """Test plot_surface() for mixed source space.""" src = read_source_spaces(fwd_fname2) N = np.sum([s['nuse'] for s in src]) # number of sources @@ -753,9 +789,11 @@ def test_mixed_sources_plot_surface(renderer_interactive): stc = MixedSourceEstimate(data, vertno, 0, 1) - stc.surface().plot(views='lat', hemi='split', - subject='fsaverage', subjects_dir=subjects_dir, - colorbar=False) + brain = stc.surface().plot(views='lat', hemi='split', + subject='fsaverage', subjects_dir=subjects_dir, + colorbar=False) + brain.close() + del brain @testing.requires_testing_data @@ -781,7 +819,7 @@ def test_link_brains(renderer_interactive): subjects_dir=subjects_dir, colorbar=True, clim='auto' ) - if renderer_interactive._get_3d_backend() != 'pyvista': + if renderer_interactive._get_3d_backend() == 'mayavi': with pytest.raises(NotImplementedError, match='backend is pyvista'): link_brains(brain) else: diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py index 4ac85835eaf..9107ba82533 100644 --- a/mne/viz/tests/test_epochs.py +++ b/mne/viz/tests/test_epochs.py @@ -130,12 +130,8 @@ def test_plot_epochs_colors(epochs): epochs.plot(epoch_colors=[['r'], ['b']]) # epochs obj has only 1 epoch with pytest.raises(ValueError, match=r'epoch colors for epoch \d+ has'): epochs.plot(epoch_colors=[['r']]) # need 1 color for each 
channel - # also test event_colors - with pytest.warns(DeprecationWarning, match='replaced by event_color in'): - epochs.plot(event_colors='r') - with pytest.warns(DeprecationWarning, - match='in 0.23. Since you passed values for both'): - epochs.plot(event_colors='r', event_color='b') + # also test event_color + epochs.plot(event_color='b') def test_plot_epochs_scale_bar(epochs): diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index 27445c0d157..1b80c511910 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -22,7 +22,7 @@ from mne import (read_events, Epochs, read_cov, compute_covariance, make_fixed_length_events, compute_proj_evoked) from mne.io import read_raw_fif -from mne.utils import run_tests_if_main, catch_logging, requires_version +from mne.utils import catch_logging, requires_version from mne.viz import plot_compare_evokeds, plot_evoked_white from mne.viz.utils import _fake_click from mne.datasets import testing @@ -39,7 +39,8 @@ # Use a subset of channels for plotting speed # make sure we have a magnetometer and a pair of grad pairs for topomap. -default_picks = (0, 1, 2, 3, 4, 6, 7, 61, 122, 183, 244, 305) +default_picks = (0, 1, 2, 3, 4, 6, 7, 61, 122, 183, 244, 305, + 315, 316, 317, 318) # EEG channels sel = (0, 7) @@ -50,7 +51,10 @@ def _get_epochs(picks=default_picks): events = read_events(event_name) epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks, decim=10, verbose='error') - epochs.info['bads'] = [epochs.ch_names[-1]] + epochs.info['bads'] = [ + epochs.ch_names[-5], # MEG + epochs.ch_names[-1] # EEG + ] epochs.info.normalize_proj() return epochs @@ -72,7 +76,8 @@ def test_plot_evoked_cov(): evoked = _get_epochs().average() cov = read_cov(cov_fname) cov['projs'] = [] # avoid warnings - evoked.plot(noise_cov=cov, time_unit='s') + with pytest.warns(RuntimeWarning, match='No average EEG reference'): + evoked.plot(noise_cov=cov, time_unit='s') with pytest.raises(TypeError, match='Covariance'): evoked.plot(noise_cov=1., time_unit='s') with pytest.raises(IOError, match='No such file'): @@ -96,7 +101,7 @@ def test_plot_evoked(): fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo', time_unit='s') amplitudes = _get_amplitudes(fig) - assert len(amplitudes) == 12 + assert len(amplitudes) == len(default_picks) assert evoked.proj is False # Test a click ax = fig.get_axes()[0] @@ -122,9 +127,26 @@ def test_plot_evoked(): proj='interactive', axes='foo', time_unit='s') plt.close('all') - # test GFP only - evoked.plot(gfp='only', time_unit='s') - pytest.raises(ValueError, evoked.plot, gfp='foo', time_unit='s') + # test `gfp='only'`: GFP (EEG) and RMS (MEG) + fig, ax = plt.subplots(3) + evoked.plot(gfp='only', time_unit='s', axes=ax) + + assert len(ax[0].lines) == len(ax[1].lines) == len(ax[2].lines) == 1 + + assert ax[0].get_title() == 'EEG (3 channels)' + assert ax[0].texts[0].get_text() == 'GFP' + + assert ax[1].get_title() == 'Gradiometers (9 channels)' + assert ax[1].texts[0].get_text() == 'RMS' + + assert ax[2].get_title() == 'Magnetometers (2 channels)' + assert ax[1].texts[0].get_text() == 'RMS' + + plt.close('all') + + # Test invalid `gfp` + with pytest.raises(ValueError): + evoked.plot(gfp='foo', time_unit='s') # plot with bad channels excluded, spatial_colors, zorder & pos. 
layout evoked.rename_channels({'MEG 0133': 'MEG 0000'}) @@ -165,9 +187,9 @@ def _get_amplitudes(fig): @pytest.mark.parametrize('picks, rlims, avg_proj', [ - (default_picks, (0.59, 0.61), False), # MEG - (np.arange(340, 360), (0.49, 0.51), True), # EEG - (np.arange(340, 360), (0.78, 0.80), False), # EEG + (default_picks[:-4], (0.59, 0.61), False), # MEG + (np.arange(340, 360), (0.56, 0.57), True), # EEG + (np.arange(340, 360), (0.79, 0.81), False), # EEG ]) def test_plot_evoked_reconstruct(picks, rlims, avg_proj): """Test proj="reconstruct".""" @@ -259,6 +281,9 @@ def test_plot_evoked_image(): pytest.raises(ValueError, evoked.plot_image, group_by=group_by, axes=axes) + with pytest.raises(ValueError, match='`clim` must be a dict.'): + evoked.plot_image(clim=[-4, 4]) + def test_plot_white(): """Test plot_white.""" @@ -266,34 +291,36 @@ def test_plot_white(): cov['method'] = 'empirical' cov['projs'] = [] # avoid warnings evoked = _get_epochs().average() + evoked.set_eeg_reference('average') # Avoid warnings + # test rank param. - evoked.plot_white(cov, rank={'mag': 101, 'grad': 201}, time_unit='s') - fig = evoked.plot_white(cov, rank={'mag': 101}, time_unit='s') # test rank - evoked.plot_white(cov, rank={'grad': 201}, time_unit='s', axes=fig.axes) - with pytest.raises(ValueError, match=r'must have shape \(3,\), got \(2,'): + with pytest.raises(ValueError, match='exceeds'): + evoked.plot_white(cov, rank={'mag': 10}) + evoked.plot_white(cov, rank={'mag': 1, 'grad': 8, 'eeg': 2}, time_unit='s') + fig = evoked.plot_white(cov, rank={'mag': 1}, time_unit='s') # test rank + evoked.plot_white(cov, rank={'grad': 8}, time_unit='s', axes=fig.axes) + with pytest.raises(ValueError, match=r'must have shape \(4,\), got \(2,'): evoked.plot_white(cov, axes=fig.axes[:2]) with pytest.raises(ValueError, match='When not using SSS'): evoked.plot_white(cov, rank={'meg': 306}) evoked.plot_white([cov, cov], time_unit='s') plt.close('all') - assert 'eeg' not in evoked fig = plot_evoked_white(evoked, [cov, cov]) - assert len(fig.axes) == 2 * 2 - axes = np.array(fig.axes).reshape(2, 2) + assert len(fig.axes) == 3 * 2 + axes = np.array(fig.axes).reshape(3, 2) plot_evoked_white(evoked, [cov, cov], axes=axes) - with pytest.raises(ValueError, match=r'have shape \(2, 2\), got'): + with pytest.raises(ValueError, match=r'have shape \(3, 2\), got'): plot_evoked_white(evoked, [cov, cov], axes=axes[:, :1]) # Hack to test plotting of maxfiltered data - evoked_sss = evoked.copy() + evoked_sss = _get_epochs(picks='meg').average() sss = dict(sss_info=dict(in_order=80, components=np.arange(80))) evoked_sss.info['proc_history'] = [dict(max_info=sss)] evoked_sss.plot_white(cov, rank={'meg': 64}) with pytest.raises(ValueError, match='When using SSS'): evoked_sss.plot_white(cov, rank={'grad': 201}) evoked_sss.plot_white(cov, time_unit='s') - plt.close('all') def test_plot_compare_evokeds(): @@ -301,7 +328,7 @@ def test_plot_compare_evokeds(): evoked = _get_epochs().average() # test defaults figs = plot_compare_evokeds(evoked) - assert len(figs) == 2 + assert len(figs) == 3 # test picks, combine, and vlines (1-channel pick also shows sensor inset) picks = ['MEG 0113', 'mag'] + 2 * [['MEG 0113', 'MEG 0112']] + [[0, 1]] vlines = [[0.1, 0.2], []] + 3 * ['auto'] @@ -358,6 +385,11 @@ def test_plot_compare_evokeds(): plot_compare_evokeds(evoked_dict, cmap=('cmap title', 'inferno'), linestyles=['-', ':', '--']) plt.close('all') + # test combine + match = 'combine must be an instance of None, callable, or str' + with pytest.raises(TypeError, 
match=match): + plot_compare_evokeds(evoked, combine=["mean", "gfp"]) + plt.close('all') # test warnings with pytest.warns(RuntimeWarning, match='in "picks"; cannot combine'): plot_compare_evokeds(evoked, picks=[0], combine='median') @@ -484,6 +516,3 @@ def get_axes_midpoints(axes): topomap_args={'axes': topo_axes}, title=None) midpoints_after = get_axes_midpoints(topo_axes) assert (np.linalg.norm(midpoints_before - midpoints_after) < 0.1).all() - - -run_tests_if_main() diff --git a/mne/viz/tests/test_figure.py b/mne/viz/tests/test_figure.py index 23643750a6c..9f28d1b87b8 100644 --- a/mne/viz/tests/test_figure.py +++ b/mne/viz/tests/test_figure.py @@ -15,4 +15,4 @@ def test_browse_figure_constructor(): def test_psd_figure_constructor(): """Test error handling in MNELineFigure constructor.""" with pytest.raises(TypeError, match='an instance of Raw or Epochs, got'): - _psd_figure('foo', *((None,) * 18)) + _psd_figure('foo', *((None,) * 19)) diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py index b6831db68a0..1c8dba50b88 100644 --- a/mne/viz/tests/test_ica.py +++ b/mne/viz/tests/test_ica.py @@ -14,7 +14,7 @@ make_fixed_length_events) from mne.io import read_raw_fif from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs -from mne.utils import (run_tests_if_main, requires_sklearn, _click_ch_name, +from mne.utils import (requires_sklearn, _click_ch_name, catch_logging, _close_event) from mne.viz.ica import _create_properties_layout, plot_ica_properties from mne.viz.utils import _fake_click @@ -54,6 +54,8 @@ def _get_epochs(): @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_ica_components(): """Test plotting of ICA solutions.""" res = 8 @@ -70,7 +72,12 @@ def test_plot_ica_components(): plt.close('all') # test interactive mode (passing 'inst' arg) - ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16) + with catch_logging() as log: + ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16, + verbose='debug', ch_type='grad') + log = log.getvalue() + assert 'grad data' in log + assert 'Interpolation mode local to mean' in log fig = plt.gcf() # test title click @@ -112,6 +119,7 @@ def test_plot_ica_properties(): """Test plotting of ICA properties.""" raw = _get_raw(preload=True).crop(0, 5) raw.add_proj([], remove_existing=True) + raw.info['highpass'] = 1.0 # fake high-pass filtering events = make_fixed_length_events(raw) picks = _get_picks(raw)[:6] pick_names = [raw.ch_names[k] for k in picks] @@ -133,7 +141,11 @@ def test_plot_ica_properties(): _create_properties_layout(figsize=(2, 2), fig=fig) topoargs = dict(topomap_args={'res': 4, 'contours': 0, "sensors": False}) - ica.plot_properties(raw, picks=0, **topoargs) + with catch_logging() as log: + ica.plot_properties(raw, picks=0, verbose='debug', **topoargs) + log = log.getvalue() + assert raw.ch_names[0] == 'MEG 0113' + assert 'Interpolation mode local to mean' in log, log ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs) ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5}, topomap_args={'res': 4, 'colorbar': True}, @@ -198,6 +210,8 @@ def test_plot_ica_properties(): @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_ica_sources(): """Test plotting of ICA panel.""" raw = read_raw_fif(raw_fname).crop(0, 1).load_data() @@ -222,6 +236,9 @@ def test_plot_ica_sources(): _close_event(fig) assert 
len(plt.get_fignums()) == 0 assert_array_equal(ica.exclude, [0]) + # test when picks does not include ica.exclude. + fig = ica.plot_sources(raw, picks=[1]) + assert len(plt.get_fignums()) == 1 plt.close('all') # dtype can change int->np.int64 after load, test it explicitly @@ -232,7 +249,7 @@ def test_plot_ica_sources(): fig = ica.plot_sources(long_raw) assert len(plt.get_fignums()) == 1 fig.canvas.draw() - _fake_click(fig, fig.mne.ax_main, (-0.1, 0), xform='data', button=3) + _click_ch_name(fig, ch_index=0, button=3) assert len(fig.mne.child_figs) == 1 assert len(plt.get_fignums()) == 2 # close child fig directly (workaround for mpl issue #18609) @@ -283,9 +300,12 @@ def test_plot_ica_sources(): @pytest.mark.slowtest @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_ica_overlay(): """Test plotting of ICA cleaning.""" raw = _get_raw(preload=True) + raw.info['highpass'] = 1.0 # fake high-pass filtering picks = _get_picks(raw) ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, random_state=0) # can't use info.normalize_proj here because of how and when ICA and Epochs @@ -307,6 +327,7 @@ def test_plot_ica_overlay(): # smoke test for CTF raw = read_raw_fif(raw_ctf_fname) raw.apply_gradient_compensation(3) + raw.info['highpass'] = 1.0 # fake high-pass filtering picks = pick_types(raw.info, meg=True, ref_meg=False) ica = ICA(n_components=2, ) ica.fit(raw, picks=picks) @@ -323,6 +344,8 @@ def _get_geometry(fig): @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_ica_scores(): """Test plotting of ICA scores.""" raw = _get_raw() @@ -365,6 +388,8 @@ def test_plot_ica_scores(): @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_instance_components(): """Test plotting of components as instances of raw and epochs.""" raw = _get_raw() @@ -396,6 +421,3 @@ def test_plot_instance_components(): _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data') _fake_click(fig, ax, [-0.1, 0.9]) # click on y-label fig.canvas.key_press_event('escape') - - -run_tests_if_main() diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py index 283b428d171..c004dd23d61 100644 --- a/mne/viz/tests/test_raw.py +++ b/mne/viz/tests/test_raw.py @@ -2,10 +2,10 @@ # # License: Simplified BSD -import numpy as np import os.path as op import itertools +import numpy as np from numpy.testing import assert_allclose import pytest import matplotlib @@ -14,8 +14,7 @@ from mne import read_events, pick_types, Annotations, create_info from mne.datasets import testing from mne.io import read_raw_fif, read_raw_ctf, RawArray -from mne.utils import (run_tests_if_main, _dt_to_stamp, _click_ch_name, - _close_event) +from mne.utils import _dt_to_stamp, _click_ch_name, _close_event from mne.viz.utils import _fake_click from mne.annotations import _sync_onset from mne.viz import plot_raw, plot_sensors @@ -307,6 +306,10 @@ def test_plot_raw_ssp_interaction(raw): _fake_click(ssp_fig, ssp_fig.mne.proj_all.ax, [0.5, 0.5]) _fake_click(ssp_fig, ssp_fig.mne.proj_all.ax, [0.5, 0.5], kind='release') assert _proj_status(ax) == [True, False, False] + fig.canvas.key_press_event('J') + assert _proj_status(ax) == [True, True, True] + fig.canvas.key_press_event('J') + assert _proj_status(ax) == [True, False, False] # turn all on _fake_click(ssp_fig, ssp_fig.mne.proj_all.ax, [0.5, 0.5]) # all on _fake_click(ssp_fig, 
ssp_fig.mne.proj_all.ax, [0.5, 0.5], kind='release') @@ -526,7 +529,34 @@ def test_plot_annotations(raw): with pytest.warns(RuntimeWarning, match='expanding outside'): raw.set_annotations(annot) _annotation_helper(raw) - plt.close('all') + # test annotation visibility toggle + fig = raw.plot() + assert len(fig.mne.annotations) == 1 + assert len(fig.mne.annotation_texts) == 1 + fig.canvas.key_press_event('a') # start annotation mode + checkboxes = fig.mne.show_hide_annotation_checkboxes + checkboxes.set_active(0) + assert len(fig.mne.annotations) == 0 + assert len(fig.mne.annotation_texts) == 0 + checkboxes.set_active(0) + assert len(fig.mne.annotations) == 1 + assert len(fig.mne.annotation_texts) == 1 + + +@pytest.mark.parametrize('hide_which', ([], [0], [1], [0, 1])) +def test_remove_annotations(raw, hide_which): + """Test that right-click doesn't remove hidden annotation spans.""" + ann = Annotations(onset=[2, 1], duration=[1, 3], + description=['foo', 'bar']) + raw.set_annotations(ann) + assert len(raw.annotations) == 2 + fig = raw.plot() + fig.canvas.key_press_event('a') # start annotation mode + checkboxes = fig.mne.show_hide_annotation_checkboxes + for which in hide_which: + checkboxes.set_active(which) + _fake_click(fig, fig.mne.ax_main, (2.5, 0.1), xform='data', button=3) + assert len(raw.annotations) == len(hide_which) @pytest.mark.parametrize('filtorder', (0, 2)) # FIR, IIR @@ -543,7 +573,8 @@ def test_plot_raw_filtered(filtorder, raw): raw.plot(lowpass=40, clipping='transparent', filtorder=filtorder) raw.plot(highpass=1, clipping='clamp', filtorder=filtorder) raw.plot(lowpass=40, butterfly=True, filtorder=filtorder) - plt.close('all') + # shouldn't break if all shown are non-data + RawArray(np.zeros((1, 100)), create_info(1, 20., 'stim')).plot(lowpass=5) def test_plot_raw_psd(raw): @@ -695,4 +726,7 @@ def test_plot_sensors(raw): raw.plot_sensors() -run_tests_if_main() +def test_scalings_int(): + """Test that auto scalings access samples using integers.""" + raw = RawArray(np.zeros((1, 500)), create_info(1, 1000., 'eeg')) + raw.plot(scalings='auto') diff --git a/mne/viz/tests/test_topo.py b/mne/viz/tests/test_topo.py index 100d60058a8..de76f31a3ec 100644 --- a/mne/viz/tests/test_topo.py +++ b/mne/viz/tests/test_topo.py @@ -84,8 +84,8 @@ def return_inds(d): # to test function kwarg to zorder arg of evoked.plot time_unit='ms'), ts_args=dict(spatial_colors=True, zorder=return_inds, time_unit='s')) - pytest.raises(ValueError, evoked.plot_joint, ts_args=dict(axes=True, - time_unit='s')) + with pytest.raises(ValueError, match='If one of `ts_args` and'): + evoked.plot_joint(ts_args=dict(axes=True, time_unit='s')) axes = plt.subplots(nrows=3)[-1].flatten().tolist() evoked.plot_joint(times=[0], picks=[6, 7, 8], ts_args=dict(axes=axes[0]), @@ -108,6 +108,21 @@ def return_inds(d): # to test function kwarg to zorder arg of evoked.plot topomap_args=dict(proj='reconstruct')) plt.close('all') + # test sEEG (gh:8733) + evoked.del_proj().pick_types('mag') # avoid overlapping positions error + mapping = {ch_name: 'seeg' for ch_name in evoked.ch_names} + with pytest.warns(RuntimeWarning, match='The unit .* has changed from .*'): + evoked.set_channel_types(mapping) + evoked.plot_joint() + + # test DBS (gh:8739) + evoked = _get_epochs().average().pick_types('mag') + mapping = {ch_name: 'dbs' for ch_name in evoked.ch_names} + with pytest.warns(RuntimeWarning, match='The unit for'): + evoked.set_channel_types(mapping) + evoked.plot_joint() + plt.close('all') + def test_plot_topo(): """Test plotting of 
ERP topography.""" @@ -166,6 +181,13 @@ def test_plot_topo(): evoked.pick_types(meg=True).plot_topo(noise_cov=cov) plt.close('all') + # Test exclude parameter + exclude = ['MEG 0112'] + fig = picked_evoked.plot_topo(exclude=exclude) + n_axes_expected = len(picked_evoked.info['ch_names']) - len(exclude) + n_axes_found = len(fig.axes[0].lines) + assert n_axes_found == n_axes_expected + # test plot_topo evoked.plot_topo() # should auto-find layout _line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data, diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 5b89ee64b79..741d4bf4ef1 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -14,8 +14,6 @@ import pytest import matplotlib import matplotlib.pyplot as plt -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.figure import Figure from matplotlib.patches import Circle from mne import (read_evokeds, read_proj, make_fixed_length_events, Epochs, @@ -60,9 +58,8 @@ def test_plot_topomap_interactive(): evoked.add_proj(compute_proj_evoked(evoked, n_mag=1)) plt.close('all') - fig = Figure() - canvas = FigureCanvas(fig) - ax = fig.gca() + fig = plt.figure() + ax, canvas = fig.gca(), fig.canvas kwargs = dict(vmin=-240, vmax=240, times=[0.1], colorbar=False, axes=ax, res=8, time_unit='s') @@ -133,22 +130,28 @@ def test_plot_projs_topomap(): plot_projs_topomap([eeg_proj], info_meg) -def test_plot_topomap_animation(): +def test_plot_topomap_animation(capsys): """Test topomap plotting.""" # evoked evoked = read_evokeds(evoked_fname, 'Left Auditory', baseline=(None, 0)) # Test animation _, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1], - butterfly=False, time_unit='s') + butterfly=False, time_unit='s', + verbose='debug') anim._func(1) # _animate has to be tested separately on 'Agg' backend. + out, _ = capsys.readouterr() + assert 'Interpolation mode local to 0' in out plt.close('all') -def test_plot_topomap_animation_nirs(fnirs_evoked): +@pytest.mark.filterwarnings('ignore:.*No contour levels.*:UserWarning') +def test_plot_topomap_animation_nirs(fnirs_evoked, capsys): """Test topomap plotting for nirs data.""" - fig, anim = fnirs_evoked.animate_topomap(ch_type='hbo') + fig, anim = fnirs_evoked.animate_topomap(ch_type='hbo', verbose='debug') anim._func(1) # _animate has to be tested separately on 'Agg' backend. 
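# --- Editorial illustration (not part of the patch) -------------------------
# The two animation tests above assert on the new "Interpolation mode ..."
# debug message.  A minimal sketch of the user-facing call that exercises the
# same code path; the sample-dataset file name is an assumption made for this
# sketch, not something introduced by the patch.
import mne

sample_dir = mne.datasets.sample.data_path()
evoked = mne.read_evokeds(sample_dir + '/MEG/sample/sample_audvis-ave.fif',
                          'Left Auditory', baseline=(None, 0))
fig, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
                                   butterfly=False, time_unit='s',
                                   verbose='debug')  # prints interpolation mode
# -----------------------------------------------------------------------------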
+ out, _ = capsys.readouterr() + assert 'Interpolation mode head to 0' in out assert len(fig.axes) == 2 plt.close('all') @@ -198,14 +201,14 @@ def test_plot_topomap_basic(monkeypatch): # border=0 and border='mean': # --------------------------- - ch_names = list('abcde') - ch_pos = np.array([[0, 0, 1], [1, 0, 0], [-1, 0, 0], - [0, -1, 0], [0, 1, 0]]) - ch_pos_dict = {name: pos for name, pos in zip(ch_names, ch_pos)} + ch_pos = np.array(sum(([[0, 0, r], [r, 0, 0], [-r, 0, 0], + [0, -r, 0], [0, r, 0]] + for r in np.linspace(0.2, 1.0, 5)), [])) + rng = np.random.RandomState(23) + data = np.full(len(ch_pos), 5) + rng.randn(len(ch_pos)) + info = create_info(len(ch_pos), 250, 'eeg') + ch_pos_dict = {name: pos for name, pos in zip(info['ch_names'], ch_pos)} dig = make_dig_montage(ch_pos_dict, coord_frame='head') - - data = np.full(5, 5) + np.random.RandomState(23).randn(5) - info = create_info(ch_names, 250, ['eeg'] * 5) info.set_montage(dig) # border=0 @@ -213,7 +216,7 @@ def test_plot_topomap_basic(monkeypatch): img_data = ax.get_array().data assert np.abs(img_data[31, 31] - data[0]) < 0.12 - assert np.abs(img_data[10, 55]) < 0.3 + assert np.abs(img_data[0, 0]) < 1.5 # border='mean' ax, _ = plot_topomap(data, info, extrapolate='head', border='mean', @@ -221,7 +224,7 @@ def test_plot_topomap_basic(monkeypatch): img_data = ax.get_array().data assert np.abs(img_data[31, 31] - data[0]) < 0.12 - assert img_data[10, 54] > 5 + assert img_data[0, 0] > 5 # error when not numeric or str: error_msg = 'border must be an instance of numeric or str' @@ -554,6 +557,19 @@ def test_plot_topomap_bads(): plt.close('all') +def test_plot_topomap_bads_grad(): + """Test plotting topomap with bad gradiometer channels (gh-8802).""" + import matplotlib.pyplot as plt + data = np.random.RandomState(0).randn(203) + info = read_info(evoked_fname) + info['bads'] = ['MEG 2242'] + picks = pick_types(info, meg='grad') + info = pick_info(info, picks) + assert len(info['chs']) == 203 + plot_topomap(data, info, res=8) + plt.close('all') + + def test_plot_topomap_nirs_overlap(fnirs_epochs): """Test plotting nirs topomap with overlapping channels (gh-7414).""" fig = fnirs_epochs['A'].average(picks='hbo').plot_topomap() @@ -562,11 +578,19 @@ def test_plot_topomap_nirs_overlap(fnirs_epochs): @requires_sklearn +@pytest.mark.filterwarnings('ignore:.*max_iter.*will be changed.*:' + 'DeprecationWarning') def test_plot_topomap_nirs_ica(fnirs_epochs): """Test plotting nirs ica topomap.""" from mne.preprocessing import ICA fnirs_epochs = fnirs_epochs.load_data().pick(picks='hbo') fnirs_epochs = fnirs_epochs.pick(picks=range(30)) + + # fake high-pass filtering and hide the fact that the epochs were + # baseline corrected + fnirs_epochs.info['highpass'] = 1.0 + fnirs_epochs.baseline = None + ica = ICA().fit(fnirs_epochs) fig = ica.plot_components() assert len(fig[0].axes) == 20 diff --git a/mne/viz/topo.py b/mne/viz/topo.py index d2f95aca3d5..2c758676220 100644 --- a/mne/viz/topo.py +++ b/mne/viz/topo.py @@ -563,11 +563,12 @@ def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None, interpolation='nearest')) -def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, - border='none', ylim=None, scalings=None, title=None, - proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k', - fig_background=None, axis_facecolor='k', font_color='w', - merge_channels=False, legend=True, axes=None, show=True, +def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, + color=None, border='none', ylim=None, 
scalings=None, + title=None, proj=False, vline=(0.,), hline=(0.,), + fig_facecolor='k', fig_background=None, + axis_facecolor='k', font_color='w', merge_channels=False, + legend=True, axes=None, exclude='bads', show=True, noise_cov=None): """Plot 2D topography of evoked responses. @@ -632,12 +633,15 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, See matplotlib documentation for more details. axes : instance of matplotlib Axes | None Axes to plot into. If None, axes will be created. - show : bool - Show figure if True. noise_cov : instance of Covariance | str | None Noise covariance used to whiten the data while plotting. Whitened data channels names are shown in italic. Can be a string to load a covariance from disk. + exclude : list of str | 'bads' + Channels names to exclude from being shown. If 'bads', the + bad channels are excluded. By default, exclude is set to 'bads'. + show : bool + Show figure if True. .. versionadded:: 0.16.0 @@ -679,14 +683,14 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, raise ValueError('All evoked.picks must be the same') ch_names = _clean_names(ch_names) if merge_channels: - picks = _pair_grad_sensors(info, topomap_coords=False) + picks = _pair_grad_sensors(info, topomap_coords=False, exclude=exclude) chs = list() for pick in picks[::2]: ch = info['chs'][pick] ch['ch_name'] = ch['ch_name'][:-1] + 'X' chs.append(ch) info['chs'] = chs - info['bads'] = list() # bads dropped on pair_grad_sensors + info['bads'] = list() # Bads handled by pair_grad_sensors info._update_redundant() info._check_consistency() new_picks = list() @@ -702,31 +706,34 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, y_label = 'RMS amplitude (%s)' % unit if layout is None: - layout = find_layout(info) + layout = find_layout(info, exclude=exclude) if not merge_channels: # XXX. 
at the moment we are committed to 1- / 2-sensor-types layouts - chs_in_layout = set(layout.names) & set(ch_names) - types_used = {channel_type(info, ch_names.index(ch)) - for ch in chs_in_layout} + chs_in_layout = [ch_name for ch_name in ch_names + if ch_name in layout.names] + types_used = [channel_type(info, ch_names.index(ch)) + for ch in chs_in_layout] + # Using dict conversion to remove duplicates + types_used = list(dict.fromkeys(types_used)) # remove possible reference meg channels - types_used = set.difference(types_used, set('ref_meg')) + types_used = [types_used for types_used in types_used + if types_used != 'ref_meg'] # one check for all vendors - meg_types = {'mag', 'grad'} - is_meg = len(set.intersection(types_used, meg_types)) > 0 - nirs_types = {'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'} - is_nirs = len(set.intersection(types_used, nirs_types)) > 0 + is_meg = len([x for x in types_used if x in ['mag', 'grad']]) > 0 + is_nirs = len([x for x in types_used if x in + ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od')]) > 0 if is_meg: types_used = list(types_used)[::-1] # -> restore kwarg order - picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[]) + picks = [pick_types(info, meg=kk, ref_meg=False, exclude=exclude) for kk in types_used] elif is_nirs: types_used = list(types_used)[::-1] # -> restore kwarg order - picks = [pick_types(info, fnirs=kk, ref_meg=False, exclude=[]) + picks = [pick_types(info, fnirs=kk, ref_meg=False, exclude=exclude) for kk in types_used] else: types_used_kwargs = {t: True for t in types_used} - picks = [pick_types(info, meg=False, exclude=[], + picks = [pick_types(info, meg=False, exclude=exclude, **types_used_kwargs)] assert isinstance(picks, list) and len(types_used) == len(picks) @@ -761,7 +768,10 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, if len(ylim_) == 1: ylim_ = ylim_[0] else: - ylim_ = zip(*[np.array(yl) for yl in ylim_]) + ylim_ = [np.array(yl) for yl in ylim_] + # Transposing to avoid Zipping confusion + if is_meg or is_nirs: + ylim_ = list(map(list, zip(*ylim_))) else: raise TypeError('ylim must be None or a dict. Got %s.' 
% type(ylim)) diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index 9ecbf085608..f859446a53e 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -98,6 +98,12 @@ def _prepare_topomap_plot(inst, ch_type, sphere=None): elif ch_type == 'csd': picks = pick_types(info, meg=False, csd=True, ref_meg=False, exclude='bads') + elif ch_type == 'dbs': + picks = pick_types(info, meg=False, dbs=True, ref_meg=False, + exclude='bads') + elif ch_type == 'seeg': + picks = pick_types(info, meg=False, seeg=True, ref_meg=False, + exclude='bads') else: picks = pick_types(info, meg=ch_type, ref_meg=False, exclude='bads') @@ -349,11 +355,10 @@ def plot_projs_topomap(projs, info, cmap=None, sensors=True, n_projs = len(projs) if axes is None: fig, axes, ncols, nrows = _prepare_trellis( - n_projs, ncols='auto', nrows='auto') + n_projs, ncols='auto', nrows='auto', sharex=True, sharey=True) elif isinstance(axes, plt.Axes): axes = [axes] - if len(axes) != n_projs: - raise RuntimeError('There must be an axes for each picked projector.') + _validate_if_list_of_axes(axes, n_projs) # handle vmin/vmax vlims = [None for _ in range(len(datas))] @@ -568,9 +573,10 @@ def _get_extra_points(pos, extrapolate, origin, radii): else: assert extrapolate == 'head' # return points on the head circle - angle = np.arcsin(distance / 2 / np.mean(radii)) - points_l = np.arange(0, 2 * np.pi, angle) - use_radii = radii * 1.1 + angle = np.arcsin(distance / np.mean(radii)) + n_pnts = max(12, int(np.round(2 * np.pi / angle))) + points_l = np.linspace(0, 2 * np.pi, n_pnts, endpoint=False) + use_radii = radii * 1.1 + distance points_x = np.cos(points_l) * use_radii[0] + x points_y = np.sin(points_l) * use_radii[1] + y new_pos = np.stack([points_x, points_y], axis=1) @@ -767,6 +773,7 @@ def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, def _setup_interp(pos, res, extrapolate, sphere, outlines, border): + logger.debug(f'Interpolation mode {extrapolate} to {border}') xlim = np.inf, -np.inf, ylim = np.inf, -np.inf, mask_ = np.c_[outlines['mask_pos']] @@ -792,6 +799,30 @@ def _setup_interp(pos, res, extrapolate, sphere, outlines, border): return extent, Xi, Yi, interp +def _get_patch(outlines, extrapolate, interp, ax): + from matplotlib import patches + clip_radius = outlines['clip_radius'] + clip_origin = outlines.get('clip_origin', (0., 0.)) + _use_default_outlines = any(k.startswith('head') for k in outlines) + patch_ = None + if 'patch' in outlines: + patch_ = outlines['patch'] + patch_ = patch_() if callable(patch_) else patch_ + patch_.set_clip_on(False) + ax.add_patch(patch_) + ax.set_transform(ax.transAxes) + ax.set_clip_path(patch_) + if _use_default_outlines: + if extrapolate == 'local': + patch_ = patches.Polygon( + interp.mask_pts, clip_on=True, transform=ax.transData) + else: + patch_ = patches.Ellipse( + clip_origin, 2 * clip_radius[0], 2 * clip_radius[1], + clip_on=True, transform=ax.transData) + return patch_ + + def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, res=64, axes=None, names=None, show_names=False, mask=None, mask_params=None, outlines='head', @@ -825,16 +856,13 @@ def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, # deal with grad pairs picks = _pair_grad_sensors(pos, topomap_coords=False) pos = _find_topomap_coords(pos, picks=picks[::2], sphere=sphere) - data, _ = _merge_ch_data(data, ch_type, []) + data, _ = _merge_ch_data(data[picks], ch_type, []) data = data.reshape(-1) else: picks = list(range(data.shape[0])) pos = 
_find_topomap_coords(pos, picks=picks, sphere=sphere) - _check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto')) - if extrapolate == 'auto': - extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head' - + extrapolate = _check_extrapolate(extrapolate, ch_type) if data.ndim > 1: raise ValueError("Data needs to be array of shape (n_sensors,); got " "shape %s." % str(data.shape)) @@ -875,35 +903,16 @@ def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True, ax = axes if axes else plt.gca() _prepare_topomap(pos, ax) - _use_default_outlines = any(k.startswith('head') for k in outlines) mask_params = _handle_default('mask_params', mask_params) # find mask limits - clip_radius = outlines['clip_radius'] - clip_origin = outlines.get('clip_origin', (0., 0.)) extent, Xi, Yi, interp = _setup_interp( pos, res, extrapolate, sphere, outlines, border) interp.set_values(data) Zi = interp.set_locations(Xi, Yi)() # plot outline - patch_ = None - if 'patch' in outlines: - patch_ = outlines['patch'] - patch_ = patch_() if callable(patch_) else patch_ - patch_.set_clip_on(False) - ax.add_patch(patch_) - ax.set_transform(ax.transAxes) - ax.set_clip_path(patch_) - if _use_default_outlines: - from matplotlib import patches - if extrapolate == 'local': - patch_ = patches.Polygon( - interp.mask_pts, clip_on=True, transform=ax.transData) - else: - patch_ = patches.Ellipse( - clip_origin, 2 * clip_radius[0], 2 * clip_radius[1], - clip_on=True, transform=ax.transData) + patch_ = _get_patch(outlines, extrapolate, interp, ax) # plot interpolated map im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower', @@ -1010,7 +1019,7 @@ def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64, data.ravel(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=axes, cmap=cmap, outlines=outlines, contours=contours, sensors=sensors, image_interp=image_interp, show=show, extrapolate=extrapolate, - sphere=sphere, border=border)[0] + sphere=sphere, border=border, ch_type=ch_type)[0] if colorbar: cbar, cax = _add_colorbar(axes, im, cmap, pad=.05, title="AU", format='%3.2f') @@ -1019,7 +1028,7 @@ def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64, _hide_frame(axes) -@fill_doc +@verbose def plot_ica_components(ica, picks=None, ch_type=None, res=64, vmin=None, vmax=None, cmap='RdBu_r', sensors=True, colorbar=False, title=None, @@ -1027,7 +1036,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, image_interp='bilinear', inst=None, plot_std=True, topomap_args=None, image_args=None, psd_args=None, reject='auto', - sphere=None): + sphere=None, *, verbose=None): """Project mixing matrix on interpolated sensor topography. Parameters @@ -1109,6 +1118,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, which applies the rejection parameters used when fitting the ICA object. 
%(topomap_sphere_auto)s + %(verbose)s Returns ------- @@ -1142,18 +1152,13 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, figs = [] for k in range(0, n_components, p): picks = range(k, min(k + p, n_components)) - fig = plot_ica_components(ica, picks=picks, ch_type=ch_type, - res=res, vmax=vmax, - cmap=cmap, sensors=sensors, - colorbar=colorbar, title=title, - show=show, outlines=outlines, - contours=contours, - image_interp=image_interp, inst=inst, - plot_std=plot_std, - topomap_args=topomap_args, - image_args=image_args, - psd_args=psd_args, reject=reject, - sphere=sphere) + fig = plot_ica_components( + ica, picks=picks, ch_type=ch_type, res=res, vmax=vmax, + cmap=cmap, sensors=sensors, colorbar=colorbar, title=title, + show=show, outlines=outlines, contours=contours, + image_interp=image_interp, inst=inst, plot_std=plot_std, + topomap_args=topomap_args, image_args=image_args, + psd_args=psd_args, reject=reject, sphere=sphere) figs.append(fig) return figs else: @@ -1164,7 +1169,7 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, data = np.dot(ica.mixing_matrix_[:, picks].T, ica.pca_components_[:ica.n_components_]) - data_picks, pos, merge_channels, names, _, sphere, clip_origin = \ + data_picks, pos, merge_channels, names, ch_type, sphere, clip_origin = \ _prepare_topomap_plot(ica, ch_type, sphere=sphere) outlines = _make_head_outlines(sphere, pos, outlines, clip_origin) @@ -1187,7 +1192,8 @@ def plot_ica_components(ica, picks=None, ch_type=None, res=64, im = plot_topomap( data_.flatten(), pos, vmin=vmin_, vmax=vmax_, res=res, axes=ax, cmap=cmap[0], outlines=outlines, contours=contours, - image_interp=image_interp, show=False, sensors=sensors)[0] + image_interp=image_interp, show=False, sensors=sensors, + ch_type=ch_type, **topomap_args)[0] im.axes.set_label(ica._ica_names[ii]) if colorbar: cbar, cax = _add_colorbar(ax, im, cmap, title="AU", @@ -1979,7 +1985,7 @@ def plot_psds_topomap( @fill_doc -def plot_layout(layout, picks=None, show=True): +def plot_layout(layout, picks=None, show_axes=False, show=True): """Plot the sensor positions. Parameters @@ -1987,6 +1993,8 @@ def plot_layout(layout, picks=None, show=True): layout : None | Layout Layout instance specifying sensor positions. %(picks_nostr)s + show_axes : bool + Show layout axes if True. Defaults to False. show : bool Show figure if True. Defaults to True. @@ -2005,15 +2013,18 @@ def plot_layout(layout, picks=None, show=True): fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None) ax.set(xticks=[], yticks=[], aspect='equal') - pos = np.array([(p[0] + p[2] / 2., p[1] + p[3] / 2.) 
for p in layout.pos]) outlines = dict(border=([0, 1, 1, 0, 0], [0, 0, 1, 1, 0])) _draw_outlines(ax, outlines) picks = _picks_to_idx(len(layout.names), picks) - pos = pos[picks] + pos = layout.pos[picks] names = np.array(layout.names)[picks] - for ii, (this_pos, ch_id) in enumerate(zip(pos, names)): - ax.annotate(ch_id, xy=this_pos[:2], horizontalalignment='center', + for ii, (p, ch_id) in enumerate(zip(pos, names)): + center_pos = np.array((p[0] + p[2] / 2., p[1] + p[3] / 2.)) + ax.annotate(ch_id, xy=center_pos, horizontalalignment='center', verticalalignment='center', size='x-small') + if show_axes: + x1, x2, y1, y2 = p[0], p[0] + p[2], p[1], p[1] + p[3] + ax.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1], color='k') ax.axis('off') tight_layout(fig=fig, pad=0, w_pad=0, h_pad=0) plt_show(show) @@ -2109,9 +2120,17 @@ def _hide_frame(ax): ax.set_frame_on(False) -def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): +def _check_extrapolate(extrapolate, ch_type): + _check_option('extrapolate', extrapolate, ('box', 'local', 'head', 'auto')) + if extrapolate == 'auto': + extrapolate = 'local' if ch_type in _MEG_CH_TYPES_SPLIT else 'head' + return extrapolate + + +@verbose +def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere, ch_type, + extrapolate, verbose): """Initialize animated topomap.""" - from matplotlib import pyplot as plt, patches logger.info('Initializing animation...') data = params['data'] items = list() @@ -2137,7 +2156,10 @@ def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): _hide_frame(ax) extent, Xi, Yi, interp = _setup_interp( - params['pos'], 64, 'box', sphere, outlines, 0) + params['pos'], 64, extrapolate, sphere, outlines, 0) + + patch_ = _get_patch(outlines, extrapolate, interp, ax) + params['Zis'] = list() for frame in params['frames']: params['Zis'].append(interp.set_values(data[:, frame])(Xi, Yi)) @@ -2152,14 +2174,9 @@ def _init_anim(ax, ax_line, ax_cbar, params, merge_channels, sphere): aspect='equal', extent=extent, interpolation='bilinear') ax.autoscale(enable=True, tight=True) - plt.colorbar(im, cax=ax_cbar) + ax.figure.colorbar(im, cax=ax_cbar) cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1) - patch_ = patches.Ellipse((0, 0), - 2 * outlines['clip_radius'][0], - 2 * outlines['clip_radius'][1], - clip_on=True, - transform=ax.transData) im.set_clip_path(patch_) text = ax.text(0.55, 0.95, '', transform=ax.transAxes, va='center', ha='right') @@ -2249,7 +2266,7 @@ def _key_press(event, params): def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, - show, time_unit, sphere): + show, time_unit, sphere, extrapolate, *, verbose=None): """Make animation of evoked data as topomap timeseries. See mne.evoked.Evoked.animate_topomap. 
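# --- Editorial illustration (not part of the patch) -------------------------
# The ``show_axes`` option added to ``plot_layout`` above draws the bounding
# box of each sensor's subplot.  A minimal usage sketch, assuming one of the
# layouts bundled with MNE:
from mne.channels import read_layout
from mne.viz import plot_layout

layout = read_layout('Vectorview-mag')
fig = plot_layout(layout, show_axes=True)  # outline each channel's box
# -----------------------------------------------------------------------------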
@@ -2273,7 +2290,6 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, raise ValueError('All times must be inside the evoked time series.') frames = [np.abs(evoked.times - time).argmin() for time in times] - blit = False if plt.get_backend() == 'MacOSX' else blit picks, pos, merge_channels, _, ch_type, sphere, clip_origin = \ _prepare_topomap_plot(evoked, ch_type, sphere=sphere) data = evoked.data[picks, :] @@ -2292,6 +2308,7 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int) ax_cbar = plt.subplot2grid(shape, (0, colspan), rowspan=rowspan) ax_cbar.set_title(_handle_default('units')[ch_type], fontsize=10) + extrapolate = _check_extrapolate(extrapolate, ch_type) params = dict(data=data, pos=pos, all_times=evoked.times, frame=0, frames=frames, butterfly=butterfly, blit=blit, @@ -2299,7 +2316,8 @@ def _topomap_animation(evoked, ch_type, times, frame_rate, butterfly, blit, clip_origin=clip_origin) init_func = partial(_init_anim, ax=ax, ax_cbar=ax_cbar, ax_line=ax_line, params=params, merge_channels=merge_channels, - sphere=sphere) + sphere=sphere, ch_type=ch_type, + extrapolate=extrapolate, verbose=verbose) animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params) pause_func = partial(_pause_anim, params=params) fig.canvas.mpl_connect('button_press_event', pause_func) diff --git a/mne/viz/utils.py b/mne/viz/utils.py index eb7ded5a6e3..404b51ea574 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -23,6 +23,7 @@ from copy import deepcopy from distutils.version import LooseVersion import warnings +from datetime import datetime from ..defaults import _handle_default from ..fixes import _get_status @@ -38,16 +39,14 @@ from ..io.proj import setup_proj from ..utils import (verbose, get_config, warn, _check_ch_locs, _check_option, logger, fill_doc, _pl, _check_sphere, _ensure_int) - -from ..selection import (read_selection, _SELECTIONS, _EEG_SELECTIONS, - _divide_to_regions) from ..transforms import apply_trans _channel_type_prettyprint = {'eeg': "EEG channel", 'grad': "Gradiometer", 'mag': "Magnetometer", 'seeg': "sEEG channel", - 'eog': "EOG channel", 'ecg': "ECG sensor", - 'emg': "EMG sensor", 'ecog': "ECoG channel", + 'dbs': "DBS channel", 'eog': "EOG channel", + 'ecg': "ECG sensor", 'emg': "EMG sensor", + 'ecog': "ECoG channel", 'misc': "miscellaneous sensor"} @@ -362,9 +361,9 @@ def _make_event_color_dict(event_color, events=None, event_id=None): def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False, - size=1.3): - import matplotlib.pyplot as plt + size=1.3, sharex=False, sharey=False): from matplotlib.gridspec import GridSpec + from ._figure import _figure if n_cells == 1: nrows = ncols = 1 @@ -389,9 +388,8 @@ def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False, width = size * ncols height = (size + max(0, 0.1 * (4 - size))) * nrows + bool(title) * 0.5 height_ratios = None - g_kwargs = {} - figure_nobar(figsize=(width * 1.5, height * 1.5)) - gs = GridSpec(nrows, ncols, height_ratios=height_ratios, **g_kwargs) + fig = _figure(toolbar=False, figsize=(width * 1.5, 0.25 + height * 1.5)) + gs = GridSpec(nrows, ncols, figure=fig, height_ratios=height_ratios) axes = [] if colorbar: @@ -401,9 +399,13 @@ def _prepare_trellis(n_cells, ncols, nrows='auto', title=False, colorbar=False, else: ax_idxs = range(n_cells) for ax_idx in ax_idxs: - axes.append(plt.subplot(gs[ax_idx])) - - fig = axes[0].get_figure() + 
subplot_kw = dict() + if ax_idx > 0: + if sharex: + subplot_kw.update(sharex=axes[0]) + if sharey: + subplot_kw.update(sharey=axes[0]) + axes.append(fig.add_subplot(gs[ax_idx], **subplot_kw)) return fig, axes, ncols, nrows @@ -554,11 +556,8 @@ def figure_nobar(*args, **kwargs): return fig -def _show_help(col1, col2, width, height): - fig_help = figure_nobar(figsize=(width, height), dpi=80) +def _show_help_fig(col1, col2, fig_help, ax, show): _set_window_title(fig_help, 'Help') - - ax = fig_help.add_subplot(111) celltext = [[c1, c2] for c1, c2 in zip(col1.strip().split("\n"), col2.strip().split("\n"))] table = ax.table(cellText=celltext, loc="center", cellLoc="left") @@ -574,12 +573,19 @@ def _show_help(col1, col2, width, height): fig_help.canvas.mpl_connect('key_press_event', _key_press) - # this should work for non-test cases - try: - fig_help.canvas.draw() - plt_show(fig=fig_help, warn=False) - except Exception: - pass + if show: + # this should work for non-test cases + try: + fig_help.canvas.draw() + plt_show(fig=fig_help, warn=False) + except Exception: + pass + + +def _show_help(col1, col2, width, height): + fig_help = figure_nobar(figsize=(width, height), dpi=80) + ax = fig_help.add_subplot(111) + _show_help_fig(col1, col2, fig_help, ax, show=True) def _key_press(event): @@ -818,9 +824,9 @@ def plot_sensors(info, kind='topomap', ch_type=None, title=None, 'topomap'. ch_type : None | str The channel type to plot. Available options 'mag', 'grad', 'eeg', - 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad, eeg, - seeg and ecog channels are plotted. If None (default), then channels - are chosen in the order given above. + 'seeg', 'dbs', 'ecog', 'all'. If ``'all'``, all the available mag, + grad, eeg, seeg, dbs and ecog channels are plotted. If None (default), + then channels are chosen in the order given above. title : str | None Title for the figure. If None (default), equals to ``'Sensor positions (%%s)' %% ch_type``. 
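# --- Editorial illustration (not part of the patch) -------------------------
# ``plot_sensors`` now lists 'dbs' among the selectable channel types.  A
# minimal sketch of the call, using the sample raw file as an assumed input;
# it contains MEG/EEG, so 'grad' is shown here, and 'dbs' would be used the
# same way for recordings that actually contain DBS channels.
import mne

raw_fname = (mne.datasets.sample.data_path() +
             '/MEG/sample/sample_audvis_raw.fif')
info = mne.io.read_info(raw_fname)
mne.viz.plot_sensors(info, kind='topomap', ch_type='grad')
# -----------------------------------------------------------------------------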
@@ -922,6 +928,10 @@ def plot_sensors(info, kind='topomap', ch_type=None, title=None, for i, pick in enumerate(picks)] else: if ch_groups in ['position', 'selection']: + # Avoid circular import + from ..channels import (read_vectorview_selection, _SELECTIONS, + _EEG_SELECTIONS, _divide_to_regions) + if ch_groups == 'position': ch_groups = _divide_to_regions(info, add_stim=False) ch_groups = list(ch_groups.values()) @@ -929,7 +939,8 @@ def plot_sensors(info, kind='topomap', ch_type=None, title=None, ch_groups, color_vals = list(), list() for selection in _SELECTIONS + _EEG_SELECTIONS: channels = pick_channels( - info['ch_names'], read_selection(selection, info=info)) + info['ch_names'], + read_vectorview_selection(selection, info=info)) ch_groups.append(channels) color_vals = np.ones((len(ch_groups), 4)) for idx, ch_group in enumerate(ch_groups): @@ -1000,7 +1011,7 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, """Plot sensors.""" from matplotlib import rcParams import matplotlib.pyplot as plt - from mpl_toolkits.mplot3d import Axes3D + from mpl_toolkits.mplot3d import Axes3D # noqa: F401 analysis:ignore from .topomap import _get_pos_outlines, _draw_outlines sphere = _check_sphere(sphere, info) @@ -1008,12 +1019,12 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, edgecolors[bads] = 'red' axes_was_none = ax is None if axes_was_none: - fig = plt.figure(figsize=(max(rcParams['figure.figsize']),) * 2) + subplot_kw = dict() if kind == '3d': - Axes3D(fig) - ax = fig.gca(projection='3d') - else: - ax = fig.add_subplot(111) + subplot_kw.update(projection='3d') + fig, ax = plt.subplots( + 1, figsize=(max(rcParams['figure.figsize']),) * 2, + subplot_kw=subplot_kw) else: fig = ax.get_figure() @@ -1042,9 +1053,8 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, # Equal aspect for 3D looks bad, so only use for 2D ax.set(aspect='equal') - if axes_was_none: - fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, - hspace=None) + if axes_was_none: # we'll show the plot title as the window title + fig.subplots_adjust(left=0, bottom=0, right=1, top=1) ax.axis("off") # remove border around figure del sphere @@ -1066,8 +1076,8 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, picker = partial(_onpick_sensor, fig=fig, ax=ax, pos=pos, ch_names=ch_names, show_names=show_names) fig.canvas.mpl_connect('pick_event', picker) - - ax.set(title=title) + if axes_was_none: + _set_window_title(fig, title) closed = partial(_close_event, fig=fig) fig.canvas.mpl_connect('close_event', closed) plt_show(show, block=block) @@ -1122,7 +1132,9 @@ def _compute_scalings(scalings, inst, remove_dc=False, duration=10): time_middle = np.mean(inst.times) tmin = np.clip(time_middle - n_secs / 2., inst.times.min(), None) tmax = np.clip(time_middle + n_secs / 2., None, inst.times.max()) - data = inst._read_segment(tmin, tmax) + smin, smax = [ + int(round(x * inst.info['sfreq'])) for x in (tmin, tmax)] + data = inst._read_segment(smin, smax) elif isinstance(inst, BaseEpochs): # Load a random subset of epochs up to 100mb in size n_epochs = 1e8 // (len(inst.ch_names) * len(inst.times) * 8) @@ -2267,3 +2279,76 @@ def centers_to_edges(*arrays): arr[:-1] + arr_diff, [arr[-1] + arr_diff[-1]]])) return out + + +def _figure_agg(**kwargs): + from matplotlib.backends.backend_agg import FigureCanvasAgg + from matplotlib.figure import Figure + fig = Figure(**kwargs) + FigureCanvasAgg(fig) + return fig + + +def 
_ndarray_to_fig(img): + """Convert to MPL figure, adapted from matplotlib.image.imsave.""" + dpi = 100 + figsize = np.array(img.shape[:2][::-1]) / dpi + fig = _figure_agg(dpi=dpi, figsize=figsize, frameon=False) + fig.figimage(img, resize=True) + return fig + + +def _save_ndarray_img(fname, img): + """Save an image to disk.""" + from PIL import Image + Image.fromarray(img).save(fname) + + +def concatenate_images(images, axis=0, bgcolor='black', centered=True): + """Concatenate a list of images. + + Parameters + ---------- + images : list of ndarray + The list of images to concatenate. + axis : 0 or 1 + The images are concatenated horizontally if 0 and vertically otherwise. + The default orientation is horizontal. + bgcolor : str | list + The color of the background. The name of the color is accepted + (e.g 'red') or a list of RGB values between 0 and 1. Defaults to + 'black'. + centered : bool + If True, the images are centered. Defaults to True. + + Returns + ------- + img : ndarray + The concatenated image. + """ + from matplotlib.colors import colorConverter + if isinstance(bgcolor, str): + bgcolor = colorConverter.to_rgb(bgcolor) + bgcolor = np.asarray(bgcolor) * 255 + funcs = [np.sum, np.max] + ret_shape = np.asarray([ + funcs[axis]([image.shape[0] for image in images]), + funcs[1 - axis]([image.shape[1] for image in images]), + ]) + ret = np.zeros((ret_shape[0], ret_shape[1], 3), dtype=np.uint8) + ret[:, :, :] = bgcolor + ptr = np.array([0, 0]) + sec = np.array([0 == axis, 1 == axis]).astype(int) + for image in images: + shape = image.shape[:-1] + dec = ptr + dec += ((ret_shape - shape) // 2) * (1 - sec) if centered else 0 + ret[dec[0]:dec[0] + shape[0], dec[1]:dec[1] + shape[1], :] = image + ptr += shape * sec + return ret + + +def _generate_default_filename(ext=".png"): + now = datetime.now() + dt_string = now.strftime("_%Y-%m-%d_%H-%M-%S") + return "MNE" + dt_string + ext diff --git a/requirements.txt b/requirements.txt index 9f80afe540c..0ff305c7355 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,10 @@ -numpy -scipy +# requirements for full MNE-Python functionality (other than raw/epochs export) +numpy>=1.15.4 +scipy>=1.1.0 matplotlib pyqt5>=5.10,<5.14; platform_system == "Darwin" -pyqt5>=5.10,!=5.15.2; platform_system == "Linux" -pyqt5>=5.10; platform_system != "Linux" and platform_system != "Darwin" +pyqt5>=5.10,!=5.15.2,!=5.15.3; platform_system == "Linux" +pyqt5>=5.10,!=5.15.3; platform_system != "Linux" and platform_system != "Darwin" pyqt5-sip sip scikit-learn @@ -30,3 +31,5 @@ pyvista>=0.24 pyvistaqt>=0.2.0 tqdm mffpy>=0.5.7 +ipywidgets +ipyvtk-simple diff --git a/requirements_doc.txt b/requirements_doc.txt index ad2c7ec6a04..bb78e14737b 100644 --- a/requirements_doc.txt +++ b/requirements_doc.txt @@ -1,9 +1,12 @@ +# requirements for building docs sphinx -https://github.com/numpy/numpydoc/archive/master.zip -sphinx_fontawesome -sphinx_bootstrap_theme +https://github.com/numpy/numpydoc/archive/main.zip +pydata-sphinx-theme==0.6.1 https://github.com/sphinx-gallery/sphinx-gallery/archive/master.zip -https://github.com/mcmtroffaes/sphinxcontrib-bibtex/archive/29694f215b39d64a31b845aafd9ff2ae9329494f.zip +sphinxcontrib-bibtex>=2.1.2 memory_profiler neo seaborn +sphinx_copybutton +https://github.com/mne-tools/mne-bids/archive/main.zip +pyxdf diff --git a/requirements_testing.txt b/requirements_testing.txt index 75f92facd96..aac2f9ccf91 100644 --- a/requirements_testing.txt +++ b/requirements_testing.txt @@ -1,12 +1,12 @@ +# requirements for running tests (on top 
of environment.yml/requirements.txt) pytest!=4.6.0 pytest-cov pytest-timeout pytest-harvest flake8 flake8-array-spacing -https://github.com/sphinx-gallery/sphinx-gallery/archive/master.zip -https://github.com/numpy/numpydoc/archive/master.zip -https://github.com/codespell-project/codespell/archive/master.zip +numpydoc +codespell pydocstyle check-manifest twine diff --git a/requirements_testing_extra.txt b/requirements_testing_extra.txt new file mode 100644 index 00000000000..777d3b7aa8d --- /dev/null +++ b/requirements_testing_extra.txt @@ -0,0 +1,5 @@ +# requirements for full testing (on top of environment.yml/requirements.txt) +nitime +nbclient +sphinx-gallery +eeglabio diff --git a/server_environment.yml b/server_environment.yml index 066e8773c02..ce41c0c774d 100644 --- a/server_environment.yml +++ b/server_environment.yml @@ -2,7 +2,6 @@ name: base channels: - conda-forge/label/vtk_dev - conda-forge -- defaults dependencies: - python>=3.7 - pip @@ -10,16 +9,19 @@ dependencies: - ffmpeg - vtk - traits +- scipy +- numpy +- matplotlib-base +- pyvista +- nilearn +- nibabel +- nbformat <5.1 # XXX remove pinning once https://github.com/jupyter/nbformat/issues/206 has been fixed +- nbclient +- mffpy>=0.5.7 - pip: - mne - - scipy - - numpy<1.19.0 - - matplotlib - - ipympl - jupyter - - pyvista + - ipympl - ipywidgets - - nbformat - - nbclient + - ipyvtk_simple - jupyter_client!=6.1.5 - - mffpy>=0.5.7 diff --git a/setup.py b/setup.py index 874ac390c81..9a5e4da5ff3 100755 --- a/setup.py +++ b/setup.py @@ -48,6 +48,15 @@ def package_tree(pkgroot): with open('README.rst', 'r') as fid: long_description = fid.read() + hard_dependencies = ('numpy', 'scipy') + install_requires = list() + with open('requirements.txt', 'r') as fid: + for line in fid: + req = line.strip() + for hard_dep in hard_dependencies: + if req.startswith(hard_dep): + install_requires.append(req) + setup(name=DISTNAME, maintainer=MAINTAINER, include_package_data=True, @@ -80,7 +89,7 @@ def package_tree(pkgroot): }, platforms='any', python_requires='>=3.6', - install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'], + install_requires=install_requires, packages=package_tree('mne'), package_data={'mne': [ op.join('data', '*.sel'), @@ -98,6 +107,8 @@ def package_tree(pkgroot): op.join('channels', 'data', 'montages', '*.elc'), op.join('channels', 'data', 'neighbors', '*.mat'), op.join('datasets', 'sleep_physionet', 'SHA1SUMS'), + op.join('datasets', '_fsaverage', '*.txt'), + op.join('datasets', '_infant', '*.txt'), op.join('gui', 'help', '*.json'), op.join('html', '*.js'), op.join('html', '*.css'), diff --git a/tools/azure_dependencies.sh b/tools/azure_dependencies.sh new file mode 100755 index 00000000000..42a59ce99bf --- /dev/null +++ b/tools/azure_dependencies.sh @@ -0,0 +1,20 @@ +#!/bin/bash -ef + +if [ "${TEST_MODE}" == "pip" ]; then + python -m pip install --upgrade pip setuptools + python -m pip install --upgrade --only-binary ":all:" numpy scipy vtk + python -m pip install --upgrade --only-binary="numba,llvmlite" -r requirements.txt +elif [ "${TEST_MODE}" == "pip-pre" ]; then + python -m pip install --progress-bar off --upgrade pip setuptools + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --extra-index-url https://www.riverbankcomputing.com/pypi/simple numpy scipy pandas scikit-learn PyQt5 + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f 
"https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" h5py Pillow + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all" vtk + python -m pip install --progress-bar off --upgrade --only-binary ":all" matplotlib + python -m pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/master + python -m pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master + python -m pip install --progress-bar off --upgrade --only-binary="numba,llvmlite" -r requirements.txt +else + echo "Unknown run type ${TEST_MODE}" + exit 1 +fi +python -m pip install -r requirements_testing.txt -r requirements_testing_extra.txt codecov diff --git a/tools/circleci_dependencies.sh b/tools/circleci_dependencies.sh new file mode 100755 index 00000000000..ea17be4ed46 --- /dev/null +++ b/tools/circleci_dependencies.sh @@ -0,0 +1,39 @@ +#!/bin/bash -ef + +echo "Working around PyQt5 bugs" +# https://github.com/ContinuumIO/anaconda-issues/issues/9190#issuecomment-386508136 +# https://github.com/golemfactory/golem/issues/1019 +sudo apt-get install libosmesa6 libglx-mesa0 libopengl0 libglx0 libdbus-1-3 \ + libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 \ + libxcb-render-util0 libxcb-shape0 libxcb-xfixes0 libxcb-xinerama0 \ + graphviz optipng +sudo ln -s /usr/lib/x86_64-linux-gnu/libxcb-util.so.0 /usr/lib/x86_64-linux-gnu/libxcb-util.so.1 + +echo "Installing setuptools and sphinx" +python -m pip install --progress-bar off --upgrade "pip!=20.3.0" setuptools wheel +python -m pip install --upgrade --progress-bar off --pre sphinx +if [[ "$CIRCLE_JOB" == "interactive_test" ]]; then + echo "Installing latest dependencies for interactive_test" + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" python-dateutil pytz joblib threadpoolctl + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --extra-index-url https://www.riverbankcomputing.com/pypi/simple numpy scipy pandas scikit-learn PyQt5 + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" h5py pillow matplotlib + python -m pip install --progress-bar off --upgrade --pre --only-binary ":all:" numba llvmlite + wget -q https://osf.io/kej3v/download -O vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + python -m pip install --progress-bar off vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + python -m pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/master + python -m pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master + python -m pip install --progress-bar off --upgrade -r requirements_testing.txt -r requirements_testing_extra.txt + python -m pip install -e . +elif [[ "$CIRCLE_JOB" == "linkcheck"* ]]; then + echo "Installing minimal linkcheck dependencies" + python -m pip install --progress-bar off numpy scipy matplotlib pillow pytest + python -m pip install -e . 
+ python -m pip install --progress-bar off -r requirements_doc.txt +else # standard doc build + echo "Installing doc build dependencies" + python -m pip uninstall -y pydata-sphinx-theme + python -m pip install --upgrade --progress-bar off -r requirements.txt -r requirements_testing.txt -r requirements_doc.txt + python -m pip install --progress-bar off https://github.com/sphinx-gallery/sphinx-gallery/zipball/master https://github.com/pyvista/pyvista/zipball/master https://github.com/pyvista/pyvistaqt/zipball/master + python -m pip uninstall -yq pysurfer mayavi + python -m pip install -e . +fi diff --git a/tools/circleci_download.sh b/tools/circleci_download.sh index cfe808a815d..bd43bf11cae 100755 --- a/tools/circleci_download.sh +++ b/tools/circleci_download.sh @@ -1,27 +1,25 @@ #!/bin/bash -ef -if [ "$CIRCLE_BRANCH" == "master" ] || [[ $(cat gitlog.txt) == *"[circle full]"* ]]; then +if [ "$CIRCLE_BRANCH" == "main" ] || [[ $(cat gitlog.txt) == *"[circle full]"* ]]; then echo "Doing a full dev build"; echo html_dev-memory > build.txt; python -c "import mne; mne.datasets._download_all_example_data()"; - elif [ "$CIRCLE_BRANCH" == "maint/0.21" ]; then +elif [ "$CIRCLE_BRANCH" == "maint/0.22" ]; then echo "Doing a full stable build"; echo html_stable-memory > build.txt; python -c "import mne; mne.datasets._download_all_example_data()"; else echo "Doing a partial build"; - if ! git remote -v | grep upstream ; then git remote add upstream git://github.com/mne-tools/mne-python.git; fi - git fetch upstream - FNAMES=$(git diff --name-only $(git merge-base $CIRCLE_BRANCH upstream/master) $CIRCLE_BRANCH); + FNAMES=$(git diff --name-only $(git merge-base $CIRCLE_BRANCH upstream/main) $CIRCLE_BRANCH); if [[ $(cat gitlog.txt) == *"[circle front]"* ]]; then - FNAMES="tutorials/source-modeling/plot_mne_dspm_source_localization.py tutorials/machine-learning/plot_receptive_field.py examples/connectivity/plot_mne_inverse_label_connectivity.py tutorials/machine-learning/plot_sensors_decoding.py tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py tutorials/evoked/plot_20_visualize_evoked.py "${FNAMES}; + FNAMES="tutorials/inverse/30_mne_dspm_loreta.py tutorials/machine-learning/30_strf.py examples/connectivity/mne_inverse_label_connectivity.py tutorials/machine-learning/50_decoding.py tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py tutorials/evoked/20_visualize_evoked.py "${FNAMES}; python -c "import mne; print(mne.datasets.testing.data_path(update_path=True))"; fi; echo FNAMES="$FNAMES"; for FNAME in $FNAMES; do - if [[ `expr match $FNAME "\(tutorials\|examples\)/.*plot_.*\.py"` ]] ; then + if [[ $(echo "$FNAME" | grep -P '^(tutorials|examples)(/.*)?/((?!sgskip).)*\.py$') ]] ; then echo "Checking example $FNAME ..."; - PATTERN=`basename $FNAME`"\\|"$PATTERN; + PATTERN=$(basename $FNAME)"\\|"$PATTERN; if [[ $(cat $FNAME | grep -x ".*datasets.*sample.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.sample.data_path(update_path=True))"; fi; @@ -63,7 +61,7 @@ else fi; if [[ $(cat $FNAME | grep -x ".*datasets.*hcp_mmp_parcellation.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.sample.data_path(update_path=True))"; - python -c "import mne; print(mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=mne.datasets.sample.data_path() + '/subjects'), accept=True)"; + python -c "import mne; print(mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=mne.datasets.sample.data_path() + '/subjects', accept=True))"; fi; if [[ $(cat $FNAME | grep -x 
".*datasets.*misc.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.misc.data_path(update_path=True))"; @@ -98,6 +96,15 @@ else if [[ $(cat $FNAME | grep -x ".*datasets.*refmeg_noise.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.refmeg_noise.data_path(update_path=True))"; fi; + if [[ $(cat $FNAME | grep -x ".*datasets.*ssvep.*" | wc -l) -gt 0 ]]; then + python -c "import mne; print(mne.datasets.ssvep.data_path(update_path=True))"; + fi; + if [[ $(cat $FNAME | grep -x ".*datasets.*epilepsy_ecog.*" | wc -l) -gt 0 ]]; then + python -c "import mne; print(mne.datasets.epilepsy_ecog.data_path(update_path=True))"; + fi; + if [[ $(cat $FNAME | grep -x ".*datasets.*erp_core.*" | wc -l) -gt 0 ]]; then + python -c "import mne; print(mne.datasets.erp_core.data_path(update_path=True))"; + fi; fi; done; echo PATTERN="$PATTERN"; @@ -108,4 +115,4 @@ else echo html_dev-noplot > build.txt; fi; fi; -echo "$PATTERN" > pattern.txt; \ No newline at end of file +echo "$PATTERN" > pattern.txt; diff --git a/tools/generate_codemeta.py b/tools/generate_codemeta.py new file mode 100644 index 00000000000..a7147f74c95 --- /dev/null +++ b/tools/generate_codemeta.py @@ -0,0 +1,139 @@ +import os +import subprocess +from datetime import date +from mne import __version__ as release_version + +# add to these as necessary +compound_surnames = ( + 'García Alanis', + 'van Vliet', + 'De Santis', + 'Dupré la Tour', + 'de la Torre', + 'van den Bosch', + 'Van den Bossche', + 'Van Der Donckt', + 'van der Meer', + 'van Harmelen', + 'Visconti di Oleggio Castello' +) + + +def parse_name(name): + """Split name blobs from `git shortlog -nse` into first/last/email.""" + # remove commit count + _, name_and_email = name.strip().split('\t') + name, email = name_and_email.split(' <') + email = email.strip('>') + email = '' if 'noreply' in email else email # ignore "noreply" emails + name = ' '.join(name.split('.')) # remove periods from initials + # handle compound surnames + for compound_surname in compound_surnames: + if name.endswith(compound_surname): + ix = name.index(compound_surname) + first = name[:ix].strip() + last = compound_surname + return (first, last, email) + # handle non-compound surnames + name_elements = name.split() + if len(name_elements) == 1: # mononyms / usernames + first = '' + last = name + else: + first = ' '.join(name_elements[:-1]) + last = name_elements[-1] + return (first, last, email) + + +# MAKE SURE THE RELEASE STRING IS PROPERLY FORMATTED +try: + split_version = list(map(int, release_version.split('.'))) +except ValueError: + raise +msg = f'version string must be X.Y.Z (all integers), got {release_version}' +assert len(split_version) == 3, msg + + +# RUN GIT SHORTLOG TO GET ALL AUTHORS, SORTED BY NUMBER OF COMMITS +args = ['git', 'shortlog', '-nse'] +result = subprocess.run(args, capture_output=True, text=True) +lines = result.stdout.strip().split('\n') +all_names = [parse_name(line) for line in lines] + + +# CONSTRUCT JSON AUTHORS LIST +authors = [f'''{{ + "@type":"Person", + "email":"{email}", + "givenName":"{first}", + "familyName": "{last}" + }}''' for (first, last, email) in all_names] + + +# GET OUR DEPENDENCIES +with open(os.path.join('..', 'setup.py'), 'r') as fid: + for line in fid: + if line.strip().startswith('python_requires='): + version = line.strip().split('=', maxsplit=1)[1].strip("'\",") + dependencies = [f'python{version}'] + break +hard_dependencies = ('numpy', 'scipy') +with open(os.path.join('..', 'requirements.txt'), 'r') as fid: + for line in 
fid: + req = line.strip() + for hard_dep in hard_dependencies: + if req.startswith(hard_dep): + dependencies.append(req) + + +# these must be done outside the boilerplate (no \n allowed in f-strings): +authors = ',\n '.join(authors) +dependencies = '",\n "'.join(dependencies) + + +# ASSEMBLE COMPLETE JSON +codemeta_boilerplate = f'''{{ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "@type": "SoftwareSourceCode", + "license": "https://spdx.org/licenses/BSD-3-Clause", + "codeRepository": "git+https://github.com/mne-tools/mne-python.git", + "dateCreated": "2010-12-26", + "datePublished": "2014-08-04", + "dateModified": "{str(date.today())}", + "downloadUrl": "https://github.com/mne-tools/mne-python/archive/v{release_version}.zip", + "issueTracker": "https://github.com/mne-tools/mne-python/issues", + "name": "MNE-Python", + "version": "{release_version}", + "description": "MNE-Python is an open-source Python package for exploring, visualizing, and analyzing human neurophysiological data. It provides methods for data input/output, preprocessing, visualization, source estimation, time-frequency analysis, connectivity analysis, machine learning, and statistics.", + "applicationCategory": "Neuroscience", + "developmentStatus": "active", + "referencePublication": "https://doi.org/10.3389/fnins.2013.00267", + "keywords": [ + "MEG", + "EEG", + "fNIRS", + "ECoG", + "sEEG", + "DBS" + ], + "programmingLanguage": [ + "Python" + ], + "operatingSystem": [ + "Linux", + "Windows", + "macOS" + ], + "softwareRequirements": [ + "{dependencies}" + ], + "author": [ + {authors} + ] +}} +''' # noqa E501 + + +# WRITE TO FILE +with open(os.path.join('..', 'codemeta.json'), 'w') as codemeta_file: + codemeta_file.write(codemeta_boilerplate) diff --git a/tools/get_minimal_commands.sh b/tools/get_minimal_commands.sh index 399aeea130c..b417e914bf9 100755 --- a/tools/get_minimal_commands.sh +++ b/tools/get_minimal_commands.sh @@ -1,22 +1,64 @@ #!/bin/bash -ef -if [ "${DEPS}" != "minimal" ]; then - pushd ~ > /dev/null - export MNE_ROOT="${PWD}/minimal_cmds" - export PATH=${MNE_ROOT}/bin:$PATH - if [ "${CI_OS_NAME}" != "osx" ]; then - if [ ! -d "${PWD}/minimal_cmds" ]; then - curl -L https://osf.io/g7dzs/download | tar xz - fi; - export LD_LIBRARY_PATH=${MNE_ROOT}/lib:$LD_LIBRARY_PATH - export NEUROMAG2FT_ROOT="${PWD}/minimal_cmds/bin" - export FREESURFER_HOME="${MNE_ROOT}" - else - if [ ! -d "${PWD}/minimal_cmds" ]; then - curl -L https://osf.io/rjcz4/download | tar xz - fi; - export DYLD_LIBRARY_PATH=${MNE_ROOT}/lib:$DYLD_LIBRARY_PATH - fi - popd > /dev/null - mne_surf2bem --version +if [ "${DEPS}" == "minimal" ]; then + return 0 2>/dev/null || exit "0" +fi; + +pushd ~ > /dev/null +export MNE_ROOT="${PWD}/minimal_cmds" +export PATH=${MNE_ROOT}/bin:$PATH +if [ "${GITHUB_ACTIONS}" == "true" ]; then + echo "MNE_ROOT=${MNE_ROOT}" >> $GITHUB_ENV; + echo "${MNE_ROOT}/bin" >> $GITHUB_PATH; +fi; +if [ "${AZURE_CI}" == "true" ]; then + echo "##vso[task.setvariable variable=MNE_ROOT]${MNE_ROOT}" + echo "##vso[task.setvariable variable=PATH]${PATH}"; +fi; +if [ "${CIRCLECI}" == "true" ]; then + echo "export MNE_ROOT=${MNE_ROOT}" >> "$BASH_ENV"; + echo "export PATH=${MNE_ROOT}/bin:$PATH" >> "$BASH_ENV"; +fi; +if [ "${CI_OS_NAME}" != "osx" ]; then + if [ ! 
-d "${PWD}/minimal_cmds" ]; then + curl -L https://osf.io/g7dzs/download | tar xz + fi; + export LD_LIBRARY_PATH=${MNE_ROOT}/lib:$LD_LIBRARY_PATH + export NEUROMAG2FT_ROOT="${PWD}/minimal_cmds/bin" + export FREESURFER_HOME="${MNE_ROOT}" + if [ "${GITHUB_ACTIONS}" == "true" ]; then + echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> "$GITHUB_ENV"; + echo "NEUROMAG2FT_ROOT=${NEUROMAG2FT_ROOT}" >> "$GITHUB_ENV"; + echo "FREESURFER_HOME=${FREESURFER_HOME}" >> "$GITHUB_ENV"; + fi; + if [ "${AZURE_CI}" == "true" ]; then + echo "##vso[task.setvariable variable=LD_LIBRARY_PATH]${LD_LIBRARY_PATH}" + echo "##vso[task.setvariable variable=NEUROMAG2FT_ROOT]${NEUROMAG2FT_ROOT}" + echo "##vso[task.setvariable variable=FREESURFER_HOME]${FREESURFER_HOME}" + fi; + if [ "${CIRCLECI}" == "true" ]; then + echo "export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> "$BASH_ENV"; + echo "export NEUROMAG2FT_ROOT=${NEUROMAG2FT_ROOT}" >> "$BASH_ENV"; + echo "export FREESURFER_HOME=${FREESURFER_HOME}" >> "$BASH_ENV"; + fi; +else + if [ ! -d "${PWD}/minimal_cmds" ]; then + curl -L https://osf.io/rjcz4/download | tar xz + fi; + export DYLD_LIBRARY_PATH=${MNE_ROOT}/lib:$DYLD_LIBRARY_PATH + if [ "${GITHUB_ACTIONS}" == "true" ]; then + echo "DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}" >> "$GITHUB_ENV"; + wget https://github.com/XQuartz/XQuartz/releases/download/XQuartz-2.7.11/XQuartz-2.7.11.dmg + sudo hdiutil attach XQuartz-2.7.11.dmg + sudo installer -package /Volumes/XQuartz-2.7.11/XQuartz.pkg -target / + sudo ln -s /opt/X11 /usr/X11 + fi; + if [ "${AZURE_CI}" == "true" ]; then + echo "##vso[task.setvariable variable=DYLD_LIBRARY_PATH]${DYLD_LIBRARY_PATH}" + fi; + if [ "${CIRCLECI}" == "true" ]; then + echo "export DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}" >> "$BASH_ENV"; + fi; fi +popd > /dev/null +mne_process_raw --version diff --git a/tools/get_testing_version.sh b/tools/get_testing_version.sh new file mode 100755 index 00000000000..78842f31e34 --- /dev/null +++ b/tools/get_testing_version.sh @@ -0,0 +1,10 @@ +#!/bin/bash -ef + +TESTING_VERSION=`grep -o "testing='[0-9.]\+'" mne/datasets/utils.py | cut -d \' -f 2 | sed "s/\./-/g"` +if [ ! -z $GITHUB_ENV ]; then + echo "TESTING_VERSION="$TESTING_VERSION >> $GITHUB_ENV +elif [ ! -z $AZURE_CI ]; then + echo "##vso[task.setvariable variable=testing_version]$TESTING_VERSION" +else + echo $TESTING_VERSION +fi diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index a54f5dcaabd..67f0edc73f8 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -4,23 +4,21 @@ if [ ! -z "$CONDA_ENV" ]; then pip uninstall -yq mne elif [ ! 
-z "$CONDA_DEPENDENCIES" ]; then conda install -y $CONDA_DEPENDENCIES -else # pip 3.9 (missing statsmodels and dipy) +else # pip --pre 3.9 (missing dipy in pre) + # Changes here should also go in the interactive_test CircleCI job python -m pip install --progress-bar off --upgrade "pip!=20.3.0" setuptools wheel pip uninstall -yq numpy pip install --progress-bar off --upgrade --pre --only-binary ":all:" python-dateutil pytz joblib threadpoolctl - pip install --use-deprecated=legacy-resolver --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" numpy scipy - pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" pandas scikit-learn - pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" matplotlib + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --extra-index-url https://www.riverbankcomputing.com/pypi/simple numpy scipy pandas scikit-learn PyQt5 + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" h5py pillow matplotlib + pip install --progress-bar off --upgrade --pre --only-binary ":all:" numba llvmlite # built using vtk master branch on an Ubuntu 18.04.5 VM and uploaded to OSF: wget -q https://osf.io/kej3v/download -O vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl - pip install vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl - pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/5ee02e2f295f667e33f11e71946e774cca40256c + pip install --progress-bar off vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/master pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master - pip install --progress-bar off --upgrade --pre PyQt5 - python -c "import vtk" - python -c "import pyvistaqt" fi pip install --progress-bar off --upgrade -r requirements_testing.txt if [ "${DEPS}" != "minimal" ]; then - pip install nitime + pip install --progress-bar off --upgrade -r requirements_testing_extra.txt fi diff --git a/tools/setup_xvfb.sh b/tools/setup_xvfb.sh index cfeb6a0bd92..49e40dce916 100755 --- a/tools/setup_xvfb.sh +++ b/tools/setup_xvfb.sh @@ -1,4 +1,5 @@ #!/bin/bash -ef +sudo apt-get update sudo apt-get install -yqq libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset diff --git a/tutorials/misc/plot_seeg.py b/tutorials/clinical/20_seeg.py similarity index 99% rename from tutorials/misc/plot_seeg.py rename to tutorials/clinical/20_seeg.py index 2b479dcad0b..b7370f80693 100644 --- a/tutorials/misc/plot_seeg.py +++ b/tutorials/clinical/20_seeg.py @@ -26,6 +26,7 @@ :ref:`tut_working_with_ecog`. In the ECoG example, we show how to visualize surface grid channels on the brain. """ + # Authors: Eric Larson # Adam Li # @@ -122,7 +123,7 @@ surfaces=["pial", "head"]) ############################################################################### -# Next, we will get the raw data and plot its amplitude over time. 
+# Next, we'll get the raw data and plot its amplitude over time. raw.plot() diff --git a/tutorials/clinical/30_ecog.py b/tutorials/clinical/30_ecog.py new file mode 100644 index 00000000000..bcab8c2e77c --- /dev/null +++ b/tutorials/clinical/30_ecog.py @@ -0,0 +1,190 @@ +""" +.. _tut_working_with_ecog: + +====================== +Working with ECoG data +====================== + +MNE supports working with more than just MEG and EEG data. Here we show some +of the functions that can be used to facilitate working with +electrocorticography (ECoG) data. + +This example shows how to use: + +- ECoG data (`available here `_) + from an epilepsy patient during a seizure +- channel locations in FreeSurfer's ``fsaverage`` MRI space +- projection onto a pial surface + +For a complementary example that involves sEEG data, channel locations in +MNI space, or projection into a volume, see :ref:`tut_working_with_seeg`. +""" +# Authors: Eric Larson +# Chris Holdgraf +# Adam Li +# Alex Rockhill +# Liberty Hamilton +# +# License: BSD (3-clause) + +import os.path as op + +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.cm import get_cmap +from mne_bids import BIDSPath, read_raw_bids + +import mne +from mne.viz import plot_alignment, snapshot_brain_montage + +print(__doc__) + +# paths to mne datasets - sample ECoG and FreeSurfer subject +bids_root = mne.datasets.epilepsy_ecog.data_path() +sample_path = mne.datasets.sample.data_path() +subjects_dir = op.join(sample_path, 'subjects') + + +############################################################################### +# Load in data and perform basic preprocessing +# -------------------------------------------- +# +# Let's load some ECoG electrode data with `mne-bids +# `_. + +# first define the bids path +bids_path = BIDSPath(root=bids_root, subject='pt1', session='presurgery', + task='ictal', datatype='ieeg', extension='vhdr') + +# then we'll use it to load in the sample dataset +# Here we use a format (iEEG) that is only available in MNE-BIDS 0.7+, so it +# will emit a warning on versions <= 0.6 +raw = read_raw_bids(bids_path=bids_path, verbose=False) + +# Pick only the ECoG channels, removing the EKG channels +raw.pick_types(ecog=True) + +# Load the data +raw.load_data() + +# Then we remove line frequency interference +raw.notch_filter([60], trans_bandwidth=3) + +# drop bad channels +raw.drop_channels(raw.info['bads']) + +# the coordinate frame of the montage +print(raw.get_montage().get_positions()['coord_frame']) + +# Find the annotated events +events, event_id = mne.events_from_annotations(raw) + +# Make a 25 second epoch that spans before and after the seizure onset +epoch_length = 25 # seconds +epochs = mne.Epochs(raw, events, event_id=event_id['onset'], + tmin=13, tmax=13 + epoch_length, baseline=None) + +# And then load data and downsample. +# .. note: This is just to save execution time in this example, you should +# not need to do this in general! +epochs.load_data() +epochs.resample(200) # Hz, will also load the data for us + +# Finally, make evoked from the one epoch +evoked = epochs.average() + + +############################################################################### +# Explore the electrodes on a template brain +# ------------------------------------------ +# +# Our electrodes are shown after being morphed to fsaverage brain so we'll use +# this fsaverage brain to plot the locations of our electrodes. 
We'll use +# :func:`~mne.viz.snapshot_brain_montage` to save the plot as image data +# (along with xy positions of each electrode in the image), so that later +# we can plot frequency band power on top of it. + +fig = plot_alignment(raw.info, subject='fsaverage', subjects_dir=subjects_dir, + surfaces=['pial'], coord_frame='mri') +az, el, focalpoint = 160, -70, [0.067, -0.040, 0.018] +mne.viz.set_3d_view(fig, azimuth=az, elevation=el, focalpoint=focalpoint) + +xy, im = snapshot_brain_montage(fig, raw.info) + +############################################################################### +# Compute frequency features of the data +# -------------------------------------- +# +# Next, we'll compute the signal power in the gamma (30-90 Hz) band, +# downsampling the result to 10 Hz (to save time). + +sfreq = 10 +gamma_power_t = evoked.copy().filter(30, 90).apply_hilbert( + envelope=True).resample(sfreq) +gamma_info = gamma_power_t.info + +############################################################################### +# Visualize the time-evolution of the gamma power on the brain +# ------------------------------------------------------------ +# +# Say we want to visualize the evolution of the power in the gamma band, +# instead of just plotting the average. We can use +# `matplotlib.animation.FuncAnimation` to create an animation and apply this +# to the brain figure. + +# convert from a dictionary to array to plot +xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']]) + +# get a colormap to color nearby points similar colors +cmap = get_cmap('viridis') + +# create the figure of the brain with the electrode positions +fig, ax = plt.subplots(figsize=(5, 5)) +ax.set_title('Gamma power over time', size='large') +ax.imshow(im) +ax.set_axis_off() + +# normalize gamma power for plotting +gamma_power = -100 * gamma_power_t.data / gamma_power_t.data.max() +# add the time course overlaid on the positions +x_line = np.linspace(-0.025 * im.shape[0], 0.025 * im.shape[0], + gamma_power_t.data.shape[1]) +for i, pos in enumerate(xy_pts): + x, y = pos + color = cmap(i / xy_pts.shape[0]) + ax.plot(x_line + x, gamma_power[i] + y, linewidth=0.5, color=color) + +############################################################################### +# We can project gamma power from the sensor data to the nearest locations on +# the pial surface and visualize that: +# +# As shown in the plot, the epileptiform activity starts in the temporal lobe, +# progressing posteriorly. The seizure becomes generalized eventually, after +# this example short time section. This dataset is available using +# :func:`mne.datasets.epilepsy_ecog.data_path` for you to examine. 
+ +# sphinx_gallery_thumbnail_number = 5 + +xyz_pts = np.array([dig['r'] for dig in evoked.info['dig']]) + +src = mne.read_source_spaces( + op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')) +trans = None # identity transform +stc = mne.stc_near_sensors(gamma_power_t, trans, 'fsaverage', src=src, + mode='nearest', subjects_dir=subjects_dir, + distance=0.02) +vmin, vmid, vmax = np.percentile(gamma_power_t.data, [10, 25, 90]) +clim = dict(kind='value', lims=[vmin, vmid, vmax]) +brain = stc.plot(surface='pial', hemi='rh', colormap='inferno', colorbar=False, + clim=clim, views=['lat', 'med'], subjects_dir=subjects_dir, + size=(250, 250), smoothing_steps=20, time_viewer=False) + +# plot electrode locations +for xyz in xyz_pts: + for subplot in (0, 1): + brain.plotter.subplot(subplot, 0) + brain._renderer.sphere(xyz * 1e3, color='white', scale=2) + +# You can save a movie like the one on our documentation website with: +# brain.save_movie(time_dilation=1, interpolation='linear', framerate=12, +# time_viewer=True) diff --git a/tutorials/sample-datasets/plot_sleep.py b/tutorials/clinical/60_sleep.py similarity index 89% rename from tutorials/sample-datasets/plot_sleep.py rename to tutorials/clinical/60_sleep.py index 7ec82b9bb81..e46a2e14eda 100644 --- a/tutorials/sample-datasets/plot_sleep.py +++ b/tutorials/clinical/60_sleep.py @@ -5,24 +5,22 @@ Sleep stage classification from polysomnography (PSG) data ========================================================== -.. note:: This code is taken from the analysis code used in [3]_. If you reuse - this code please consider citing this work. +.. note:: This code is taken from the analysis code used in + :footcite:`ChambonEtAl2018`. If you reuse this code please consider + citing this work. This tutorial explains how to perform a toy polysomnography analysis that answers the following question: -.. important:: Given two subjects from the Sleep Physionet dataset [1]_ [2]_, - namely *Alice* and *Bob*, how well can we predict the sleep - stages of *Bob* from *Alice's* data? +.. important:: Given two subjects from the Sleep Physionet dataset + :footcite:`KempEtAl2000,GoldbergerEtAl2000`, namely + *Alice* and *Bob*, how well can we predict the sleep stages of + *Bob* from *Alice's* data? This problem is tackled as supervised multiclass classification task. The aim is to predict the sleep stage from 5 possible stages for each chunk of 30 seconds of data. -.. contents:: This tutorial covers: - :local: - :depth: 2 - .. _Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html .. _FunctionTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html .. _physionet_labels: https://physionet.org/physiobank/database/sleep-edfx/#sleep-cassette-study-and-data @@ -57,7 +55,8 @@ # # MNE-Python provides us with # :func:`mne.datasets.sleep_physionet.age.fetch_data` to conveniently download -# data from the Sleep Physionet dataset [1]_ [2]_. +# data from the Sleep Physionet dataset +# :footcite:`KempEtAl2000,GoldbergerEtAl2000`. 
# Given a list of subjects and records, the fetcher downloads the data and # provides us for each subject, a pair of files: # @@ -78,8 +77,8 @@ [alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1]) mapping = {'EOG horizontal': 'eog', - 'Resp oro-nasal': 'misc', - 'EMG submental': 'misc', + 'Resp oro-nasal': 'resp', + 'EMG submental': 'emg', 'Temp rectal': 'misc', 'Event marker': 'misc'} @@ -90,7 +89,11 @@ raw_train.set_channel_types(mapping) # plot some data -raw_train.plot(duration=60, scalings='auto') +# scalings were chosen manually to allow for simultaneous visualization of +# different channel types in this specific dataset +raw_train.plot(start=60, duration=60, + scalings=dict(eeg=1e-4, resp=1e3, eog=1e-4, emg=1e-7, + misc=1e-1)) ############################################################################## # Extract 30s events from annotations @@ -303,20 +306,4 @@ def eeg_power_band(epochs): # # References # ---------- -# -# .. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis of -# a sleep-dependent neuronal feedback loop: the slow-wave -# microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000). -# -# .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, -# Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) -# PhysioBank, PhysioToolkit, and PhysioNet: Components of a New -# Research Resource for Complex Physiologic Signals. -# Circulation 101(23):e215-e220 -# -# .. [3] Chambon, S., Galtier, M., Arnal, P., Wainrib, G. and Gramfort, A. -# (2018)A Deep Learning Architecture for Temporal Sleep Stage -# Classification Using Multivariate and Multimodal Time Series. -# IEEE Trans. on Neural Systems and Rehabilitation Engineering 26: -# (758-769). -# +# .. footbibliography:: diff --git a/tutorials/clinical/README.txt b/tutorials/clinical/README.txt new file mode 100644 index 00000000000..43e5f701fa2 --- /dev/null +++ b/tutorials/clinical/README.txt @@ -0,0 +1,4 @@ +Clinical applications +--------------------- + +These tutorials illustrate clinical uses of MNE-Python. diff --git a/tutorials/discussions/README.txt b/tutorials/discussions/README.txt deleted file mode 100644 index 04e927de347..00000000000 --- a/tutorials/discussions/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -Discussions -=========== - -These tutorials offer longer, more nuanced discussions of key topics in the -analysis of neural data. diff --git a/tutorials/discussions/plot_background_ica.py b/tutorials/discussions/plot_background_ica.py deleted file mode 100644 index 89b93ae0bbe..00000000000 --- a/tutorials/discussions/plot_background_ica.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -.. _ica: - -================================================== -Background on Independent Component Analysis (ICA) -================================================== - -.. contents:: Contents - :local: - :depth: 2 - -Many M/EEG signals including biological artifacts reflect non-Gaussian -processes. Therefore PCA-based artifact rejection will likely perform worse at -separating the signal from noise sources. -MNE-Python supports identifying artifacts and latent components using temporal ICA. -MNE-Python implements the :class:`mne.preprocessing.ICA` class that facilitates applying ICA -to MEG and EEG data. Here we discuss some -basics of ICA. - -Concepts -======== - -ICA finds directions in the feature space corresponding to projections with high non-Gaussianity. - -- not necessarily orthogonal in the original feature space, but orthogonal in the whitened feature space. 
-- In contrast, PCA finds orthogonal directions in the raw feature - space that correspond to directions accounting for maximum variance. -- or differently, if data only reflect Gaussian processes ICA and PCA are equivalent. - - -**Example**: Imagine 3 instruments playing simultaneously and 3 microphones -recording mixed signals. ICA can be used to recover the sources ie. what is played by each instrument. - -ICA employs a very simple model: :math:`X = AS` where :math:`X` is our observations, :math:`A` is the mixing matrix and :math:`S` is the vector of independent (latent) sources. - -The challenge is to recover :math:`A` and :math:`S` from :math:`X`. - - -First generate simulated data ------------------------------ -""" # noqa: E501 - -import numpy as np -import matplotlib.pyplot as plt -from scipy import signal - -from sklearn.decomposition import FastICA, PCA - -np.random.seed(0) # set seed for reproducible results -n_samples = 2000 -time = np.linspace(0, 8, n_samples) - -s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal -s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal -s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: sawtooth signal - -S = np.c_[s1, s2, s3] -S += 0.2 * np.random.normal(size=S.shape) # Add noise - -S /= S.std(axis=0) # Standardize data -# Mix data -A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix -X = np.dot(S, A.T) # Generate observations - -############################################################################### -# Now try to recover the sources -# ------------------------------ - -# compute ICA -ica = FastICA(n_components=3) -S_ = ica.fit_transform(X) # Get the estimated sources -A_ = ica.mixing_ # Get estimated mixing matrix - -# compute PCA -pca = PCA(n_components=3) -H = pca.fit_transform(X) # estimate PCA sources - -plt.figure(figsize=(9, 6)) - -models = [X, S, S_, H] -names = ['Observations (mixed signal)', - 'True Sources', - 'ICA estimated sources', - 'PCA estimated sources'] -colors = ['red', 'steelblue', 'orange'] - -for ii, (model, name) in enumerate(zip(models, names), 1): - plt.subplot(4, 1, ii) - plt.title(name) - for sig, color in zip(model.T, colors): - plt.plot(sig, color=color) - -plt.tight_layout() - -############################################################################### -# :math:`\rightarrow` PCA fails at recovering our "instruments" since the -# related signals reflect non-Gaussian processes. diff --git a/tutorials/epochs/plot_10_epochs_overview.py b/tutorials/epochs/10_epochs_overview.py similarity index 96% rename from tutorials/epochs/plot_10_epochs_overview.py rename to tutorials/epochs/10_epochs_overview.py index 7d3d0c3dee5..50076701fd2 100644 --- a/tutorials/epochs/plot_10_epochs_overview.py +++ b/tutorials/epochs/10_epochs_overview.py @@ -13,10 +13,6 @@ creating an :class:`~mne.Epochs` object from (possibly simulated) data in a :class:`NumPy array `, see :ref:`tut_creating_data_structures`. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need: """ @@ -184,16 +180,14 @@ ############################################################################### # Notice that the individual epochs are sequentially numbered along the bottom -# axis; the event ID associated with the epoch is marked on the top axis; -# epochs are separated by vertical dashed lines; and a vertical solid green -# line marks time=0 for each epoch (i.e., in this case, the stimulus onset -# time for each trial). 
Epoch plots are interactive (similar to -# :meth:`raw.plot() `) and have many of the same interactive -# controls as :class:`~mne.io.Raw` plots. Horizontal and vertical scrollbars -# allow browsing through epochs or channels (respectively), and pressing -# :kbd:`?` when the plot is focused will show a help screen with all the -# available controls. See :ref:`tut-visualize-epochs` for more details (as well -# as other ways of visualizing epoched data). +# axis and are separated by vertical dashed lines. +# Epoch plots are interactive (similar to :meth:`raw.plot() +# `) and have many of the same interactive controls as +# :class:`~mne.io.Raw` plots. Horizontal and vertical scrollbars allow browsing +# through epochs or channels (respectively), and pressing :kbd:`?` when the +# plot is focused will show a help screen with all the available controls. See +# :ref:`tut-visualize-epochs` for more details (as well as other ways of +# visualizing epoched data). # # # .. _tut-section-subselect-epochs: diff --git a/tutorials/epochs/plot_20_visualize_epochs.py b/tutorials/epochs/20_visualize_epochs.py similarity index 99% rename from tutorials/epochs/plot_20_visualize_epochs.py rename to tutorials/epochs/20_visualize_epochs.py index ca790a0f7a7..f86dd4146a0 100644 --- a/tutorials/epochs/plot_20_visualize_epochs.py +++ b/tutorials/epochs/20_visualize_epochs.py @@ -9,10 +9,6 @@ plot the sensor locations and projectors stored in `~mne.Epochs` objects. -.. contents:: Page contents - :local: - :depth: 2 - We'll start by importing the modules we need, loading the continuous (raw) sample data, and cropping it to save memory: """ diff --git a/tutorials/epochs/plot_30_epochs_metadata.py b/tutorials/epochs/30_epochs_metadata.py similarity index 90% rename from tutorials/epochs/plot_30_epochs_metadata.py rename to tutorials/epochs/30_epochs_metadata.py index f4f6dabd695..e3a3b7e9398 100644 --- a/tutorials/epochs/plot_30_epochs_metadata.py +++ b/tutorials/epochs/30_epochs_metadata.py @@ -4,18 +4,14 @@ Working with Epoch metadata =========================== -This tutorial shows how to add metadata to :class:`~mne.Epochs` objects, and +This tutorial shows how to add metadata to `~mne.Epochs` objects, and how to use :ref:`Pandas query strings ` to select and plot epochs based on metadata properties. -.. contents:: Page contents - :local: - :depth: 2 - For this tutorial we'll use a different dataset than usual: the :ref:`kiloword-dataset`, which contains EEG data averaged across 75 subjects who were performing a lexical decision (word/non-word) task. The data is in -:class:`~mne.Epochs` format, with each epoch representing the response to a +`~mne.Epochs` format, with each epoch representing the response to a different stimulus (word). As usual we'll start by importing the modules we need and loading the data: """ @@ -42,10 +38,10 @@ # and the row labels are always integers corresponding to epoch numbers. # Other capabilities of :class:`DataFrames ` such as # :class:`hierarchical indexing ` are possible while the -# :class:`~mne.Epochs` object is in memory, but will not survive saving and -# reloading the :class:`~mne.Epochs` object to/from disk. +# `~mne.Epochs` object is in memory, but will not survive saving and +# reloading the `~mne.Epochs` object to/from disk. # -# The metadata attached to :class:`~mne.Epochs` objects is stored as a +# The metadata attached to `~mne.Epochs` objects is stored as a # :class:`pandas.DataFrame` containing one row for each epoch. 
The columns of # this :class:`~pandas.DataFrame` can contain just about any information you # want to store about each epoch; in this case, the metadata encodes @@ -96,9 +92,9 @@ # Selecting epochs using metadata queries # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # -# All :class:`~mne.Epochs` objects can be subselected by event name, index, or +# All `~mne.Epochs` objects can be subselected by event name, index, or # :term:`slice` (see :ref:`tut-section-subselect-epochs`). But -# :class:`~mne.Epochs` objects with metadata can also be queried using +# `~mne.Epochs` objects with metadata can also be queried using # :ref:`Pandas query strings ` by passing the query # string just as you would normally pass an event name. For example: @@ -162,7 +158,7 @@ # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # You can add a metadata :class:`~pandas.DataFrame` to any -# :class:`~mne.Epochs` object (or replace existing metadata) simply by +# `~mne.Epochs` object (or replace existing metadata) simply by # assigning to the :attr:`~mne.Epochs.metadata` attribute: new_metadata = pd.DataFrame(data=['foo'] * len(epochs), columns=['bar'], @@ -171,7 +167,7 @@ epochs.metadata.head() ############################################################################### -# You can remove metadata from an :class:`~mne.Epochs` object by setting its +# You can remove metadata from an `~mne.Epochs` object by setting its # metadata to ``None``: epochs.metadata = None diff --git a/tutorials/epochs/40_autogenerate_metadata.py b/tutorials/epochs/40_autogenerate_metadata.py new file mode 100644 index 00000000000..5fb66fdd80f --- /dev/null +++ b/tutorials/epochs/40_autogenerate_metadata.py @@ -0,0 +1,437 @@ +""" +.. _tut-autogenerate-metadata: + +Auto-generating ``Epochs`` metadata +=================================== + +This tutorial shows how to auto-generate metadata for `~mne.Epochs`, based on +events via `mne.epochs.make_metadata`. + +We are going to use data from the :ref:`erp-core-dataset` (derived from +:footcite:`Kappenman2021`). This is EEG data from a single participant +performing an active visual task (Eriksen flanker task). + +.. note:: + If you wish to skip the introductory parts of this tutorial, you may jump + straight to :ref:`tut-autogenerate-metadata-ern` after completing the data + import and event creation in the + :ref:`tut-autogenerate-metadata-preparation` section. + +This tutorial is loosely divided into two parts: + +1. We will first focus on producing ERP time-locked to the **visual + stimulation**, conditional on response correctness and response time in + order to familiarize ourselves with the `~mne.epochs.make_metadata` + function. +2. After that, we will calculate ERPs time-locked to the **responses** – again, + conditional on response correctness – to visualize the error-related + negativity (ERN), i.e. the ERP component associated with incorrect + behavioral responses. + + +.. _tut-autogenerate-metadata-preparation: + +Preparation +^^^^^^^^^^^ + +Let's start by reading, filtering, and producing a simple visualization of the +raw data. The data is pretty clean and contains very few blinks, so there's no +need to apply sophisticated preprocessing and data cleaning procedures. +We will also convert the `~mne.Annotations` contained in this dataset to events +by calling `mne.events_from_annotations`. 
+""" + +from pathlib import Path +import matplotlib.pyplot as plt +import mne + + +data_dir = Path(mne.datasets.erp_core.data_path()) +infile = data_dir / 'ERP-CORE_Subject-001_Task-Flankers_eeg.fif' + +raw = mne.io.read_raw(infile, preload=True) +raw.filter(l_freq=0.1, h_freq=40) +raw.plot(start=60) + +# extract events +all_events, all_event_id = mne.events_from_annotations(raw) + +############################################################################### +# Creating metadata from events +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# The basics of ``make_metadata`` +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Now it's time to think about the time windows to use for epoching and +# metadata generation. **It is important to understand that these time windows +# need not be the same!** That is, the automatically generated metadata might +# include information about events from only a fraction of the epochs duration; +# or it might include events that occurred well outside a given epoch. +# +# Let us look at a concrete example. In the Flankers task of the ERP CORE +# dataset, participants were required to respond to visual stimuli by pressing +# a button. We're interested in looking at the visual evoked responses (ERPs) +# of trials with correct responses. Assume that based on literature +# studies, we decide that responses later than 1500 ms after stimulus onset are +# to be considered invalid, because they don't capture the neuronal processes +# of interest here. We can approach this in the following way with the help of +# `mne.epochs.make_metadata`: + +# metadata for each epoch shall include events from the range: [0.0, 1.5] s, +# i.e. starting with stimulus onset and expanding beyond the end of the epoch +metadata_tmin, metadata_tmax = 0.0, 1.5 + +# auto-create metadata +# this also returns a new events array and an event_id dictionary. we'll see +# later why this is important +metadata, events, event_id = mne.epochs.make_metadata( + events=all_events, event_id=all_event_id, + tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq']) + +# let's look at what we got! +metadata + +############################################################################### +# Specifying time-locked events +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# We can see that the generated table has 802 rows, each one corresponding to +# an individual event in ``all_events``. The first column, ``event_name``, +# contains the name of the respective event around which the metadata of that +# specific column was generated – we'll call that the "time-locked event", +# because we'll assign it time point zero. +# +# The names of the remaining columns correspond to the event names specified in +# the ``all_event_id`` dictionary. These columns contain floats; the values +# represent the latency of that specific event in seconds, relative to +# the time-locked event (the one mentioned in the ``event_name`` column). +# For events that didn't occur within the given time window, you'll see +# a value of ``NaN``, simply indicating that no event latency could be +# extracted. +# +# Now, there's a problem here. We want investigate the visual ERPs only, +# conditional on responses. But the metadata that was just created contains +# one row for **every** event, including responses. 
While we **could** create +# epochs for all events, allowing us to pass those metadata, and later subset +# the created events, there's a more elegant way to handle things: +# `~mne.epochs.make_metadata` has a ``row_events`` parameter that +# allows us to specify for which events to create metadata **rows**, while +# still creating **columns for all events** in the ``event_id`` dictionary. +# +# Because the metadata, then, only pertains to a subset of our original events, +# it's important to keep the returned ``events`` and ``event_id`` around for +# later use when we're actually going to create our epochs, to ensure that +# metadata, events, and event descriptions stay in sync. + +row_events = ['stimulus/compatible/target_left', + 'stimulus/compatible/target_right', + 'stimulus/incompatible/target_left', + 'stimulus/incompatible/target_right'] + +metadata, events, event_id = mne.epochs.make_metadata( + events=all_events, event_id=all_event_id, + tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'], + row_events=row_events) + +metadata + +############################################################################### +# Keeping only the first events of a group +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The metadata now contains 400 rows – one per stimulation – and the same +# number of columns as before. Great! +# +# We have two types of responses in our data: ``response/left`` and +# ``response/right``. We would like to map those to "correct" and "incorrect". +# To make this easier, we can ask `~mne.epochs.make_metadata` to generate an +# entirely **new** column that refers to the first response observed during the +# given time interval. This works by passing a subset of the +# :term:`hierarchical event descriptors` (HEDs, inspired by +# :footcite:`BigdelyShamloEtAl2013`) used to name events via the ``keep_first`` +# parameter. For example, in the case of the HEDs ``response/left`` and +# ``response/right``, we could pass ``keep_first='response'`` to generate a new +# column, ``response``, containing the latency of the respective event. This +# value pertains only the first (or, in this specific example: the only) +# response, regardless of side (left or right). To indicate **which** event +# type (here: response side) was matched, a second column is added: +# ``first_response``. The values in this column are the event types without the +# string used for matching, as it is already encoded as the column name, i.e. +# in our example, we expect it to only contain ``'left'`` and ``'right'``. + +keep_first = 'response' +metadata, events, event_id = mne.epochs.make_metadata( + events=all_events, event_id=all_event_id, + tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'], + row_events=row_events, + keep_first=keep_first) + +# visualize response times regardless of side +metadata['response'].plot.hist(bins=50, title='Response Times') + +# the "first_response" column contains only "left" and "right" entries, derived +# from the initial event named "response/left" and "response/right" +print(metadata['first_response']) + +############################################################################### +# We're facing a similar issue with the stimulus events, and now there are not +# only two, but **four** different types: ``stimulus/compatible/target_left``, +# ``stimulus/compatible/target_right``, ``stimulus/incompatible/target_left``, +# and ``stimulus/incompatible/target_right``. 
Even more, because in the present +# paradigm stimuli were presented in rapid succession, sometimes multiple +# stimulus events occurred within the 1.5 second time window we're using to +# generate our metadata. See for example: + +metadata.loc[metadata['stimulus/compatible/target_left'].notna() & + metadata['stimulus/compatible/target_right'].notna(), + :] + +############################################################################### +# This can easily lead to confusion during later stages of processing, so let's +# create a column for the first stimulus – which will always be the time-locked +# stimulus, as our time interval starts at 0 seconds. We can pass a **list** of +# strings to ``keep_first``. + +keep_first = ['stimulus', 'response'] +metadata, events, event_id = mne.epochs.make_metadata( + events=all_events, event_id=all_event_id, + tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'], + row_events=row_events, + keep_first=keep_first) + +# all times of the time-locked events should be zero +assert all(metadata['stimulus'] == 0) + +# the values in the new "first_stimulus" and "first_response" columns indicate +# which events were selected via "keep_first" +metadata[['first_stimulus', 'first_response']] + +############################################################################### +# Adding new columns to describe stimulation side and response correctness +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# Perfect! Now it's time to define which responses were correct and incorrect. +# We first add a column encoding the side of stimulation, and then simply +# check whether the response matches the stimulation side, and add this result +# to another column. + +# left-side stimulation +metadata.loc[metadata['first_stimulus'].isin(['compatible/target_left', + 'incompatible/target_left']), + 'stimulus_side'] = 'left' + +# right-side stimulation +metadata.loc[metadata['first_stimulus'].isin(['compatible/target_right', + 'incompatible/target_right']), + 'stimulus_side'] = 'right' + +# first assume all responses were incorrect, then mark those as correct where +# the stimulation side matches the response side +metadata['response_correct'] = False +metadata.loc[metadata['stimulus_side'] == metadata['first_response'], + 'response_correct'] = True + + +correct_response_count = metadata['response_correct'].sum() +print(f'Correct responses: {correct_response_count}\n' + f'Incorrect responses: {len(metadata) - correct_response_count}') + +############################################################################### +# Creating ``Epochs`` with metadata, and visualizing ERPs +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# It's finally time to create our epochs! We set the metadata directly on +# instantiation via the ``metadata`` parameter. Also it is important to +# remember to pass ``events`` and ``event_id`` as returned from +# `~mne.epochs.make_metadata`, as we only created metadata for a subset of +# our original events by passing ``row_events``. Otherwise, the length +# of the metadata and the number of epochs would not match and MNE-Python +# would raise an error. 
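A minimal editorial sketch (assuming the ``metadata`` and ``events`` returned by ``make_metadata`` above): since the metadata has exactly one row per returned event, a quick length check before constructing the epochs can catch accidental mismatches.

# editorial sketch: confirm metadata and events are still in sync
assert len(metadata) == len(events)
print(f'{len(events)} events and {len(metadata)} metadata rows')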
+ +epochs_tmin, epochs_tmax = -0.1, 0.4 # epochs range: [-0.1, 0.4] s +reject = {'eeg': 250e-6} # exclude epochs with strong artifacts +epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax, + events=events, event_id=event_id, metadata=metadata, + reject=reject, preload=True) + +############################################################################### +# Lastly, let's visualize the ERPs evoked by the visual stimulation, once for +# all trials with correct responses, and once for all trials with correct +# responses and a response time greater than 0.5 seconds +# (i.e., slow responses). +vis_erp = epochs['response_correct'].average() +vis_erp_slow = epochs['(not response_correct) & ' + '(response > 0.3)'].average() + +fig, ax = plt.subplots(2, figsize=(6, 6)) +vis_erp.plot(gfp=True, spatial_colors=True, axes=ax[0]) +vis_erp_slow.plot(gfp=True, spatial_colors=True, axes=ax[1]) +ax[0].set_title('Visual ERPs – All Correct Responses') +ax[1].set_title('Visual ERPs – Slow Correct Responses') +fig.tight_layout() +fig + +############################################################################### +# Aside from the fact that the data for the (much fewer) slow responses looks +# noisier – which is entirely to be expected – not much of an ERP difference +# can be seen. +# +# .. _tut-autogenerate-metadata-ern: +# +# Applying the knowledge: visualizing the ERN component +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# In the following analysis, we will use the same dataset as above, but +# we'll time-lock our epochs to the **response events,** not to the stimulus +# onset. Comparing ERPs associated with correct and incorrect behavioral +# responses, we should be able to see the error-related negativity (ERN) in +# the difference wave. +# +# Since we want to time-lock our analysis to responses, for the automated +# metadata generation we'll consider events occurring up to 1500 ms before +# the response trigger. +# +# We only wish to consider the **last** stimulus and response in each time +# window: Remember that we're dealing with rapid stimulus presentations in +# this paradigm; taking the last response – at time point zero – and the last +# stimulus – the one closest to the response – ensures we actually create +# the right stimulus-response pairings. We can achieve this by passing the +# ``keep_last`` parameter, which works exactly like ``keep_first`` we got to +# know above, only that it keeps the **last** occurrences of the specified +# events and stores them in columns whose names start with ``last_``. + +metadata_tmin, metadata_tmax = -1.5, 0 +row_events = ['response/left', 'response/right'] +keep_last = ['stimulus', 'response'] + +metadata, events, event_id = mne.epochs.make_metadata( + events=all_events, event_id=all_event_id, + tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'], + row_events=row_events, + keep_last=keep_last) + +############################################################################### +# Exactly like in the previous example, create new columns ``stimulus_side`` +# and ``response_correct``. 
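For reference, a more compact, purely editorial alternative using pandas string methods is sketched below (it assumes the ``last_stimulus`` and ``last_response`` columns created via ``keep_last`` above); the tutorial's explicit version follows.

# editorial sketch: the stimulated side is the substring after the final '_'
metadata['stimulus_side'] = metadata['last_stimulus'].str.split('_').str[-1]
metadata['response_correct'] = (
    metadata['stimulus_side'] == metadata['last_response'])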
+ +# left-side stimulation +metadata.loc[metadata['last_stimulus'].isin(['compatible/target_left', + 'incompatible/target_left']), + 'stimulus_side'] = 'left' + +# right-side stimulation +metadata.loc[metadata['last_stimulus'].isin(['compatible/target_right', + 'incompatible/target_right']), + 'stimulus_side'] = 'right' + +# first assume all responses were incorrect, then mark those as correct where +# the stimulation side matches the response side +metadata['response_correct'] = False +metadata.loc[metadata['stimulus_side'] == metadata['last_response'], + 'response_correct'] = True + +metadata + +############################################################################### +# Now it's already time to epoch the data! When deciding upon the epochs +# duration for this specific analysis, we need to ensure we see quite a bit of +# signal from before and after the motor response. We also must be aware of +# the fact that motor-/muscle-related signals will most likely be present +# **before** the response button trigger pulse appears in our data, so the time +# period close to the response event should not be used for baseline +# correction. But at the same time, we don't want to use a baseline +# period that extends too far away from the button event. The following values +# seem to work quite well. + +epochs_tmin, epochs_tmax = -0.6, 0.4 +baseline = (-0.4, -0.2) +reject = {'eeg': 250e-6} +epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax, + baseline=baseline, reject=reject, + events=events, event_id=event_id, metadata=metadata, + preload=True) + +############################################################################### +# Let's do a final sanity check: we want to make sure that in every row, we +# actually have a stimulus. We use ``epochs.metadata`` (and not ``metadata``) +# because when creating the epochs, we passed the ``reject`` parameter, and +# MNE-Python always ensures that ``epochs.metadata`` stays in sync with the +# available epochs. + +epochs.metadata.loc[epochs.metadata['last_stimulus'].isna(), :] + +############################################################################### +# Bummer! It seems the very first two responses were recorded before the +# first stimulus appeared: the values in the ``stimulus`` column are ``None``. +# There is a very simple way to select only those epochs that **do** have a +# stimulus (i.e., are not ``None``): + +epochs = epochs['last_stimulus.notna()'] + +############################################################################### +# Time to calculate the ERPs for correct and incorrect responses. +# For visualization, we'll only look at sensor ``FCz``, which is known to show +# the ERN nicely in the given paradigm. We'll also create a topoplot to get an +# impression of the average scalp potentials measured in the first 100 ms after +# an incorrect response. + +resp_erp_correct = epochs['response_correct'].average() +resp_erp_incorrect = epochs['not response_correct'].average() + +mne.viz.plot_compare_evokeds({'Correct Response': resp_erp_correct, + 'Incorrect Response': resp_erp_incorrect}, + picks='FCz', show_sensors=True, + title='ERPs at FCz, time-locked to response') + +# topoplot of average field from time 0.0-0.1 s +resp_erp_incorrect.plot_topomap(times=0.05, average=0.05, size=3, + title='Avg. 
topography 0–100 ms after ' + 'incorrect responses') + +############################################################################### +# We can see a strong negative deflection immediately after incorrect +# responses, compared to correct responses. The topoplot, too, leaves no doubt: +# what we're looking at is, in fact, the ERN. +# +# Some researchers suggest to construct the difference wave between ERPs for +# correct and incorrect responses, as it more clearly reveals signal +# differences, while ideally also improving the signal-to-noise ratio (under +# the assumption that the noise level in "correct" and "incorrect" trials is +# similar). Let's do just that and put it into a publication-ready +# visualization. + +# difference wave: incorrect minus correct responses +resp_erp_diff = mne.combine_evoked([resp_erp_incorrect, resp_erp_correct], + weights=[1, -1]) + +fig, ax = plt.subplots() +resp_erp_diff.plot(picks='FCz', axes=ax, selectable=False, show=False) + +# make ERP trace bolder +ax.lines[0].set_linewidth(1.5) + +# add lines through origin +ax.axhline(0, ls='dotted', lw=0.75, color='gray') +ax.axvline(0, ls=(0, (10, 10)), lw=0.75, color='gray', + label='response trigger') + +# mark trough +trough_time_idx = resp_erp_diff.copy().pick('FCz').data.argmin() +trough_time = resp_erp_diff.times[trough_time_idx] +ax.axvline(trough_time, ls=(0, (10, 10)), lw=0.75, color='red', + label='max. negativity') + +# legend, axis labels, title +ax.legend(loc='lower left') +ax.set_xlabel('Time (s)', fontweight='bold') +ax.set_ylabel('Amplitude (µV)', fontweight='bold') +ax.set_title('Channel: FCz') +fig.suptitle('ERN (Difference Wave)', fontweight='bold') + +fig + +############################################################################### +# References +# ^^^^^^^^^^ +# .. footbibliography:: diff --git a/tutorials/epochs/plot_40_epochs_to_data_frame.py b/tutorials/epochs/50_epochs_to_data_frame.py similarity index 99% rename from tutorials/epochs/plot_40_epochs_to_data_frame.py rename to tutorials/epochs/50_epochs_to_data_frame.py index 5a2bc877f9d..0341e0c4e6c 100644 --- a/tutorials/epochs/plot_40_epochs_to_data_frame.py +++ b/tutorials/epochs/50_epochs_to_data_frame.py @@ -9,10 +9,6 @@ :doc:`split-apply-combine ` workflow to examine the latencies of the response maxima across epochs and conditions. -.. contents:: Page contents - :local: - :depth: 2 - We'll use the :ref:`sample-dataset` dataset, but load a version of the raw file that has already been filtered and downsampled, and has an average reference applied to its EEG channels. As usual we'll start by importing the modules we diff --git a/tutorials/epochs/60_make_fixed_length_epochs.py b/tutorials/epochs/60_make_fixed_length_epochs.py new file mode 100644 index 00000000000..7aac9131a79 --- /dev/null +++ b/tutorials/epochs/60_make_fixed_length_epochs.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +""" +.. _tut-fixed-length-epochs: + +Creating epochs of equal length +=============================== + +This tutorial shows how to create equal length epochs and briefly demonstrates +an example of their use in connectivity analysis. + +First, we import necessary modules and read in a sample raw +data set. This data set contains brain activity that is event-related, i.e. +synchronized to the onset of auditory stimuli. However, rather than creating +epochs by segmenting the data around the onset of each stimulus, we will +create 30 second epochs that allow us to perform non-event-related analyses of +the signal. 
+""" + +import os +import numpy as np +import matplotlib.pyplot as plt +import mne +from mne.preprocessing import compute_proj_ecg + +sample_data_folder = mne.datasets.sample.data_path() +sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', + 'sample_audvis_raw.fif') + +raw = mne.io.read_raw_fif(sample_data_raw_file) + +############################################################################### +# For this tutorial we'll crop and resample the raw data to a manageable size +# for our web server to handle, ignore EEG channels, and remove the heartbeat +# artifact so we don't get spurious correlations just because of that. + +raw.crop(tmax=150).resample(100).pick('meg') +ecg_proj, _ = compute_proj_ecg(raw, ch_name='MEG 0511') # No ECG chan +raw.add_proj(ecg_proj) +raw.apply_proj() + +############################################################################### +# To create fixed length epochs, we simply call the function and provide it +# with the appropriate parameters indicating the desired duration of epochs in +# seconds, whether or not to preload data, whether or not to reject epochs that +# overlap with raw data segments annotated as bad, whether or not to include +# projectors, and finally whether or not to be verbose. Here, we choose a long +# epoch duration (30 seconds). To conserve memory, we set ``preload`` to +# ``False``. + +epochs = mne.make_fixed_length_epochs(raw, duration=30, preload=False) + +############################################################################### +# Characteristics of Fixed Length Epochs +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Fixed length epochs are generally unsuitable for event-related analyses. This +# can be seen in an image map of our fixed length +# epochs. When the epochs are averaged, as seen at the bottom of the plot, +# misalignment between onsets of event-related activity results in noise. + +event_related_plot = epochs.plot_image(picks=['MEG 1142']) + +############################################################################### +# For information about creating epochs for event-related analyses, please see +# :ref:`tut-epochs-class`. +# +# Example Use Case for Fixed Length Epochs: Connectivity Analysis +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Fixed lengths epochs are suitable for many types of analysis, including +# frequency or time-frequency analyses, connectivity analyses, or +# classification analyses. Here we briefly illustrate their utility in a sensor +# space connectivity analysis. +# +# The data from our epochs object has shape ``(n_epochs, n_sensors, n_times)`` +# and is therefore an appropriate basis for using MNE-Python's envelope +# correlation function to compute power-based connectivity in sensor space. The +# long duration of our fixed length epochs, 30 seconds, helps us reduce edge +# artifacts and achieve better frequency resolution when filtering must +# be applied after epoching. +# +# Let's examine the alpha band. We allow default values for filter parameters +# (for more information on filtering, please see :ref:`tut-filter-resample`). + +epochs.load_data().filter(l_freq=8, h_freq=12) +alpha_data = epochs.get_data() + +############################################################################### +# If desired, separate correlation matrices for each epoch can be obtained. +# For envelope correlations, this is done by passing ``combine=None`` to the +# envelope correlations function. 
+ +corr_matrix = mne.connectivity.envelope_correlation(alpha_data, combine=None) + +############################################################################### +# Now we can plot correlation matrices. We'll compare the first and last +# 30-second epochs of the recording: + +first_30 = corr_matrix[0] +last_30 = corr_matrix[-1] +corr_matrices = [first_30, last_30] +color_lims = np.percentile(np.array(corr_matrices), [5, 95]) +titles = ['First 30 Seconds', 'Last 30 Seconds'] + +fig, axes = plt.subplots(nrows=1, ncols=2) +fig.suptitle('Correlation Matrices from First 30 Seconds and Last 30 Seconds') +for ci, corr_matrix in enumerate(corr_matrices): + ax = axes[ci] + mpbl = ax.imshow(corr_matrix, clim=color_lims) + ax.set_xlabel(titles[ci]) +fig.subplots_adjust(right=0.8) +cax = fig.add_axes([0.85, 0.2, 0.025, 0.6]) +cbar = fig.colorbar(ax.images[0], cax=cax) +cbar.set_label('Correlation Coefficient') diff --git a/tutorials/epochs/README.txt b/tutorials/epochs/README.txt index a791c9d3b53..c38eb5b0f58 100644 --- a/tutorials/epochs/README.txt +++ b/tutorials/epochs/README.txt @@ -1,5 +1,5 @@ Segmenting continuous data into epochs -====================================== +-------------------------------------- These tutorials cover epoched data, and how it differs from working with continuous data. diff --git a/tutorials/evoked/plot_10_evoked_overview.py b/tutorials/evoked/10_evoked_overview.py similarity index 94% rename from tutorials/evoked/plot_10_evoked_overview.py rename to tutorials/evoked/10_evoked_overview.py index 257e106f2ed..ed1bad93d06 100644 --- a/tutorials/evoked/plot_10_evoked_overview.py +++ b/tutorials/evoked/10_evoked_overview.py @@ -11,10 +11,6 @@ object from (possibly simulated) data in a :class:`NumPy array `, see :ref:`tut_creating_data_structures`. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need: """ @@ -48,6 +44,19 @@ del raw # reduce memory usage +############################################################################### +# You may have noticed that MNE informed us that "baseline correction" has been +# applied. This happened automatically by during creation of the +# `~mne.Epochs` object, but may also be initiated (or disabled!) manually: +# We will discuss this in more detail later. +# +# The information about the baseline period of `~mne.Epochs` is transferred to +# derived `~mne.Evoked` objects to maintain provenance as you process your +# data: + +print(f'Epochs baseline: {epochs.baseline}') +print(f'Evoked baseline: {evoked.baseline}') + ############################################################################### # Basic visualization of ``Evoked`` objects # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -63,7 +72,7 @@ # :meth:`Epochs ` objects, # :meth:`evoked.plot() ` has many parameters for customizing # the plot output, such as color-coding channel traces by scalp location, or -# plotting the :term:`global field power ` alongside the channel traces. +# plotting the :term:`global field power` alongside the channel traces. # See :ref:`tut-visualize-evoked` for more information about visualizing # :class:`~mne.Evoked` objects. # @@ -219,7 +228,14 @@ # :func:`mne.read_evokeds`, or by applying baseline correction after loading, # as shown here: +# Original baseline (none set). +print(f'Baseline after loading: {evokeds_list[0].baseline}') + +# Apply a custom baseline correction. 
evokeds_list[0].apply_baseline((None, 0)) +print(f'Baseline after calling apply_baseline(): {evokeds_list[0].baseline}') + +# Visualize the evoked response. evokeds_list[0].plot(picks='eeg') ############################################################################### diff --git a/tutorials/evoked/plot_20_visualize_evoked.py b/tutorials/evoked/20_visualize_evoked.py similarity index 95% rename from tutorials/evoked/plot_20_visualize_evoked.py rename to tutorials/evoked/20_visualize_evoked.py index 658df8fc153..da49de65129 100644 --- a/tutorials/evoked/plot_20_visualize_evoked.py +++ b/tutorials/evoked/20_visualize_evoked.py @@ -7,10 +7,6 @@ This tutorial shows the different visualization methods for `~mne.Evoked` objects. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need: """ @@ -33,9 +29,11 @@ 'sample_audvis-ave.fif') evokeds_list = mne.read_evokeds(sample_data_evk_file, baseline=(None, 0), proj=True, verbose=False) -# show the condition names + +# Show the condition names, and reassure ourselves that baseline correction has +# been applied. for e in evokeds_list: - print(e.comment) + print(f'Condition: {e.comment}, baseline: {e.baseline}') ############################################################################### # To make our life easier, let's convert that list of `~mne.Evoked` @@ -57,8 +55,8 @@ # .. sidebar:: Butterfly plots # # Plots of superimposed sensor timeseries are called "butterfly plots" -# because the positive- and negative-going traces can resemble -# butterfly wings. +# because the positive- and negative-going traces can resemble butterfly +# wings. # # The most basic plot of `~mne.Evoked` objects is a butterfly plot of # each channel type, generated by the `evoked.plot() ` @@ -75,7 +73,12 @@ # select channels to plot by name, index, or type. In the next plot we'll show # only magnetometer channels, and also color-code the channel traces by their # location by passing ``spatial_colors=True``. Finally, we'll superimpose a -# trace of the :term:`global field power ` across channels: +# trace of the root mean square (RMS) of the signal across channels by +# passing ``gfp=True``. This parameter is called ``gfp`` for historical +# reasons and behaves correctly for all supported channel types: for MEG data, +# it will plot the RMS; while for EEG, it would plot the +# :term:`global field power ` (an average-referenced RMS), hence its +# name: evks['aud/left'].plot(picks='mag', spatial_colors=True, gfp=True) @@ -140,7 +143,7 @@ # all on the same axes. Like most MNE-Python visualization functions, it has a # ``picks`` parameter for selecting channels, but by default will generate one # figure for each channel type, and combine information across channels of the -# same type by calculating the :term:`global field power `. Information +# same type by calculating the :term:`global field power`. Information # may be combined across channels in other ways too; support for combining via # mean, median, or standard deviation are built-in, and custom callable # functions may also be used, as shown here: diff --git a/tutorials/evoked/plot_eeg_erp.py b/tutorials/evoked/30_eeg_erp.py similarity index 81% rename from tutorials/evoked/plot_eeg_erp.py rename to tutorials/evoked/30_eeg_erp.py index df5c062fd27..110d0a0f50a 100644 --- a/tutorials/evoked/plot_eeg_erp.py +++ b/tutorials/evoked/30_eeg_erp.py @@ -4,12 +4,10 @@ EEG processing and Event Related Potentials (ERPs) ================================================== -.. 
contents:: Here we cover the specifics of EEG, namely: - :local: - :depth: 1 - """ +import matplotlib.pyplot as plt + import mne from mne.datasets import sample from mne.channels import combine_channels @@ -108,6 +106,45 @@ evoked_custom.plot(titles=dict(eeg=title), time_unit='s') evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s') +############################################################################### +# Global field power (GFP) +# ------------------------ +# +# Global field power :footcite:`Lehmann1980,Lehmann1984,Murray2008` is, +# generally speaking, a measure of agreement of the signals picked up by all +# sensors across the entire scalp: if all sensors have the same value at a +# given time point, the GFP will be zero at that time point; if the signals +# differ, the GFP will be non-zero at that time point. GFP +# peaks may reflect "interesting" brain activity, warranting further +# investigation. Mathematically, the GFP is the population standard +# deviation across all sensors, calculated separately for every time point. +# +# You can plot the GFP using `evoked.plot(gfp=True) `. The GFP +# trace will be black if ``spatial_colors=True`` and green otherwise. The EEG +# reference will not affect the GFP: + +for evk in (evoked_car, evoked_no_ref): + evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-10, 10])) + +############################################################################### +# To plot the GFP by itself you can pass ``gfp='only'`` (this makes it easier +# to read off the GFP data values, because the scale is aligned): + +evoked_car.plot(gfp='only') + +############################################################################### +# As stated above, the GFP is the population standard deviation of the signal +# across channels. To compute it manually, we can leverage +# the fact that `evoked.data ` is a NumPy array: + +gfp = evoked_car.data.std(axis=0, ddof=0) + +# Reproducing the plot style from above: +fig, ax = plt.subplots() +ax.plot(evoked_car.times, gfp * 1e6, color='lime') +ax.fill_between(evoked_car.times, gfp * 1e6, color='lime', alpha=0.2) +ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG') + ############################################################################### # Evoked response averaged across channels by ROI # ----------------------------------------------- @@ -200,3 +237,9 @@ # Besides for explicit access, this can be used for example to set titles. for cond in all_evokeds: all_evokeds[cond].plot_joint(title=cond, **joint_kwargs) + + +############################################################################## +# References +# ---------- +# .. footbibliography:: diff --git a/tutorials/evoked/plot_whitened.py b/tutorials/evoked/40_whitened.py similarity index 100% rename from tutorials/evoked/plot_whitened.py rename to tutorials/evoked/40_whitened.py diff --git a/tutorials/evoked/README.txt b/tutorials/evoked/README.txt index 5430fa4d0b2..9e2160c4457 100644 --- a/tutorials/evoked/README.txt +++ b/tutorials/evoked/README.txt @@ -1,5 +1,5 @@ Estimating evoked responses -=========================== +--------------------------- These tutorials cover estimates of evoked responses (i.e., averages across several repetitions of an experimental condition). 
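To make the distinction between RMS and global field power (GFP) drawn in the evoked tutorials above concrete, here is a small self-contained NumPy sketch (synthetic numbers, not the sample data). It illustrates two claims from the text: the GFP (population standard deviation across channels) does not depend on the EEG reference, and it equals the plain RMS exactly when the data are average-referenced.

import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(5, 100) + 2.  # 5 "channels" x 100 time points, nonzero mean

rms = np.sqrt((data ** 2).mean(axis=0))  # RMS across channels
gfp = data.std(axis=0, ddof=0)           # population std across channels = GFP
print(np.allclose(rms, gfp))             # False: data are not average-referenced

data_avg_ref = data - data.mean(axis=0)  # apply an average reference
print(np.allclose(data_avg_ref.std(axis=0, ddof=0), gfp))           # True: GFP unchanged
print(np.allclose(np.sqrt((data_avg_ref ** 2).mean(axis=0)), gfp))  # True: RMS now equals GFP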
diff --git a/tutorials/source-modeling/plot_background_freesurfer.py b/tutorials/forward/10_background_freesurfer.py similarity index 98% rename from tutorials/source-modeling/plot_background_freesurfer.py rename to tutorials/forward/10_background_freesurfer.py index 46fe774235d..a2fd309f5ae 100644 --- a/tutorials/source-modeling/plot_background_freesurfer.py +++ b/tutorials/forward/10_background_freesurfer.py @@ -23,9 +23,6 @@ use FreeSurfer surface representations to allow functional data to morph between different subjects. -.. contents:: - :local: - First steps =========== @@ -123,7 +120,7 @@ # Use with MNE-Python # =================== # -# For source localization analyses to work properly, it is important that the +# For source localization analysis to work properly, it is important that the # FreeSurfer reconstruction has completed beforehand. Furthermore, when using # related functions, such as :func:`mne.setup_source_space`, ``SUBJECTS_DIR`` # has to be defined either globally by setting :func:`mne.set_config` or for diff --git a/tutorials/source-modeling/plot_source_alignment.py b/tutorials/forward/20_source_alignment.py similarity index 98% rename from tutorials/source-modeling/plot_source_alignment.py rename to tutorials/forward/20_source_alignment.py index 45c38343fd7..aace3226af3 100644 --- a/tutorials/source-modeling/plot_source_alignment.py +++ b/tutorials/forward/20_source_alignment.py @@ -10,10 +10,6 @@ alignment process is crucial for computing the forward solution, as is understanding the different coordinate frames involved in this process. -.. contents:: Page contents - :local: - :depth: 2 - Let's start out by loading some data. """ import os.path as op @@ -99,12 +95,12 @@ # Note that all three coordinate systems are **RAS** coordinate frames and # hence are also `right-handed`_ coordinate systems. Finally, note that the # ``coord_frame`` parameter sets which coordinate frame the camera -# should initially be aligned with. Let's take a look: +# should initially be aligned with. Let's have a look: fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample', subjects_dir=subjects_dir, surfaces='head-dense', show_axes=True, dig=True, eeg=[], meg='sensors', - coord_frame='meg') + coord_frame='meg', mri_fiducials='estimated') mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.)) print('Distance from head origin to MEG origin: %0.1f mm' % (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3]))) @@ -272,7 +268,7 @@ def add_head(renderer, points, color, opacity=0.95): # images. Here's what that would look like (we'll use the nasion landmark as a # representative example): -# Get the nasion +# Get the nasion: nasion = [p for p in raw.info['dig'] if p['kind'] == FIFF.FIFFV_POINT_CARDINAL and p['ident'] == FIFF.FIFFV_POINT_NASION][0] @@ -343,7 +339,8 @@ def add_head(renderer, points, color, opacity=0.95): src = mne.setup_volume_source_space(sphere=sphere, pos=10.) 
mne.viz.plot_alignment( raw.info, eeg='projected', bem=sphere, src=src, dig=True, - surfaces=['brain', 'outer_skin'], coord_frame='meg', show_axes=True) + surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'], + coord_frame='meg', show_axes=True) ############################################################################### # It is also possible to use :func:`mne.gui.coregistration` diff --git a/tutorials/source-modeling/plot_forward.py b/tutorials/forward/30_forward.py similarity index 99% rename from tutorials/source-modeling/plot_forward.py rename to tutorials/forward/30_forward.py index cd4bbe81586..68270560858 100644 --- a/tutorials/source-modeling/plot_forward.py +++ b/tutorials/forward/30_forward.py @@ -253,8 +253,7 @@ # Exercise # -------- # -# By looking at -# :ref:`sphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py` +# By looking at :ref:`ex-sensitivity-maps` # plot the sensitivity maps for EEG and compare it with the MEG, can you # justify the claims that: # diff --git a/tutorials/forward/35_eeg_no_mri.py b/tutorials/forward/35_eeg_no_mri.py new file mode 100644 index 00000000000..07094c50f24 --- /dev/null +++ b/tutorials/forward/35_eeg_no_mri.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Authors: Alexandre Gramfort +# Joan Massich +# Eric Larson +# +# License: BSD Style. + +""" +.. _tut-eeg-fsaverage-source-modeling: + +EEG forward operator with a template MRI +======================================== + +This tutorial explains how to compute the forward operator from EEG data +using the standard template MRI subject ``fsaverage``. + +.. caution:: Source reconstruction without an individual T1 MRI from the + subject will be less accurate. Do not over interpret + activity locations which can be off by multiple centimeters. + +Adult template MRI (fsaverage) +------------------------------ +First we show how ``fsaverage`` can be used as a surrogate subject. +""" + +import os.path as op +import numpy as np + +import mne +from mne.datasets import eegbci +from mne.datasets import fetch_fsaverage + +# Download fsaverage files +fs_dir = fetch_fsaverage(verbose=True) +subjects_dir = op.dirname(fs_dir) + +# The files live in: +subject = 'fsaverage' +trans = 'fsaverage' # MNE has a built-in fsaverage transformation +src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif') +bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif') + +############################################################################## +# Load the data +# ^^^^^^^^^^^^^ +# +# We use here EEG data from the BCI dataset. +# +# .. note:: See :ref:`plot_montage` to view all the standard EEG montages +# available in MNE-Python. 
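+#
+# As a quick aside (purely illustrative, not specific to this dataset), the
+# names of all built-in montages can also be listed programmatically:
+
+print(mne.channels.get_builtin_montages())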
+ +raw_fname, = eegbci.load_data(subject=1, runs=[6]) +raw = mne.io.read_raw_edf(raw_fname, preload=True) + +# Clean channel names to be able to use a standard 1005 montage +new_names = dict( + (ch_name, + ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp')) + for ch_name in raw.ch_names) +raw.rename_channels(new_names) + +# Read and set the EEG electrode locations +montage = mne.channels.make_standard_montage('standard_1005') +raw.set_montage(montage) +raw.set_eeg_reference(projection=True) # needed for inverse modeling + +# Check that the locations of EEG electrodes is correct with respect to MRI +mne.viz.plot_alignment( + raw.info, src=src, eeg=['original', 'projected'], trans=trans, + show_axes=True, mri_fiducials=True, dig='fiducials') + +############################################################################## +# Setup source space and compute forward +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +fwd = mne.make_forward_solution(raw.info, trans=trans, src=src, + bem=bem, eeg=True, mindist=5.0, n_jobs=1) +print(fwd) + +# Use fwd to compute the sensitivity map for illustration purposes +eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed') +brain = eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir, + clim=dict(lims=[5, 50, 100])) + +############################################################################## +# From here on, standard inverse imaging methods can be used! +# +# Infant MRI surrogates +# --------------------- +# We don't have a sample infant dataset for MNE, so let's fake a 10-20 one: + +ch_names = \ + 'Fz Cz Pz Oz Fp1 Fp2 F3 F4 F7 F8 C3 C4 T7 T8 P3 P4 P7 P8 O1 O2'.split() +data = np.random.RandomState(0).randn(len(ch_names), 1000) +info = mne.create_info(ch_names, 1000., 'eeg') +raw = mne.io.RawArray(data, info) + +############################################################################## +# Get an infant MRI template +# ^^^^^^^^^^^^^^^^^^^^^^^^^^ +# To use an infant head model for M/EEG data, you can use +# :func:`mne.datasets.fetch_infant_template` to download an infant template: + +subject = mne.datasets.fetch_infant_template('6mo', subjects_dir, verbose=True) + +############################################################################## +# It comes with several helpful built-in files, including a 10-20 montage +# in the MRI coordinate frame, which can be used to compute the +# MRI<->head transform ``trans``: +fname_1020 = op.join(subjects_dir, subject, 'montages', '10-20-montage.fif') +mon = mne.channels.read_dig_fif(fname_1020) +mon.rename_channels( + {f'EEG{ii:03d}': ch_name for ii, ch_name in enumerate(ch_names, 1)}) +trans = mne.channels.compute_native_head_t(mon) +raw.set_montage(mon) +print(trans) + +############################################################################## +# There are also BEM and source spaces: + +bem_dir = op.join(subjects_dir, subject, 'bem') +fname_src = op.join(bem_dir, f'{subject}-oct-6-src.fif') +src = mne.read_source_spaces(fname_src) +print(src) +fname_bem = op.join(bem_dir, f'{subject}-5120-5120-5120-bem-sol.fif') +bem = mne.read_bem_solution(fname_bem) + +############################################################################## +# You can ensure everything is as expected by plotting the result: +fig = mne.viz.plot_alignment( + raw.info, subject=subject, subjects_dir=subjects_dir, trans=trans, + src=src, bem=bem, coord_frame='mri', mri_fiducials=True, show_axes=True, + surfaces=('white', 'outer_skin', 'inner_skull', 'outer_skull')) +mne.viz.set_3d_view(fig, 25, 70, 
focalpoint=[0, -0.005, 0.01]) + +############################################################################## +# From here, standard forward and inverse operators can be computed +# +# If you have digitized head positions or MEG data, consider using +# :ref:`mne coreg` to warp a suitable infant template MRI to your +# digitization information. diff --git a/tutorials/source-modeling/plot_background_freesurfer_mne.py b/tutorials/forward/50_background_freesurfer_mne.py similarity index 99% rename from tutorials/source-modeling/plot_background_freesurfer_mne.py rename to tutorials/forward/50_background_freesurfer_mne.py index b9d4b45e043..61257d9ea82 100644 --- a/tutorials/source-modeling/plot_background_freesurfer_mne.py +++ b/tutorials/forward/50_background_freesurfer_mne.py @@ -9,9 +9,6 @@ and how MNE-Python integrates with FreeSurfer for handling MRI data and source space data in general. -.. contents:: - :local: - As usual we'll start by importing the necessary packages; for this tutorial that includes :mod:`nibabel` to handle loading the MRI images (MNE-Python also uses :mod:`nibabel` under the hood). We'll also use a special :mod:`Matplotlib @@ -295,7 +292,6 @@ def imshow_mri(data, img, vox, xyz, suptitle): # (``tris``) with shape ``(n_tris, 3)`` defining which vertices in ``rr`` form # each triangular facet of the mesh. - fname = os.path.join(subjects_dir, subject, 'surf', 'rh.white') rr_mm, tris = mne.read_surface(fname) print(f'rr_mm.shape == {rr_mm.shape}') diff --git a/tutorials/forward/80_fix_bem_in_blender.py b/tutorials/forward/80_fix_bem_in_blender.py new file mode 100644 index 00000000000..9e038468a51 --- /dev/null +++ b/tutorials/forward/80_fix_bem_in_blender.py @@ -0,0 +1,275 @@ +""" +.. _tut-fix-meshes: + +Editing BEM surfaces in Blender +=============================== + +Sometimes when creating a BEM model the surfaces need manual correction because +of a series of problems that can arise (e.g. intersection between surfaces). +Here, we will see how this can be achieved by exporting the surfaces to the 3D +modeling program `Blender `_, editing them, and +re-importing them. + +This tutorial is based on https://github.com/ezemikulan/blender_freesurfer by +Ezequiel Mikulan. + +.. contents:: Page contents + :local: + :depth: 2 + +""" + +# Authors: Marijn van Vliet +# Ezequiel Mikulan +# Manorama Kadwani +# +# License: BSD (3-clause) + +# sphinx_gallery_thumbnail_path = '_static/blender_import_obj/blender_import_obj2.jpg' # noqa + +import os +import os.path as op +import shutil +import mne + +data_path = mne.datasets.sample.data_path() +subjects_dir = op.join(data_path, 'subjects') +bem_dir = op.join(subjects_dir, 'sample', 'bem', 'flash') +############################################################################### +# Exporting surfaces to Blender +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# In this tutorial, we are working with the MNE-Sample set, for which the +# surfaces have no issues. To demonstrate how to fix problematic surfaces, we +# are going to manually place one of the inner-skull vertices outside the +# outer-skill mesh. +# +# We then convert the surfaces to `.obj +# `_ files and create a new +# folder called ``conv`` inside the FreeSurfer subject folder to keep them in. + +# Put the converted surfaces in a separate 'conv' folder +conv_dir = op.join(subjects_dir, 'sample', 'conv') +os.makedirs(conv_dir, exist_ok=True) + +# Load the inner skull surface and create a problem +# The metadata is empty in this example. 
In real study, we want to write the +# original metadata to the fixed surface file. Set read_metadata=True to do so. +coords, faces = mne.read_surface(op.join(bem_dir, 'inner_skull.surf')) +coords[0] *= 1.1 # Move the first vertex outside the skull + +# Write the inner skull surface as an .obj file that can be imported by +# Blender. +mne.write_surface(op.join(conv_dir, 'inner_skull.obj'), coords, faces, + overwrite=True) + +# Also convert the outer skull surface. +coords, faces = mne.read_surface(op.join(bem_dir, 'outer_skull.surf')) +mne.write_surface(op.join(conv_dir, 'outer_skull.obj'), coords, faces, + overwrite=True) + +############################################################################### +# Editing in Blender +# ^^^^^^^^^^^^^^^^^^ +# +# We can now open Blender and import the surfaces. Go to *File > Import > +# Wavefront (.obj)*. Navigate to the ``conv`` folder and select the file you +# want to import. Make sure to select the *Keep Vert Order* option. You can +# also select the *Y Forward* option to load the axes in the correct direction +# (RAS): +# +# .. image:: ../../_static/blender_import_obj/blender_import_obj1.jpg +# :width: 800 +# :alt: Importing .obj files in Blender +# +# For convenience, you can save these settings by pressing the ``+`` button +# next to *Operator Presets*. +# +# Repeat the procedure for all surfaces you want to import (e.g. inner_skull +# and outer_skull). +# +# You can now edit the surfaces any way you like. See the +# `Beginner Blender Tutorial Series +# `_ +# to learn how to use Blender. Specifically, `part 2 +# `_ will teach you how to +# use the basic editing tools you need to fix the surface. +# +# .. image:: ../../_static/blender_import_obj/blender_import_obj2.jpg +# :width: 800 +# :alt: Editing surfaces in Blender +# +# Using the fixed surfaces in MNE-Python +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# In Blender, you can export a surface as an .obj file by selecting it and go +# to *File > Export > Wavefront (.obj)*. You need to again select the *Y +# Forward* option and check the *Keep Vertex Order* box. +# +# .. image:: ../../_static/blender_import_obj/blender_import_obj3.jpg +# :width: 200 +# :alt: Exporting .obj files in Blender +# +# +# Each surface needs to be exported as a separate file. We recommend saving +# them in the ``conv`` folder and ending the file name with ``_fixed.obj``, +# although this is not strictly necessary. +# +# In order to be able to run this tutorial script top to bottom, we here +# simulate the edits you did manually in Blender using Python code: + +coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull.obj')) +coords[0] /= 1.1 # Move the first vertex back inside the skull +mne.write_surface(op.join(conv_dir, 'inner_skull_fixed.obj'), coords, faces, + overwrite=True) + +############################################################################### +# Back in Python, you can read the fixed .obj files and save them as +# FreeSurfer .surf files. For the :func:`mne.make_bem_model` function to find +# them, they need to be saved using their original names in the ``surf`` +# folder, e.g. ``bem/inner_skull.surf``. Be sure to first backup the original +# surfaces in case you make a mistake! 
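+
+# Optional sanity check (not part of the original workflow): the fixed surface
+# must keep exactly the same number of vertices and faces as the original,
+# otherwise :func:`mne.make_bem_model` will fail later on.
+orig_coords, orig_faces = mne.read_surface(op.join(bem_dir, 'inner_skull.surf'))
+fix_coords, fix_faces = mne.read_surface(
+    op.join(conv_dir, 'inner_skull_fixed.obj'))
+assert orig_coords.shape == fix_coords.shape
+assert orig_faces.shape == fix_faces.shape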
+ +# Read the fixed surface +coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull_fixed.obj')) + +# Backup the original surface +shutil.copy(op.join(bem_dir, 'inner_skull.surf'), + op.join(bem_dir, 'inner_skull_orig.surf')) + +# Overwrite the original surface with the fixed version +# In real study you should provide the correct metadata using ``volume_info=`` +# This could be accomplished for example with: +# +# _, _, vol_info = mne.read_surface(op.join(bem_dir, 'inner_skull.surf'), +# read_metadata=True) +# mne.write_surface(op.join(bem_dir, 'inner_skull.surf'), coords, faces, +# volume_info=vol_info, overwrite=True) + +############################################################################### +# Editing the head surfaces +# ^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# Sometimes the head surfaces are faulty and require manual editing. We use +# :func:`mne.write_head_bem` to convert the fixed surfaces to ``.fif`` files. +# +# Low-resolution head +# ~~~~~~~~~~~~~~~~~~~ +# +# For EEG forward modeling, it is possible that ``outer_skin.surf`` would be +# manually edited. In that case, remember to save the fixed version of +# ``-head.fif`` from the edited surface file for coregistration. + +# Load the fixed surface +coords, faces = mne.read_surface(op.join(bem_dir, 'outer_skin.surf')) + +# Make sure we are in the correct directory +head_dir = op.dirname(bem_dir) + +# Remember to backup the original head file in advance! +# Overwrite the original head file +# +# mne.write_head_bem(op.join(head_dir, 'sample-head.fif'), coords, faces, +# overwrite=True) + +############################################################################### +# High-resolution head +# ~~~~~~~~~~~~~~~~~~~~ +# +# We use :func:`mne.read_bem_surfaces` to read the head surface files. After +# editing, we again output the head file with :func:`mne.write_head_bem`. +# Here we use ``-head.fif`` for speed. + +# If ``-head-dense.fif`` does not exist, you need to run +# ``mne make_scalp_surfaces`` first. +# [0] because a list of surfaces is returned +surf = mne.read_bem_surfaces(op.join(head_dir, 'sample-head.fif'))[0] + +# For consistency only +coords = surf['rr'] +faces = surf['tris'] + +# Write the head as an .obj file for editing +mne.write_surface(op.join(conv_dir, 'sample-head.obj'), + coords, faces, overwrite=True) + +# Usually here you would go and edit your meshes. +# +# Here we just use the same surface as if it were fixed +# Read in the .obj file +coords, faces = mne.read_surface(op.join(conv_dir, 'sample-head.obj')) + +# Remember to backup the original head file in advance! +# Overwrite the original head file +# +# mne.write_head_bem(op.join(head_dir, 'sample-head.fif'), coords, faces, +# overwrite=True) + +############################################################################### +# That's it! You are ready to continue with your analysis pipeline (e.g. +# running :func:`mne.make_bem_model`). +# +# What if you still get an error? +# --------------------------------- +# +# When editing BEM surfaces/meshes in Blender, make sure to use +# tools that do not change the number or order of vertices, or the geometry +# of triangular faces. For example, avoid the extrusion tool, because it +# duplicates the extruded vertices. +# +# Below are some examples of errors you might encounter when running the +# `mne.make_bem_model` function, and the likely causes of those errors. +# +# +# 1. Cannot decimate to requested ico grade +# +# This error is caused by having too few or too many vertices. 
The full +# error is something like: +# +# .. code-block:: console +# +# RuntimeError: Cannot decimate to requested ico grade 4. The provided +# BEM surface has 20516 triangles, which cannot be isomorphic with a +# subdivided icosahedron. Consider manually decimating the surface to a +# suitable density and then use ico=None in make_bem_model. +# +# 2. Surface inner skull has topological defects +# +# This error can occur when trying to match the original number of +# triangles by removing vertices. The full error looks like: +# +# .. code-block:: console +# +# RuntimeError: Surface inner skull has topological defects: 12 / 20484 +# vertices have fewer than three neighboring triangles [733, 1014, 2068, +# 7732, 8435, 8489, 10181, 11120, 11121, 11122, 11304, 11788] +# +# 3. Surface inner skull is not complete +# +# This error (like the previous error) reflects a problem with the surface +# topology (i.e., the expected pattern of vertices/edges/faces is +# disrupted). +# +# .. code-block:: console +# +# RuntimeError: Surface inner skull is not complete (sum of solid +# angles yielded 0.999668, should be 1.) +# +# 4. Triangle ordering is wrong +# +# This error reflects a mismatch between how the surface is represented in +# memory (the order of the vertex/face definitions) and what is expected by +# MNE-Python. The full error is: +# +# .. code-block:: console +# +# RuntimeError: The source surface has a matching number of +# triangles but ordering is wrong +# +# +# For any of these errors, it is usually easiest to start over with the +# unedited BEM surface and try again, making sure to only *move* vertices and +# faces without *adding* or *deleting* any. For example, +# select a circle of vertices, then press :kbd:`G` to drag them to the desired +# location. Smoothing a group of selected vertices in Blender (by +# right-clicking and selecting "Smooth Vertices") can also be helpful. diff --git a/tutorials/source-modeling/plot_compute_covariance.py b/tutorials/forward/90_compute_covariance.py similarity index 91% rename from tutorials/source-modeling/plot_compute_covariance.py rename to tutorials/forward/90_compute_covariance.py index f82875f4f34..f8d32a13b78 100644 --- a/tutorials/source-modeling/plot_compute_covariance.py +++ b/tutorials/forward/90_compute_covariance.py @@ -103,8 +103,9 @@ # available. Unfortunately it is not easy to tell the effective number of # samples, hence, to choose the appropriate regularization. # In MNE-Python, regularization is done using advanced regularization methods -# described in [1]_. For this the 'auto' option can be used. With this -# option cross-validation will be used to learn the optimal regularization: +# described in :footcite:`EngemannGramfort2015`. For this the 'auto' option +# can be used. With this option cross-validation will be used to learn the +# optimal regularization: noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto', rank=None) @@ -136,7 +137,7 @@ # under-regularization. # # Note that if data have been processed using signal space separation -# (SSS) [2]_, +# (SSS) :footcite:`TauluEtAl2005`, # gradiometers and magnetometers will be displayed jointly because both are # reconstructed from the same SSS basis vectors with the same numerical rank. # This also implies that both sensor types are not any longer statistically @@ -146,9 +147,8 @@ # introductory materials can be found `here `_. 
# # For expert use cases or debugging the alternative estimators can also be -# compared (see -# :ref:`sphx_glr_auto_examples_visualization_plot_evoked_whitening.py`) and -# :ref:`sphx_glr_auto_examples_inverse_plot_covariance_whitening_dspm.py`): +# compared (see :ref:`ex-evoked-whitening`) and +# :ref:`ex-covariance-whitening-dspm`): noise_covs = mne.compute_covariance( epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True, @@ -158,7 +158,7 @@ ############################################################################## # This will plot the whitened evoked for the optimal estimator and display the -# :term:`GFPs ` for all estimators as separate lines in the related panel. +# :term:`GFP` for all estimators as separate lines in the related panel. ############################################################################## @@ -180,9 +180,4 @@ # References # ---------- # -# .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in -# covariance estimation and spatial whitening of MEG and EEG signals, -# vol. 108, 328-342, NeuroImage. -# -# .. [2] Taulu, S., Simola, J., Kajola, M., 2005. Applications of the signal -# space separation method. IEEE Trans. Signal Proc. 53, 3359-3372. +# .. footbibliography:: diff --git a/tutorials/forward/README.txt b/tutorials/forward/README.txt new file mode 100644 index 00000000000..f346c4a15be --- /dev/null +++ b/tutorials/forward/README.txt @@ -0,0 +1,5 @@ +Forward models and source spaces +-------------------------------- + +These tutorials cover how the cortical source locations (source spaces) and +forward models (AKA leadfield matrices) are defined. diff --git a/tutorials/intro/plot_10_overview.py b/tutorials/intro/10_overview.py similarity index 99% rename from tutorials/intro/plot_10_overview.py rename to tutorials/intro/10_overview.py index 39793830adb..083b47ef1d3 100644 --- a/tutorials/intro/plot_10_overview.py +++ b/tutorials/intro/10_overview.py @@ -12,10 +12,6 @@ covers a lot of ground fairly quickly (at the expense of depth). Subsequent tutorials address each of these topics in greater detail. -.. contents:: Page contents - :local: - :depth: 1 - We begin by importing the necessary Python modules: """ diff --git a/tutorials/intro/15_inplace.py b/tutorials/intro/15_inplace.py new file mode 100644 index 00000000000..a5f2f429d57 --- /dev/null +++ b/tutorials/intro/15_inplace.py @@ -0,0 +1,101 @@ +""" +.. _tut-inplace: + +Modifying data in-place +======================= + +Many of MNE-Python's data objects (`~mne.io.Raw`, `~mne.Epochs`, `~mne.Evoked`, +etc) have methods that modify the data in-place (either optionally or +obligatorily). This can be advantageous when working with large datasets +because it reduces the amount of computer memory needed to perform the +computations. However, it can lead to unexpected results if you're not aware +that it's happening. This tutorial provides a few examples of in-place +processing, and how and when to avoid it. + +As usual we'll start by importing the modules we need and +loading some :ref:`example data `: +""" + +import os +import mne + +############################################################################### +sample_data_folder = mne.datasets.sample.data_path() +sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', + 'sample_audvis_raw.fif') +# the preload flag loads the data into memory now +raw = mne.io.read_raw_fif(sample_data_raw_file, preload=True) +raw.crop(tmax=10.) 
# raw.crop() always happens in-place + + +############################################################################### +# Signal processing +# ----------------- +# +# Most MNE-Python data objects have built-in methods for filtering, including +# high-, low-, and band-pass filters (`~mne.io.Raw.filter`), +# `~mne.io.Raw.notch_filter`, Hilbert transforms (`~mne.io.Raw.apply_hilbert`), +# and even arbitrary or user-defined functions (`~mne.io.Raw.apply_function`). +# These typically **always** modify data in-place, so if we want to preserve +# the unprocessed data for comparison, we must first make a copy of it. For +# example: + +original_raw = raw.copy() +raw.apply_hilbert() +print(f'original data type was {original_raw.get_data().dtype}, after ' + f'apply_hilbert the data type changed to {raw.get_data().dtype}.') + + +############################################################################### +# Channel picking +# --------------- +# +# Another group of methods where data is modified in-place are the +# channel-picking methods. For example: + +print(f'original data had {original_raw.info["nchan"]} channels.') +original_raw.pick('eeg') # selects only the EEG channels +print(f'after picking, it has {original_raw.info["nchan"]} channels.') + + +############################################################################### +# Note also that when picking only EEG channels, projectors that affected only +# the magnetometers were dropped, since there are no longer any magnetometer +# channels. +# +# +# The ``copy`` parameter +# ---------------------- +# +# Above we saw an example of using the ``.copy()`` method so that we can +# compare data before and after processing. Unlike the signal processing and +# channel picking *methods* above, MNE-Python *functions* often have a ``copy`` +# *parameter* that determines whether the operation happens in-place or on a +# copy of the data. For example, here we're plotting ``raw`` *after* the +# rereferencing has been done, but ``raw`` is unaffected because we specified +# ``copy=True``: + +# sphinx_gallery_thumbnail_number=2 +rereferenced_raw, ref_data = mne.set_eeg_reference(original_raw, ['EEG 003'], + copy=True) +original_raw.plot() +rereferenced_raw.plot() + +############################################################################### +# Another example is the picking *function* `mne.pick_info`, which operates on +# `mne.Info` dictionaries rather than on data objects. See +# :ref:`tut-info-class` for details. +# +# +# Summary +# ------- +# +# Generally speaking, you should expect that *methods of data objects* will +# operate in-place, and *functions that take a data object as an argument* will +# operate on a copy of the data (unless the function has a ``copy=False`` +# option). During the exploratory phase of your analysis, where you might want +# to try out the effects of different data cleaning approaches, you should get +# used to patterns like ``raw.copy().filter(...).plot()`` or +# ``raw.copy().apply_proj().plot_psd()`` if you want to avoid having to re-load +# data and repeat earlier steps each time you change a computation (see the +# :ref:`sect-meth-chain` section for more info on method chaining). 
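+
+###############################################################################
+# As a minimal, self-contained illustration of that pattern (synthetic data
+# rather than the sample recording, so this cell runs on its own; NumPy is
+# imported here only to build and compare the arrays):
+
+import numpy as np
+
+info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=250., ch_types='eeg')
+synth_raw = mne.io.RawArray(np.random.RandomState(0).randn(2, 2500) * 1e-6,
+                            info)
+
+# filtering a copy leaves the original untouched...
+filtered = synth_raw.copy().filter(l_freq=None, h_freq=40.)
+print(np.allclose(synth_raw.get_data(), filtered.get_data()))  # False
+
+# ...whereas calling the method on the object itself modifies it in place
+synth_raw.filter(l_freq=None, h_freq=40.)
+print(np.allclose(synth_raw.get_data(), filtered.get_data()))  # True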
diff --git a/tutorials/intro/plot_20_events_from_raw.py b/tutorials/intro/20_events_from_raw.py similarity index 99% rename from tutorials/intro/plot_20_events_from_raw.py rename to tutorials/intro/20_events_from_raw.py index 3e401595e57..c4002f41d09 100644 --- a/tutorials/intro/plot_20_events_from_raw.py +++ b/tutorials/intro/20_events_from_raw.py @@ -9,10 +9,6 @@ and how to convert between the two different representations of events within MNE-Python (Events arrays and Annotations objects). -.. contents:: Page contents - :local: - :depth: 1 - In the :ref:`introductory tutorial ` we saw an example of reading experimental events from a :term:`"STIM" channel `; here we'll discuss :term:`events` and :term:`annotations` more @@ -292,7 +288,7 @@ ############################################################################### # Other examples of resting-state analysis can be found in the online # documentation for :func:`mne.make_fixed_length_events`, such as -# :doc:`../../auto_examples/connectivity/plot_mne_inverse_envelope_correlation`. +# :ref:`ex-envelope-correlation`. # # .. LINKS # diff --git a/tutorials/intro/plot_30_info.py b/tutorials/intro/30_info.py similarity index 98% rename from tutorials/intro/plot_30_info.py rename to tutorials/intro/30_info.py index 6f27946faf6..3b6bd3ac865 100644 --- a/tutorials/intro/plot_30_info.py +++ b/tutorials/intro/30_info.py @@ -9,10 +9,6 @@ of various recording details, and is attached to :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked` objects. -.. contents:: Page contents - :local: - :depth: 2 - We'll begin by loading the Python modules we need, and loading the same :ref:`example data ` we used in the :ref:`introductory tutorial `: @@ -184,6 +180,11 @@ eeg_indices = mne.pick_types(info, meg=False, eeg=True) print(mne.pick_info(info, eeg_indices)['nchan']) +############################################################################### +# We can also get a nice HTML representation in IPython like: + +info + ############################################################################### # By default, :func:`~mne.pick_info` will make a copy of the original # :class:`~mne.Info` object before modifying it; if you want to modify it diff --git a/tutorials/intro/plot_40_sensor_locations.py b/tutorials/intro/40_sensor_locations.py similarity index 98% rename from tutorials/intro/plot_40_sensor_locations.py rename to tutorials/intro/40_sensor_locations.py index 4af9844478f..ecfc81b4f70 100644 --- a/tutorials/intro/plot_40_sensor_locations.py +++ b/tutorials/intro/40_sensor_locations.py @@ -7,10 +7,6 @@ This tutorial describes how to read and plot sensor locations, and how the physical location of sensors is handled in MNE-Python. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need and loading some :ref:`example data `: """ @@ -127,7 +123,8 @@ ############################################################################### # In mne-python the head center and therefore the sphere center are calculated -# using fiducial points. Because of this the head circle represents head +# using :term:`fiducial points `. +# Because of this the head circle represents head # circumference at the nasion and ear level, and not where it is commonly # measured in 10-20 EEG system: above nasion at T4/T8, T3/T7, Oz, Fz level. 
# Notice below that by default T7 and Oz channels are placed within the head @@ -223,8 +220,7 @@ # ``surfaces`` parameter) making it useful for :ref:`assessing coordinate frame # transformations `. For examples of various uses of # :func:`~mne.viz.plot_alignment`, see :ref:`plot_montage`, -# :doc:`../../auto_examples/visualization/plot_eeg_on_scalp`, and -# :doc:`../../auto_examples/visualization/plot_meg_sensors`. +# :ref:`ex-eeg-on-scalp`, and :ref:`ex-plot-meg-sensors`. # # # Working with layout files diff --git a/tutorials/intro/plot_50_configure_mne.py b/tutorials/intro/50_configure_mne.py similarity index 99% rename from tutorials/intro/plot_50_configure_mne.py rename to tutorials/intro/50_configure_mne.py index f1462a6117c..89096f46684 100644 --- a/tutorials/intro/plot_50_configure_mne.py +++ b/tutorials/intro/50_configure_mne.py @@ -8,10 +8,6 @@ This tutorial covers how to configure MNE-Python to suit your local system and your analysis preferences. -.. contents:: Page contents - :local: - :depth: 1 - We begin by importing the necessary Python modules: """ diff --git a/tutorials/misc/plot_report.py b/tutorials/intro/70_report.py similarity index 69% rename from tutorials/misc/plot_report.py rename to tutorials/intro/70_report.py index 2127c8b4ad6..a5d37e84f38 100644 --- a/tutorials/misc/plot_report.py +++ b/tutorials/intro/70_report.py @@ -4,18 +4,23 @@ Getting started with ``mne.Report`` =================================== -This tutorial covers making interactive HTML summaries with -:class:`mne.Report`. +`mne.Report` is a way to create interactive HTML summaries of your data. These +reports can show many different visualizations of one subject's data. A common +use case is creating diagnostic summaries to check data quality at different +stages in the processing pipeline. The report can show things like plots of +data before and after each preprocessing step, epoch rejection statistics, MRI +slices with overlaid BEM shells, all the way up to plots of estimated cortical +activity. -.. contents:: Page contents - :local: - :depth: 2 - -As usual we'll start by importing the modules we need and loading some -:ref:`example data `: +Compared to a Jupyter notebook, `mne.Report` is easier to deploy (the HTML +pages it generates are self-contained and do not require a running Python +environment) but less flexible (you can't change code and re-run something +directly within the browser). This tutorial covers the basics of building a +`~mne.Report`. As usual we'll start by importing the modules we need: """ import os +import matplotlib.pyplot as plt import mne ############################################################################### @@ -28,7 +33,8 @@ # ============== ============================================================== # Data object Filename convention (ends with) # ============== ============================================================== -# raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), _meg.fif +# raw -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz), +# _meg.fif(.gz), _eeg.fif(.gz), _ieeg.fif(.gz) # events -eve.fif(.gz) # epochs -epo.fif(.gz) # evoked -ave.fif(.gz) @@ -115,7 +121,7 @@ ############################################################################### # Now let's look at how :class:`~mne.Report` handles :class:`~mne.Evoked` data -# (we'll skip the MRIs to save computation time). The following code will +# (we will skip the MRIs to save computation time). 
The following code will # produce butterfly plots, topomaps, and comparisons of the global field # power (GFP) for different experimental conditions. @@ -174,22 +180,82 @@ # Adding custom plots to a report # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # -# The python interface has greater flexibility compared to the :ref:`command +# The Python interface has greater flexibility compared to the :ref:`command # line interface `. For example, custom plots can be added via # the :meth:`~mne.Report.add_figs_to_section` method: -# generate a custom plot: -fname_evoked = os.path.join(path, 'MEG', 'sample', 'sample_audvis-ave.fif') -evoked = mne.read_evokeds(fname_evoked, - condition='Left Auditory', - baseline=(None, 0), - verbose=True) -fig = evoked.plot(show=False) +report = mne.Report(verbose=True) + +fname_raw = os.path.join(path, 'MEG', 'sample', 'sample_audvis_raw.fif') +raw = mne.io.read_raw_fif(fname_raw, verbose=False).crop(tmax=60) +events = mne.find_events(raw, stim_channel='STI 014') +event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, + 'visual/right': 4, 'face': 5, 'buttonpress': 32} + +# create some epochs and ensure we drop a few, so we can then plot the drop log +reject = dict(eeg=150e-6) +epochs = mne.Epochs(raw=raw, events=events, event_id=event_id, + tmin=-0.2, tmax=0.7, reject=reject, preload=True) +fig_drop_log = epochs.plot_drop_log(subject='sample', show=False) + +# now also plot an evoked response +evoked_aud_left = epochs['auditory/left'].average() +fig_evoked = evoked_aud_left.plot(spatial_colors=True, show=False) -# add the custom plot to the report: -report.add_figs_to_section(fig, captions='Left Auditory', section='evoked') +# add the custom plots to the report: +report.add_figs_to_section([fig_drop_log, fig_evoked], + captions=['Dropped Epochs', + 'Evoked: Left Auditory'], + section='drop-and-evoked') report.save('report_custom.html', overwrite=True) +############################################################################### +# Adding a slider +# ^^^^^^^^^^^^^^^ +# +# Sliders provide an intuitive way for users to interactively browse a +# predefined set of images. You can add sliders via +# :meth:`~mne.Report.add_slider_to_section`: + +report = mne.Report(verbose=True) + +figs = list() +times = evoked_aud_left.times[::30] +for t in times: + figs.append(evoked_aud_left.plot_topomap(t, vmin=-300, vmax=300, res=100, + show=False)) + plt.close(figs[-1]) +report.add_slider_to_section(figs, times, 'Evoked Response', + image_format='png') # can also use 'svg' + +report.save('report_slider.html', overwrite=True) + +############################################################################### +# Adding ``SourceEstimate`` (STC) plot to a report +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# Now we see how :class:`~mne.Report` handles :class:`~mne.SourceEstimate` +# data. The following will produce a :term:`stc` plot with vertex +# time courses. In this scenario, we also demonstrate how to use the +# :meth:`mne.viz.Brain.screenshot` method to save the figs in a slider. 
+ +report = mne.Report(verbose=True) +fname_stc = os.path.join(path, 'MEG', 'sample', 'sample_audvis-meg') +stc = mne.read_source_estimate(fname_stc, subject='sample') +figs = list() +kwargs = dict(subjects_dir=subjects_dir, initial_time=0.13, + clim=dict(kind='value', lims=[3, 6, 9])) +for hemi in ('lh', 'rh'): + brain = stc.plot(hemi=hemi, **kwargs) + brain.toggle_interface(False) + figs.append(brain.screenshot(time_viewer=True)) + brain.close() + +# add the stc plot to the report: +report.add_slider_to_section(figs) + +report.save('report_stc.html', overwrite=True) + ############################################################################### # Managing report sections # ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -200,13 +266,14 @@ # :meth:`~mne.Report.add_figs_to_section` command. Each section is identified # by a toggle button in the top navigation bar of the report which can be used # to show or hide the contents of the section. To toggle the show/hide state of -# all sections in the HTML report, press :kbd:`t`. +# all sections in the HTML report, press :kbd:`t`, or press the toggle-all +# button in the upper right. # -# .. note:: +# .. sidebar:: Structure # -# Although we've been generating separate reports in each example, you could -# easily create a single report for all :file:`.fif` files (raw, evoked, -# covariance, etc) by passing ``pattern='*.fif'``. +# Although we've been generating separate reports in each of these examples, +# you could easily create a single report for all :file:`.fif` files (raw, +# evoked, covariance, etc) by passing ``pattern='*.fif'``. # # # Editing a saved report @@ -227,7 +294,7 @@ # context manager: with mne.open_report('report.h5') as report: - report.add_figs_to_section(fig, + report.add_figs_to_section(fig_evoked, captions='Left Auditory', section='evoked', replace=True) diff --git a/tutorials/intro/README.txt b/tutorials/intro/README.txt index 87db17bc27f..6e579abd967 100644 --- a/tutorials/intro/README.txt +++ b/tutorials/intro/README.txt @@ -1,5 +1,5 @@ Introductory tutorials -====================== +----------------------- These tutorials cover the basic EEG/MEG pipeline for event-related analysis, introduce the :class:`mne.Info`, :term:`events`, and :class:`mne.Annotations` diff --git a/tutorials/source-modeling/plot_object_source_estimate.py b/tutorials/inverse/10_stc_class.py similarity index 99% rename from tutorials/source-modeling/plot_object_source_estimate.py rename to tutorials/inverse/10_stc_class.py index 02fa5ce8fa0..0c38a326d3e 100644 --- a/tutorials/source-modeling/plot_object_source_estimate.py +++ b/tutorials/inverse/10_stc_class.py @@ -47,9 +47,6 @@ mostly used together with :ref:`FreeSurfer ` surface representations. -.. contents:: - :local: - Let's get ourselves an idea of what a :class:`mne.SourceEstimate` really is. 
We first set up the environment and load some data: """ diff --git a/tutorials/source-modeling/plot_dipole_fit.py b/tutorials/inverse/20_dipole_fit.py similarity index 100% rename from tutorials/source-modeling/plot_dipole_fit.py rename to tutorials/inverse/20_dipole_fit.py diff --git a/tutorials/source-modeling/plot_mne_dspm_source_localization.py b/tutorials/inverse/30_mne_dspm_loreta.py similarity index 97% rename from tutorials/source-modeling/plot_mne_dspm_source_localization.py rename to tutorials/inverse/30_mne_dspm_loreta.py index f16e5c13e95..0c07601c416 100644 --- a/tutorials/source-modeling/plot_mne_dspm_source_localization.py +++ b/tutorials/inverse/30_mne_dspm_loreta.py @@ -8,6 +8,8 @@ minimum-norm inverse method on evoked/raw/epochs data. """ +import os.path as op + import numpy as np import matplotlib.pyplot as plt @@ -19,7 +21,8 @@ # Process MEG data data_path = sample.data_path() -raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' +raw_fname = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) # already has an average reference events = mne.find_events(raw, stim_channel='STI 014') @@ -37,7 +40,6 @@ ############################################################################### # Compute regularized noise covariance # ------------------------------------ -# # For more details see :ref:`tut_compute_covariance`. noise_cov = mne.compute_covariance( diff --git a/tutorials/source-modeling/plot_dipole_orientations.py b/tutorials/inverse/35_dipole_orientations.py similarity index 90% rename from tutorials/source-modeling/plot_dipole_orientations.py rename to tutorials/inverse/35_dipole_orientations.py index 32d34486472..aa4e46b336b 100644 --- a/tutorials/source-modeling/plot_dipole_orientations.py +++ b/tutorials/inverse/35_dipole_orientations.py @@ -82,9 +82,9 @@ # While the source space defines the position of the dipoles, the inverse # operator defines the possible orientations of them. One of the options is to # assign a fixed orientation. Since the neural currents from which MEG and EEG -# signals originate flows mostly perpendicular to the cortex [1]_, restricting -# the orientation of the dipoles accordingly places a useful restriction on the -# source estimate. +# signals originate flows mostly perpendicular to the cortex +# :footcite:`HamalainenEtAl1993`, restricting the orientation of the dipoles +# accordingly places a useful restriction on the source estimate. # # By specifying ``fixed=True`` when calling # :func:`mne.minimum_norm.make_inverse_operator`, the dipole orientations are @@ -118,6 +118,7 @@ _, time_max = stc.get_peak(hemi='lh') brain_fixed = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) +mne.viz.set_3d_view(figure=brain_fixed, focalpoint=(0., 0., 50)) ############################################################################### # The direction of the estimated current is now restricted to two directions: @@ -135,11 +136,12 @@ # source estimate sensitive to the spacing of the dipoles along the cortex, # since the curvature of the cortex changes within each ~10 square mm patch. # Furthermore, misalignment of the MEG/EEG and MRI coordinate frames is more -# critical when the source dipole orientations are strictly constrained [2]_. -# To lift the restriction on the orientation of the dipoles, the inverse -# operator has the ability to place not one, but three dipoles at each -# location defined by the source space. 
These three dipoles are placed -# orthogonally to form a Cartesian coordinate system. Let's visualize this: +# critical when the source dipole orientations are strictly constrained +# :footcite:`LinEtAl2006`. To lift the restriction on the orientation of the +# dipoles, the inverse operator has the ability to place not one, but three +# dipoles at each location defined by the source space. These three dipoles are +# placed orthogonally to form a Cartesian coordinate system. Let's visualize +# this: fig = mne.viz.create_3d_figure(size=(600, 400)) # Plot the cortex @@ -170,6 +172,7 @@ _, time_max = stc.magnitude().get_peak(hemi='lh') brain_mag = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) +mne.viz.set_3d_view(figure=brain_mag, focalpoint=(0., 0., 50)) ############################################################################### # .. _plot_dipole_orientations_vLOC_orientations: @@ -192,6 +195,7 @@ _, time_max = stc.magnitude().get_peak(hemi='lh') brain_loose = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) +mne.viz.set_3d_view(figure=brain_loose, focalpoint=(0., 0., 50)) ############################################################################### # Discarding dipole orientation information @@ -210,15 +214,9 @@ _, time_max = stc.get_peak(hemi='lh') brain = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) +mne.viz.set_3d_view(figure=brain, focalpoint=(0., 0., 50)) ############################################################################### # References # ---------- -# .. [1] Hämäläinen, M. S., Hari, R., Ilmoniemi, R. J., Knuutila, J., & -# Lounasmaa, O. V. "Magnetoencephalography - theory, instrumentation, and -# applications to noninvasive studies of the working human brain", Reviews -# of Modern Physics, 1993. https://doi.org/10.1103/RevModPhys.65.413 -# -# .. [2] Lin, F. H., Belliveau, J. W., Dale, A. M., & Hämäläinen, M. S. (2006). -# Distributed current estimates using cortical orientation constraints. -# Human Brain Mapping, 27(1), 1–13. http://doi.org/10.1002/hbm.20155 +# .. footbibliography:: diff --git a/tutorials/source-modeling/plot_mne_solutions.py b/tutorials/inverse/40_mne_fixed_free.py similarity index 100% rename from tutorials/source-modeling/plot_mne_solutions.py rename to tutorials/inverse/40_mne_fixed_free.py diff --git a/tutorials/source-modeling/plot_beamformer_lcmv.py b/tutorials/inverse/50_beamformer_lcmv.py similarity index 97% rename from tutorials/source-modeling/plot_beamformer_lcmv.py rename to tutorials/inverse/50_beamformer_lcmv.py index 9d5fa7dfa34..91bf79ad243 100644 --- a/tutorials/source-modeling/plot_beamformer_lcmv.py +++ b/tutorials/inverse/50_beamformer_lcmv.py @@ -3,14 +3,10 @@ ============================================== This tutorial gives an overview of the beamformer method -and shows how to use an LCMV beamformer to reconstruct source activity. - -.. contents:: Page contents - :local: - :depth: 2 - +and shows how to reconstruct source activity using an LCMV beamformer. 
""" -# Author: Britta Westner +# Authors: Britta Westner +# Eric Larson # # License: BSD (3-clause) @@ -237,7 +233,8 @@ brain = stc_vec.plot_3d( clim=dict(kind='value', lims=lims), hemi='both', views=['coronal', 'sagittal', 'axial'], size=(800, 300), - view_layout='horizontal', show_traces=0.3, **kwargs) + view_layout='horizontal', show_traces=0.3, + brain_kwargs=dict(silhouette=True), **kwargs) ############################################################################### # Visualize the activity of the maximum voxel with all three components diff --git a/tutorials/source-modeling/plot_visualize_stc.py b/tutorials/inverse/60_visualize_stc.py similarity index 92% rename from tutorials/source-modeling/plot_visualize_stc.py rename to tutorials/inverse/60_visualize_stc.py index efca1401e3f..bdbaf191d1f 100644 --- a/tutorials/source-modeling/plot_visualize_stc.py +++ b/tutorials/inverse/60_visualize_stc.py @@ -4,11 +4,7 @@ Visualize source time courses (stcs) ==================================== -This tutorial focuses on visualization of -:term:`stcs `. - -.. contents:: Table of Contents - :local: +This tutorial focuses on visualization of :term:`source estimates`. Surface Source Estimates ------------------------ @@ -22,7 +18,7 @@ import matplotlib.pyplot as plt import mne -from mne.datasets import sample +from mne.datasets import sample, fetch_hcp_mmp_parcellation from mne.minimum_norm import apply_inverse, read_inverse_operator from mne import read_evokeds @@ -32,13 +28,14 @@ fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' fname_stc = os.path.join(sample_dir, 'sample_audvis-meg') +fetch_hcp_mmp_parcellation(subjects_dir) ############################################################################### -# Then, we read the stc from file +# Then, we read the stc from file. stc = mne.read_source_estimate(fname_stc, subject='sample') ############################################################################### -# This is a :class:`SourceEstimate ` object +# This is a :class:`SourceEstimate ` object. print(stc) ############################################################################### @@ -54,17 +51,22 @@ clim=dict(kind='value', lims=[3, 6, 9])) ############################################################################### -# You can also morph it to fsaverage and visualize it using a flatmap: +# You can also morph it to fsaverage and visualize it using a flatmap. 
# sphinx_gallery_thumbnail_number = 3 stc_fs = mne.compute_source_morph(stc, 'sample', 'fsaverage', subjects_dir, smooth=5, verbose='error').apply(stc) brain = stc_fs.plot(subjects_dir=subjects_dir, initial_time=initial_time, clim=dict(kind='value', lims=[3, 6, 9]), - surface='flat', hemi='split', size=(1000, 500), + surface='flat', hemi='both', size=(1000, 500), smoothing_steps=5, time_viewer=False, add_data_kwargs=dict( colorbar_kwargs=dict(label_font_size=10))) + +# to help orient us, let's add a parcellation (red=auditory, green=motor, +# blue=visual) +brain.add_annotation('HCPMMP1_combined', borders=2, subjects_dir=subjects_dir) + # You can save a movie like the one on our documentation website with: # brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16, # interpolation='linear', framerate=10) @@ -166,7 +168,8 @@ inv = read_inverse_operator(fname_inv) stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector') brain = stc.plot(subject='sample', subjects_dir=subjects_dir, - initial_time=initial_time) + initial_time=initial_time, brain_kwargs=dict( + silhouette=True)) ############################################################################### # Dipole fits diff --git a/tutorials/source-modeling/plot_eeg_mri_coords.py b/tutorials/inverse/70_eeg_mri_coords.py similarity index 99% rename from tutorials/source-modeling/plot_eeg_mri_coords.py rename to tutorials/inverse/70_eeg_mri_coords.py index af3a73ed90e..c9f49d964c4 100644 --- a/tutorials/source-modeling/plot_eeg_mri_coords.py +++ b/tutorials/inverse/70_eeg_mri_coords.py @@ -7,11 +7,6 @@ This tutorial explains how to compute the forward operator from EEG data when the electrodes are in MRI voxel coordinates. - -.. contents:: This tutorial covers: - :local: - :depth: 2 - """ # Authors: Eric Larson diff --git a/tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py b/tutorials/inverse/80_brainstorm_phantom_elekta.py similarity index 94% rename from tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py rename to tutorials/inverse/80_brainstorm_phantom_elekta.py index 865875453da..9099d42ecda 100644 --- a/tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py +++ b/tutorials/inverse/80_brainstorm_phantom_elekta.py @@ -7,16 +7,13 @@ ========================================== Here we compute the evoked from raw for the Brainstorm Elekta phantom -tutorial dataset. For comparison, see [1]_ and: +tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and: https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta References ---------- -.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. - Brainstorm: A User-Friendly Application for MEG/EEG Analysis. - Computational Intelligence and Neuroscience, vol. 2011, Article ID - 879716, 13 pages, 2011. doi:10.1155/2011/879716 +.. footbibliography:: """ # sphinx_gallery_thumbnail_number = 9 @@ -87,7 +84,7 @@ sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08) mne.viz.plot_alignment(epochs.info, subject=subject, show_axes=True, - bem=sphere, dig=True, surfaces='inner_skull') + bem=sphere, dig=True, surfaces='head') ############################################################################### # Let's do some dipole fits. 
We first compute the noise covariance, diff --git a/tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py b/tutorials/inverse/85_brainstorm_phantom_ctf.py similarity index 92% rename from tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py rename to tutorials/inverse/85_brainstorm_phantom_ctf.py index 43649173eea..95965b5782d 100644 --- a/tutorials/sample-datasets/plot_brainstorm_phantom_ctf.py +++ b/tutorials/inverse/85_brainstorm_phantom_ctf.py @@ -7,16 +7,13 @@ ======================================= Here we compute the evoked from raw for the Brainstorm CTF phantom -tutorial dataset. For comparison, see [1]_ and: +tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and: https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf References ---------- -.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. - Brainstorm: A User-Friendly Application for MEG/EEG Analysis. - Computational Intelligence and Neuroscience, vol. 2011, Article ID - 879716, 13 pages, 2011. doi:10.1155/2011/879716 +.. footbibliography:: """ # Authors: Eric Larson @@ -94,7 +91,7 @@ # # Let's use a :ref:`sphere head geometry model ` # and let's see the coordinate alignment and the sphere location. -sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None) +sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08) mne.viz.plot_alignment(raw.info, subject='sample', meg='helmet', bem=sphere, dig=True, diff --git a/tutorials/sample-datasets/plot_phantom_4DBTi.py b/tutorials/inverse/90_phantom_4DBTi.py similarity index 100% rename from tutorials/sample-datasets/plot_phantom_4DBTi.py rename to tutorials/inverse/90_phantom_4DBTi.py diff --git a/tutorials/source-modeling/README.txt b/tutorials/inverse/README.txt similarity index 54% rename from tutorials/source-modeling/README.txt rename to tutorials/inverse/README.txt index d26e3aafac4..a1c451143a6 100644 --- a/tutorials/source-modeling/README.txt +++ b/tutorials/inverse/README.txt @@ -1,4 +1,4 @@ -Source modeling -=============== +Source localization and inverses +-------------------------------- These tutorials cover estimation of cortical activity from sensor recordings. diff --git a/tutorials/io/plot_10_reading_meg_data.py b/tutorials/io/10_reading_meg_data.py similarity index 99% rename from tutorials/io/plot_10_reading_meg_data.py rename to tutorials/io/10_reading_meg_data.py index 32e46571232..b345e9b0d53 100644 --- a/tutorials/io/plot_10_reading_meg_data.py +++ b/tutorials/io/10_reading_meg_data.py @@ -8,11 +8,6 @@ This section describes how to read data for various MEG manufacturers. -.. contents:: Page contents - :local: - :depth: 2 - - .. _import-neuromag: Elekta NeuroMag (.fif) diff --git a/tutorials/io/20_reading_eeg_data.py b/tutorials/io/20_reading_eeg_data.py new file mode 100644 index 00000000000..f1b45565333 --- /dev/null +++ b/tutorials/io/20_reading_eeg_data.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +r""" +.. _tut-imorting-eeg-data: + +=============================== +Importing data from EEG devices +=============================== + +MNE includes various functions and utilities for reading EEG data and electrode +locations. + +.. _import-bv: + +BrainVision (.vhdr, .vmrk, .eeg) +================================ + +The BrainVision file format consists of three separate files: + +1. A text header file (``.vhdr``) containing meta data. +2. A text marker file (``.vmrk``) containing information about events in the + data. +3. A binary data file (``.eeg``) containing the voltage values of the EEG. 
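As a minimal sketch of how this three-file layout is handled in practice (the file name ``recording.vhdr`` is hypothetical, and the reader function is described in more detail just below), only the header file needs to be passed:

import mne

# Pass the header file only; the matching .vmrk and .eeg files are found
# automatically because the .vhdr file references them.
raw = mne.io.read_raw_brainvision('recording.vhdr', preload=True)
print(raw.info)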
+ +Both text files are based on the `INI format `_ +consisting of + +* sections marked as ``[square brackets]``, +* comments marked as ``; comment``, +* and key-value pairs marked as ``key=value``. + +Brain Products provides documentation for their core BrainVision file format. +The format specification is hosted on the +`Brain Products website `_. + +BrainVision EEG files can be read using :func:`mne.io.read_raw_brainvision`, +passing the ``.vhdr`` header file as the argument. + +.. warning:: Renaming BrainVision files can be problematic due to their + multi-file structure. See this + `example `_ + for instructions. + +.. note:: For *writing* BrainVision files, you can use the Python package + `pybv `_. + + +.. _import-edf: + +European data format (.edf) +=========================== + +`EDF `_ and +`EDF+ `_ files can be read using +:func:`mne.io.read_raw_edf`. Both variants are 16-bit formats. + +EDF+ files may contain annotation channels which can be used to store trigger +and event information. These annotations are available in ``raw.annotations``. + +Writing EDF files is not supported natively yet. `This gist +`__ or +`MNELAB `_ (both of which use +`pyedflib `_ under the hood) can be used +to export any :class:`mne.io.Raw` object to EDF/EDF+/BDF/BDF+. + + +.. _import-biosemi: + +BioSemi data format (.bdf) +========================== + +The `BDF format `_ is a 24-bit +variant of the EDF format used by EEG systems manufactured by BioSemi. It can +be imported with :func:`mne.io.read_raw_bdf`. + +BioSemi amplifiers do not perform "common mode noise rejection" automatically. +The signals in the EEG file are the voltages between each electrode and the CMS +active electrode, which still contain some CM noise (50 Hz, ADC reference +noise, etc.). The `BioSemi FAQ `__ +provides more details on this topic. +Therefore, it is advisable to choose a reference (e.g., a single channel like Cz, +average of linked mastoids, average of all electrodes, etc.) after importing +BioSemi data to avoid losing signal information. The data can be re-referenced +later after cleaning if desired. + +.. warning:: Data samples in a BDF file are represented in a 3-byte + (24-bit) format. Since 3-byte raw data buffers are not presently + supported in the FIF format, these data will be changed to 4-byte + integers in the conversion. + + +.. _import-gdf: + +General data format (.gdf) +========================== + +GDF files can be read using :func:`mne.io.read_raw_gdf`. + +`GDF (General Data Format) `_ is a flexible +format for biomedical signals that overcomes some of the limitations of the +EDF format. The original specification (GDF v1) includes a binary header +and uses an event table. An updated specification (GDF v2) was released in +2011 and adds fields for additional subject-specific information (gender, +age, etc.) and allows storing several physical units and other properties. +Both specifications are supported by MNE. + + +.. _import-cnt: + +Neuroscan CNT (.cnt) +==================== + +CNT files can be read using :func:`mne.io.read_raw_cnt`. +Channel locations can be read from a montage or the file header. If read +from the header, the data channels (channels that are not assigned to EOG, ECG, +EMG or MISC) are fit to a sphere and assigned a z-value accordingly. If a +non-data channel does not fit to the sphere, it is assigned a z-value of 0. + +.. 
warning:: + Reading channel locations from the file header may be dangerous, as the + x_coord and y_coord in the ELECTLOC section of the header do not necessarily + translate to absolute locations. Furthermore, EEG electrode locations that + do not fit to a sphere will distort the layout when computing the z-values. + If you are not sure about the channel locations in the header, using a + montage is encouraged. + + +.. _import-egi: + +EGI simple binary (.egi) +======================== + +EGI simple binary files can be read using :func:`mne.io.read_raw_egi`. +EGI raw files are simple binary files with a header and can be exported by the +EGI Netstation acquisition software. + + +.. _import-mff: + +EGI MFF (.mff) +============== + +EGI MFF files can be read with :func:`mne.io.read_raw_egi`. + + +.. _import-set: + +EEGLAB files (.set, .fdt) +========================= + +EEGLAB .set files (which sometimes come with a separate .fdt file) can be read +using :func:`mne.io.read_raw_eeglab` and :func:`mne.read_epochs_eeglab`. + + +.. _import-nicolet: + +Nicolet (.data) +=============== + +These files can be read with :func:`mne.io.read_raw_nicolet`. + + +.. _import-nxe: + +eXimia EEG data (.nxe) +====================== + +EEG data from the Nexstim eXimia system can be read with +:func:`mne.io.read_raw_eximia`. + + +.. _import-persyst: + +Persyst EEG data (.lay, .dat) +============================= + +EEG data from the Persyst system can be read with +:func:`mne.io.read_raw_persyst`. + +Note that subject metadata may not be properly imported because Persyst +sometimes changes its specification from version to version. Please let us know +if you encounter a problem. + + +Nihon Kohden EEG data (.eeg, .21e, .pnt, .log) +============================================== + +EEG data from the Nihon Kohden (NK) system can be read using the +:func:`mne.io.read_raw_nihon` function. + +Files with the following extensions will be read: + +- The ``.eeg`` file contains the actual raw EEG data. +- The ``.pnt`` file contains metadata related to the recording such as the + measurement date. +- The ``.log`` file contains annotations for the recording. +- The ``.21e`` file contains channel and electrode information. + +Reading ``.11d``, ``.cmt``, ``.cn2``, and ``.edf`` files is currently not +supported. + +Note that not all subject metadata may be properly read because NK changes the +specification sometimes from version to version. Please let us know if you +encounter a problem. + + +XDF data (.xdf, .xdfz) +====================== + +MNE-Python does not support loading +`XDF `_ files out of the box, +because the inherent flexibility of the XDF format makes it difficult to +provide a one-size-fits-all function. For example, XDF supports signals from +various modalities recorded with different sampling rates. However, it is +relatively straightforward to import only a specific stream (such as EEG +signals) using the `pyxdf `_ package. +See :ref:`ex-read-xdf` for a simple example. + +A more sophisticated version, which supports selection of specific streams as +well as converting marker streams into annotations, is available in +`MNELAB `_. If you want to use this +functionality in a script, MNELAB records its history (View - History), which +contains all commands required to load an XDF file after successfully loading +that file with the graphical user interface. 
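As a rough, unofficial sketch of the pyxdf route described above (the file name is hypothetical, the generic channel names are made up, and the microvolt-to-volt scaling is an assumption about the amplifier), importing a single EEG stream could look like this:

import numpy as np
import pyxdf
import mne

streams, header = pyxdf.load_xdf('recording.xdf')
# Keep only the first stream whose declared type is EEG
eeg = next(s for s in streams if s['info']['type'][0] == 'EEG')
sfreq = float(eeg['info']['nominal_srate'][0])
# pyxdf returns samples as (n_times, n_channels); MNE expects the transpose
data = np.asarray(eeg['time_series'], float).T * 1e-6  # assuming µV, convert to V
ch_names = ['EEG%03d' % ii for ii in range(1, data.shape[0] + 1)]
info = mne.create_info(ch_names, sfreq, ch_types='eeg')
raw = mne.io.RawArray(data, info)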
+ + +Setting EEG references +====================== + +The preferred method for applying an EEG reference in MNE is +:func:`mne.set_eeg_reference`, or equivalent instance methods like +:meth:`raw.set_eeg_reference() `. By default, +the data are assumed to already be properly referenced. See +:ref:`tut-set-eeg-ref` for more information. + + +Reading electrode locations and head shapes for EEG recordings +============================================================== + +Some EEG formats (e.g., EGI, EDF/EDF+, BDF) contain neither electrode locations +nor head shape digitization information. Therefore, this information has to be +provided separately. For that purpose, all raw instances have a +:meth:`mne.io.Raw.set_montage` method to set electrode locations. + +When using locations of fiducial points, the digitization data are converted to +the MEG head coordinate system employed in the MNE software, see +:ref:`coordinate_systems`. +""" # noqa:E501 diff --git a/tutorials/io/30_reading_fnirs_data.py b/tutorials/io/30_reading_fnirs_data.py new file mode 100644 index 00000000000..4bb92aa9330 --- /dev/null +++ b/tutorials/io/30_reading_fnirs_data.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- +r""" +.. _tut-importing-fnirs-data: + +================================= +Importing data from fNIRS devices +================================= + +MNE includes various functions and utilities for reading NIRS +data and optode locations. + +fNIRS devices consist of light sources and light detectors. A channel is formed +by source-detector pairs. MNE stores the location of the channels, sources, and +detectors. + +.. warning:: Information about device light wavelength is stored in channel + names. Manual modification of channel names is not recommended. + +.. _import-nirx: + +NIRx (directory) +================================ + +NIRx recordings can be read in using :func:`mne.io.read_raw_nirx`. +The NIRx device stores data directly to a directory with multiple file types, +MNE extracts the appropriate information from each file. +MNE only supports NIRx files recorded with NIRStar version 15.0 and above. + + +.. _import-snirf: + +SNIRF (.snirf) +================================ + +Data stored in the SNIRF format can be read in +using :func:`mne.io.read_raw_snirf`. + +.. warning:: The SNIRF format has provisions for many different types of NIRS + recordings. MNE currently only supports continuous wave data + stored in the .snirf format. + + +.. _import-boxy: + +BOXY (.txt) +=========== + +BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. +The BOXY software and ISS Imagent I and II devices are frequency domain +systems that store data in a single ``.txt`` file containing what they call +(with MNE's name for that type of data in parens): + +- DC + All light collected by the detector (``fnirs_cw_amplitude``) +- AC + High-frequency modulated light intensity (``fnirs_fd_ac_amplitude``) +- Phase + Phase of the modulated light (``fnirs_fd_phase``) + +DC data is stored as the type ``fnirs_cw_amplitude`` because it +collects both the modulated and any unmodulated light, and hence is analogous +to what is collected by continuous wave systems such as NIRx. This helps with +conformance to SNIRF standard types. + +These raw data files can be saved by the acquisition devices as parsed or +unparsed ``.txt`` files, which affects how the data in the file is organised. +MNE will read either file type and extract the raw DC, AC, and Phase data. 
+If triggers are sent using the ``digaux`` port of the recording hardware, MNE +will also read the ``digaux`` data and create annotations for any triggers. + + +Loading legacy data in CSV or TSV format +======================================== + +.. warning:: This method is not supported and users are discouraged from using it. + You should convert your data to the + `SNIRF `_ format using the tools + provided by the Society for functional Near-Infrared Spectroscopy, + and then load it using :func:`mne.io.read_raw_snirf`. + +fNIRS measurements can have a non-standardised format that is not supported by +MNE and cannot be converted easily into SNIRF. This legacy data is often in CSV +or TSV format; we show here a way to load it even though it is not officially +supported by MNE due to the lack of standardisation of the file format (the +naming and ordering of channels, the type and scaling of data, and the +specification of sensor positions vary from vendor to vendor). You will likely +have to adapt this depending on the system from which your CSV originated. +""" # noqa:E501 + +import numpy as np +import pandas as pd +import mne + +# sphinx_gallery_thumbnail_number = 2 + +############################################################################### +# First, we generate an example CSV file which will then be loaded into MNE. +# This step would be skipped if you have actual data you wish to load. +# We simulate 16 channels with 100 samples of data and save this to a file +# called fnirs.csv. + +pd.DataFrame(np.random.normal(size=(16, 100))).to_csv("fnirs.csv") + + +############################################################################### +# +# .. warning:: The channels must be ordered in haemoglobin pairs, such that for +# a single channel all the types are in consecutive indices. The +# type order must be 'hbo' then 'hbr'. +# The data below is already in the correct order and may be +# used as a template for how data must be stored. +# If your data are stored in a different order than this +# mandatory format, then you must first read the data with +# channel names matching that structure, then reorder +# the channels to match the required format. +# +# Next, we will load the example CSV file. + +data = pd.read_csv('fnirs.csv') + + +############################################################################### +# Then, the metadata must be specified manually as the CSV file does not +# contain information about channel names, types, sample rate, etc. +# +# .. warning:: In MNE the naming of channels MUST follow the structure of +# ``S#_D# type`` where # is replaced by the appropriate source and +# detector numbers and type is either ``hbo``, ``hbr`` or the +# wavelength. + +ch_names = ['S1_D1 hbo', 'S1_D1 hbr', 'S2_D1 hbo', 'S2_D1 hbr', + 'S3_D1 hbo', 'S3_D1 hbr', 'S4_D1 hbo', 'S4_D1 hbr', + 'S5_D2 hbo', 'S5_D2 hbr', 'S6_D2 hbo', 'S6_D2 hbr', + 'S7_D2 hbo', 'S7_D2 hbr', 'S8_D2 hbo', 'S8_D2 hbr'] +ch_types = ['hbo', 'hbr', 'hbo', 'hbr', + 'hbo', 'hbr', 'hbo', 'hbr', + 'hbo', 'hbr', 'hbo', 'hbr', + 'hbo', 'hbr', 'hbo', 'hbr'] +sfreq = 10.  # in Hz + + +############################################################################### +# Finally, the data can be converted into an MNE data structure. +# The metadata above is used to create an :class:`mne.Info` data structure, +# and this is combined with the data to create an MNE :class:`~mne.io.Raw` +# object.
For more details on the info structure see :ref:`tut-info-class`, and +# for additional details on how continuous data is stored in MNE see +# :ref:`tut-raw-class`. +# For a more extensive description of how to create MNE data structures from +# raw array data see :ref:`tut_creating_data_structures`. + +info = mne.create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq) +raw = mne.io.RawArray(data, info, verbose=True) + + +############################################################################### +# Applying standard sensor locations to imported data +# --------------------------------------------------- +# +# Having information about optode locations may assist in your analysis. +# Beyond the general benefits this provides (e.g. creating regions of interest, +# etc.), this may be particularly important for fNIRS as information about +# the optode locations is required to convert the optical density data into an +# estimate of the haemoglobin concentrations. +# MNE provides methods to load standard sensor configurations (montages) from +# some vendors, and this is demonstrated below. +# Some handy tutorials for understanding sensor locations, coordinate systems, +# and how to store and view this information in MNE are: +# :ref:`tut-sensor-locations`, :ref:`plot_source_alignment`, and +# :ref:`ex-eeg-on-scalp`. +# +# Below is an example of how to load the optode positions for an Artinis +# OctaMon device. +# +# .. note:: It is also possible to create a custom montage from a file for +# fNIRS with :func:`mne.channels.read_custom_montage` by setting +# ``coord_frame`` to ``'mri'``. + +montage = mne.channels.make_standard_montage('artinis-octamon') +raw.set_montage(montage) + +# View the position of optodes in 2D to confirm the positions are correct. +raw.plot_sensors() + + +############################################################################### +# To validate that the positions were loaded correctly, it is also possible to view +# the location of the sources (red), detectors (black), and channels (white +# lines and orange dots) in a 3D representation. +# The fiducials are marked in blue, green and red. +# See :ref:`plot_source_alignment` for more details. + +subjects_dir = mne.datasets.sample.data_path() + '/subjects' +mne.datasets.fetch_fsaverage(subjects_dir=subjects_dir) + +trans = mne.channels.compute_native_head_t(montage) + +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment( + raw.info, trans=trans, subject='fsaverage', subjects_dir=subjects_dir, + surfaces=['brain', 'head'], coord_frame='mri', dig=True, show_axes=True, + fnirs=['channels', 'pairs', 'sources', 'detectors'], fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=90, elevation=90, distance=0.5, + focalpoint=(0., -0.01, 0.02)) diff --git a/tutorials/sample-datasets/plot_brainstorm_auditory.py b/tutorials/io/60_ctf_bst_auditory.py similarity index 96% rename from tutorials/sample-datasets/plot_brainstorm_auditory.py rename to tutorials/io/60_ctf_bst_auditory.py index 46870e20c25..b5eeeef6747 100644 --- a/tutorials/sample-datasets/plot_brainstorm_auditory.py +++ b/tutorials/io/60_ctf_bst_auditory.py @@ -2,13 +2,14 @@ """ ..
_tut-brainstorm-auditory: -==================================== -Brainstorm auditory tutorial dataset -==================================== +====================================================== +Working with CTF data: the Brainstorm auditory dataset +====================================================== Here we compute the evoked from raw for the auditory Brainstorm -tutorial dataset. For comparison, see [1]_ and the associated -`brainstorm site `_. +tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and the +associated `brainstorm site +`_. Experiment: @@ -23,10 +24,7 @@ References ---------- -.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM. - Brainstorm: A User-Friendly Application for MEG/EEG Analysis. - Computational Intelligence and Neuroscience, vol. 2011, Article ID - 879716, 13 pages, 2011. doi:10.1155/2011/879716 +.. footbibliography:: """ # Authors: Mainak Jas diff --git a/tutorials/io/README.txt b/tutorials/io/README.txt index d76cc7d9be0..875418cf8a2 100644 --- a/tutorials/io/README.txt +++ b/tutorials/io/README.txt @@ -1,7 +1,7 @@ .. _tut-data-formats: Reading data for different recording systems -============================================ +-------------------------------------------- These tutorials cover the basics of loading EEG/MEG data into MNE-Python for various recording devices. diff --git a/tutorials/io/plot_20_reading_eeg_data.py b/tutorials/io/plot_20_reading_eeg_data.py deleted file mode 100644 index 35232c3b3f5..00000000000 --- a/tutorials/io/plot_20_reading_eeg_data.py +++ /dev/null @@ -1,230 +0,0 @@ -# -*- coding: utf-8 -*- -r""" -.. _tut-imorting-eeg-data: - -=============================== -Importing data from EEG devices -=============================== - -MNE includes various functions and utilities for reading EEG -data and electrode locations. - -.. contents:: Page contents - :local: - :depth: 2 - - -.. _import-bv: - -BrainVision (.vhdr, .vmrk, .eeg) -================================ - -The BrainVision file format consists of three separate files: - -1. A text header file (``.vhdr``) containing meta data -2. A text marker file (``.vmrk``) containing information about events in the - data -3. A binary data file (``.eeg``) containing the voltage values of the EEG - -Both text files are based on the -`Microsoft Windows INI format `_ -consisting of: - -* sections marked as ``[square brackets]`` -* comments marked as ``; comment`` -* key-value pairs marked as ``key=value`` - -A documentation for core BrainVision file format is provided by Brain Products. -You can view the specification hosted on the -`Brain Products website `_ - -BrainVision EEG files can be read in using :func:`mne.io.read_raw_brainvision` -with the ``.vhdr`` header file as an input. - -.. warning:: Renaming BrainVision files can be problematic due to their - multifile structure. See this - `example `_ - for an instruction. - -.. note:: For *writing* BrainVision files, you can use the Python package - `pybv `_. - -.. _import-edf: - -European data format (.edf) -=========================== - -EDF and EDF+ files can be read using :func:`mne.io.read_raw_edf`. - -`EDF (European Data Format) `_ and -`EDF+ `_ are 16-bit formats. - -The EDF+ files may contain an annotation channel which can be used to store -trigger information. These annotations are available in ``raw.annotations``. - -Saving EDF files is not supported natively yet. `This gist -`__ -can be used to save any mne.io.Raw into EDF/EDF+/BDF/BDF+. - - -.. 
_import-biosemi: - -BioSemi data format (.bdf) -========================== - -The `BDF format `_ is a 24-bit -variant of the EDF format used by EEG systems manufactured by BioSemi. It can -be imported with :func:`mne.io.read_raw_bdf`. - -BioSemi amplifiers do not perform "common mode noise rejection" automatically. -The signals in the EEG file are the voltages between each electrode and CMS -active electrode, which still contain some CM noise (50 Hz, ADC reference -noise, etc., see `the BioSemi FAQ `__ -for further detail). -Thus, it is advisable to choose a reference (e.g., a single channel like Cz, -average of linked mastoids, average of all electrodes, etc.) on import of -BioSemi data to avoid losing signal information. The data can be re-referenced -later after cleaning if desired. - -.. warning:: The data samples in a BDF file are represented in a 3-byte - (24-bit) format. Since 3-byte raw data buffers are not presently - supported in the fif format these data will be changed to 4-byte - integers in the conversion. - - -.. _import-gdf: - -General data format (.gdf) -========================== - -GDF files can be read in using :func:`mne.io.read_raw_gdf`. - -`GDF (General Data Format) `_ is a flexible -format for biomedical signals that overcomes some of the limitations of the -EDF format. The original specification (GDF v1) includes a binary header -and uses an event table. An updated specification (GDF v2) was released in -2011 and adds fields for additional subject-specific information (gender, -age, etc.) and allows storing several physical units and other properties. -Both specifications are supported in MNE. - - -.. _import-cnt: - -Neuroscan CNT data format (.cnt) -================================ - -CNT files can be read in using :func:`mne.io.read_raw_cnt`. -The channel locations can be read from a montage or the file header. If read -from the header, the data channels (channels that are not assigned to EOG, ECG, -EMG or misc) are fit to a sphere and assigned a z-value accordingly. If a -non-data channel does not fit to the sphere, it is assigned a z-value of 0. - -.. warning:: - Reading channel locations from the file header may be dangerous, as the - x_coord and y_coord in ELECTLOC section of the header do not necessarily - translate to absolute locations. Furthermore, EEG-electrode locations that - do not fit to a sphere will distort the layout when computing the z-values. - If you are not sure about the channel locations in the header, use of a - montage is encouraged. - - -.. _import-egi: - -EGI simple binary (.egi) -======================== - -EGI simple binary files can be read in using :func:`mne.io.read_raw_egi`. -The EGI raw files are simple binary files with a header and can be exported -from using the EGI Netstation acquisition software. - - -.. _import-mff: - -EGI MFF (.mff) -============== -These files can also be read with :func:`mne.io.read_raw_egi`. - - -.. _import-set: - -EEGLAB set files (.set) -======================= - -EEGLAB .set files can be read in using :func:`mne.io.read_raw_eeglab` -and :func:`mne.read_epochs_eeglab`. - - -.. _import-nicolet: - -Nicolet (.data) -=============== -These files can be read with :func:`mne.io.read_raw_nicolet`. - - -.. _import-nxe: - -eXimia EEG data (.nxe) -====================== - -EEG data from the Nexstim eXimia system can be read in using the -:func:`mne.io.read_raw_eximia` function. - - -.. 
_import-persyst: - -Persyst EEG data (.lay, .dat) -============================= - -EEG data from the Persyst system can be read in using the -:func:`mne.io.read_raw_persyst` function. - -Note that not all the subject metadata may be properly read in -due to the fact that Persyst changes its specification -sometimes from version to version. Please submit an issue, or -pull request if you encounter a problem. - -Nihon Kohden EEG data (.EEG, .21E, .PNT, .LOG) -============================================== - -EEG data from the Nihon Kohden (NK) system can be read using the -:func:`mne.io.read_raw_nihon` function. - -Files with the following extensions will be read: - -- The ``.EEG`` file contains the actual raw EEG data. -- The ``.PNT`` file contains the metadata related to the recording, such - as the measurement date. -- The ``.LOG`` file contains annotations for the recording. -- The ``.21E`` file contains the channel and electrode - recording system information. - -Reading ``.11D``, ``.CMT``, ``.CN2``, and ``.EDF`` files is currently not -supported. - -Note that not all the subject metadata may be properly read in -due to the fact that NK changes the specification -sometimes from version to version. Please submit an issue, or -pull request if you encounter a problem. - - -Setting EEG references -====================== - -The preferred method for applying an EEG reference in MNE is -:func:`mne.set_eeg_reference`, or equivalent instance methods like -:meth:`raw.set_eeg_reference() `. By default, -the data are assumed to already be properly referenced. See -:ref:`tut-set-eeg-ref` for more information. - -Reading electrode locations and head shapes for EEG recordings -============================================================== - -Some EEG formats (EGI, EDF/EDF+, BDF) neither contain electrode location -information nor head shape digitization information. Therefore, this -information has to be provided separately. For that purpose all raw instances -have a :meth:`mne.io.Raw.set_montage` method to set electrode locations. - -When using the locations of the fiducial points the digitization data -are converted to the MEG head coordinate system employed in the -MNE software, see :ref:`coordinate_systems`. -""" # noqa:E501 diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py deleted file mode 100644 index 6968d32d685..00000000000 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -r""" -.. _tut-importing-fnirs-data: - -================================= -Importing data from fNIRS devices -================================= - -MNE includes various functions and utilities for reading NIRS -data and optode locations. - -.. contents:: Page contents - :local: - :depth: 2 - - -.. _import-nirx: - -NIRx (directory) -================================ - -NIRx recordings can be read in using :func:`mne.io.read_raw_nirx`. -The NIRx device stores data directly to a directory with multiple file types, -MNE extracts the appropriate information from each file. - - -.. _import-snirf: - -SNIRF (.snirf) -================================ - -Data stored in the SNIRF format can be read in -using :func:`mne.io.read_raw_snirf`. - -.. warning:: The SNIRF format has provisions for many different types of NIRS - recordings. MNE currently only supports continuous wave data - stored in the .snirf format. - - -.. _import-boxy: - -BOXY (.txt) -=========== - -BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. 
-The BOXY software and ISS Imagent I and II devices are frequency domain -systems that store data in a single ``.txt`` file containing what they call -(with MNE's name for that type of data in parens): - -- DC - All light collected by the detector (``fnirs_cw_amplitude``) -- AC - High-frequency modulated light intensity (``fnirs_fd_ac_amplitude``) -- Phase - Phase of the modulated light (``fnirs_fd_phase``) - -DC data is stored as the type ``fnirs_cw_amplitude`` because it -collects both the modulated and any unmodulated light, and hence is analogous -to what is collected by continuous wave systems such as NIRx. This helps with -conformance to SNIRF standard types. - -These raw data files can be saved by the acquisition devices as parsed or -unparsed ``.txt`` files, which affects how the data in the file is organised. -MNE will read either file type and extract the raw DC, AC, and Phase data. -If triggers are sent using the ``digaux`` port of the recording hardware, MNE -will also read the ``digaux`` data and create annotations for any triggers. - - -Storing of optode locations -=========================== - -NIRs devices consist of light sources and light detectors. -A channel is formed by source-detector pairs. -MNE stores the location of the channels, sources, and detectors. - - -.. warning:: Information about device light wavelength is stored in - channel names. Manual modification of channel names is not - recommended. - -""" # noqa:E501 diff --git a/tutorials/machine-learning/plot_receptive_field.py b/tutorials/machine-learning/30_strf.py similarity index 92% rename from tutorials/machine-learning/plot_receptive_field.py rename to tutorials/machine-learning/30_strf.py index 9f40d0ce9fd..36f0db9e488 100644 --- a/tutorials/machine-learning/plot_receptive_field.py +++ b/tutorials/machine-learning/30_strf.py @@ -10,30 +10,6 @@ spectro-temporal space onto an output, representing neural activity. We fit a receptive field model that attempts to recover the original linear filter that was used to create this data. - -References ----------- -Estimation of spectro-temporal and spatio-temporal receptive fields using -modeling with continuous inputs is described in: - -.. [1] Theunissen, F. E. et al. Estimating spatio-temporal receptive - fields of auditory and visual neurons from their responses to - natural stimuli. Network 12, 289-316 (2001). - -.. [2] Willmore, B. & Smyth, D. Methods for first-order kernel - estimation: simple-cell receptive fields from responses to - natural scenes. Network 14, 553-77 (2003). - -.. [3] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016). - The Multivariate Temporal Response Function (mTRF) Toolbox: - A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli. - Frontiers in Human Neuroscience 10, 604. - doi:10.3389/fnhum.2016.00604 - -.. [4] Holdgraf, C. R. et al. Rapid tuning shifts in human auditory cortex - enhance speech intelligibility. Nature Communications, 7, 13654 (2016). - doi:10.1038/ncomms13654 - """ # Authors: Chris Holdgraf # Eric Larson @@ -57,7 +33,8 @@ # Load audio data # --------------- # -# We'll read in the audio data from [3]_ in order to simulate a response. +# We'll read in the audio data from :footcite:`CrosseEtAl2016` in order to +# simulate a response. # # In addition, we'll downsample the data along the time dimension in order to # speed up computation. 
Note that depending on the input values, this may @@ -240,7 +217,7 @@ # score as well as the model coefficients for each value, in order to # visualize how coefficients change with different levels of regularization. # These issues as well as the STRF pipeline are described in detail -# in [1]_, [2]_, and [4]_. +# in :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,HoldgrafEtAl2016`. # Plot model score for each ridge parameter fig = plt.figure(figsize=(10, 4)) @@ -282,7 +259,7 @@ # & & & & -1 & 1\end{matrix}\right] # # This imposes a smoothness constraint of nearby time samples and/or features. -# Quoting [3]_: +# Quoting :footcite:`CrosseEtAl2016` : # # Tikhonov [identity] regularization (Equation 5) reduces overfitting by # smoothing the TRF estimate in a way that is insensitive to @@ -364,3 +341,8 @@ plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45) plt.autoscale(tight=True) mne.viz.tight_layout() + +############################################################################### +# References +# ========== +# .. footbibliography:: diff --git a/tutorials/machine-learning/plot_sensors_decoding.py b/tutorials/machine-learning/50_decoding.py similarity index 96% rename from tutorials/machine-learning/plot_sensors_decoding.py rename to tutorials/machine-learning/50_decoding.py index 7967b3f90a7..34de275177c 100644 --- a/tutorials/machine-learning/plot_sensors_decoding.py +++ b/tutorials/machine-learning/50_decoding.py @@ -3,10 +3,6 @@ Decoding (MVPA) =============== -.. contents:: Contents - :local: - :depth: 3 - .. include:: ../../links.inc Design philosophy @@ -185,8 +181,8 @@ # # .. topic:: Examples # -# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_eeg.py` -# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_csp_timefreq.py` +# * :ref:`ex-decoding-csp-eeg` +# * :ref:`ex-decoding-csp-eeg-timefreq` # # .. note:: # @@ -217,7 +213,7 @@ # # .. topic:: Examples # -# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_spoc_CMC.py` +# * :ref:`ex-spoc-cmc` # # xDAWN # ^^^^^ @@ -230,8 +226,8 @@ # # .. topic:: Examples # -# * :ref:`sphx_glr_auto_examples_preprocessing_plot_xdawn_denoising.py` -# * :ref:`sphx_glr_auto_examples_decoding_plot_decoding_xdawn_eeg.py` +# * :ref:`ex-xdawn-denoising` +# * :ref:`ex-xdawn-decoding` # # Effect-matched spatial filtering # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -242,7 +238,7 @@ # # .. topic:: Examples # -# * :ref:`sphx_glr_auto_examples_decoding_plot_ems_filtering.py` +# * :ref:`ex-ems-filtering` # # Patterns vs. filters # ^^^^^^^^^^^^^^^^^^^^ @@ -256,8 +252,7 @@ # :label: patterns # # The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns. -# This is also called the mixing matrix. The example -# :ref:`sphx_glr_auto_examples_decoding_plot_linear_model_patterns.py` +# This is also called the mixing matrix. The example :ref:`ex-linear-patterns` # discusses the difference between patterns and filters. # # These can be plotted with: diff --git a/tutorials/machine-learning/README.txt b/tutorials/machine-learning/README.txt index fa7b612dce4..3f881f1fc73 100644 --- a/tutorials/machine-learning/README.txt +++ b/tutorials/machine-learning/README.txt @@ -1,5 +1,5 @@ Machine learning models of neural activity -========================================== +------------------------------------------ These tutorials cover some of the machine learning methods available in MNE-Python. 
diff --git a/tutorials/misc/README.txt b/tutorials/misc/README.txt deleted file mode 100644 index 103ef209cdf..00000000000 --- a/tutorials/misc/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -Miscellaneous tutorials -======================= - -Assorted tutorials on configuring MNE-Python, working with eCOG data, and -other topics. diff --git a/tutorials/misc/plot_ecog.py b/tutorials/misc/plot_ecog.py deleted file mode 100644 index 87b8160dee0..00000000000 --- a/tutorials/misc/plot_ecog.py +++ /dev/null @@ -1,202 +0,0 @@ -""" -.. _tut_working_with_ecog: - -====================== -Working with ECoG data -====================== - -MNE supports working with more than just MEG and EEG data. Here we show some -of the functions that can be used to facilitate working with -electrocorticography (ECoG) data. - -This example shows how to use: - -- ECoG data -- channel locations in subject's MRI space -- projection onto a surface - -For an example that involves sEEG data, channel locations in -MNI space, or projection into a volume, see :ref:`tut_working_with_seeg`. -""" -# Authors: Eric Larson -# Chris Holdgraf -# Adam Li -# -# License: BSD (3-clause) - -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.animation as animation - -import mne -from mne.viz import plot_alignment, snapshot_brain_montage - -print(__doc__) - -# paths to mne datasets - sample ECoG and FreeSurfer subject -misc_path = mne.datasets.misc.data_path() -sample_path = mne.datasets.sample.data_path() -subject = 'sample' -subjects_dir = sample_path + '/subjects' - -############################################################################### -# Let's load some ECoG electrode locations and names, and turn them into -# a :class:`mne.channels.DigMontage` class. First, use pandas to read in the -# ``.tsv`` file. - -# In this tutorial, the electrode coordinates are assumed to be in meters -elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv', - sep='\t', header=0, index_col=None) -ch_names = elec_df['name'].tolist() -ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float) -ch_pos = dict(zip(ch_names, ch_coords)) -# Ideally the nasion/LPA/RPA will also be present from the digitization, here -# we use fiducials estimated from the subject's FreeSurfer MNI transformation: -lpa, nasion, rpa = mne.coreg.get_mni_fiducials( - subject, subjects_dir=subjects_dir) -lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r'] - -############################################################################### -# Now we make a :class:`mne.channels.DigMontage` stating that the ECoG -# contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate system. - -montage = mne.channels.make_dig_montage( - ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa) -print('Created %s channel positions' % len(ch_names)) - -############################################################################### -# Now we get the :term:`trans` that transforms from our MRI coordinate system -# to the head coordinate frame. This transform will be applied to the -# data when applying the montage so that standard plotting functions like -# :func:`mne.viz.plot_evoked_topomap` will be aligned properly. - -trans = mne.channels.compute_native_head_t(montage) -print(trans) - -############################################################################### -# Now that we have our montage, we can load in our corresponding -# time-series data and set the montage to the raw data. 
- -# first we'll load in the sample dataset -raw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf') - -# drop bad channels -raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names]) -raw.load_data() -raw.drop_channels(raw.info['bads']) -raw.crop(0, 2) # just process 2 sec of data for speed - -# attach montage -raw.set_montage(montage) - -# set channel types to ECoG (instead of EEG) -raw.set_channel_types({ch_name: 'ecog' for ch_name in raw.ch_names}) - -############################################################################### -# We can then plot the locations of our electrodes on our subject's brain. -# We'll use :func:`~mne.viz.snapshot_brain_montage` to save the plot as image -# data (along with xy positions of each electrode in the image), so that later -# we can plot frequency band power on top of it. -# -# .. note:: These are not real electrodes for this subject, so they -# do not align to the cortical surface perfectly. - -fig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir, - surfaces=['pial'], trans=trans, coord_frame='mri') -mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03]) - -xy, im = snapshot_brain_montage(fig, montage) - -############################################################################### -# Next, we'll compute the signal power in the gamma (30-90 Hz) and alpha -# (8-12 Hz) bands. -gamma_power_t = raw.copy().filter(30, 90).apply_hilbert( - envelope=True).get_data() -alpha_power_t = raw.copy().filter(8, 12).apply_hilbert( - envelope=True).get_data() -gamma_power = gamma_power_t.mean(axis=-1) -alpha_power = alpha_power_t.mean(axis=-1) - -############################################################################### -# Now let's use matplotlib to overplot frequency band power onto the electrodes -# which can be plotted on top of the brain from -# :func:`~mne.viz.snapshot_brain_montage`. - -# Convert from a dictionary to array to plot -xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']]) - -# colormap to view spectral power -cmap = 'viridis' - -# Create a 1x2 figure showing the average power in gamma and alpha bands. -fig, axs = plt.subplots(1, 2, figsize=(20, 10)) -# choose a colormap range wide enough for both frequency bands -_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten() -vmin, vmax = np.percentile(_gamma_alpha_power, [10, 90]) -for ax, band_power, band in zip(axs, - [gamma_power, alpha_power], - ['Gamma', 'Alpha']): - ax.imshow(im) - ax.set_axis_off() - sc = ax.scatter(*xy_pts.T, c=band_power, s=200, - cmap=cmap, vmin=vmin, vmax=vmax) - ax.set_title(f'{band} band power', size='x-large') -fig.colorbar(sc, ax=axs) - -############################################################################### -# Say we want to visualize the evolution of the power in the gamma band, -# instead of just plotting the average. We can use -# `matplotlib.animation.FuncAnimation` to create an animation and apply this -# to the brain figure. 
- - -# create an initialization and animation function -# to pass to FuncAnimation -def init(): - """Create an empty frame.""" - return paths, - - -def animate(i, activity): - """Animate the plot.""" - paths.set_array(activity[:, i]) - return paths, - - -# create the figure and apply the animation of the -# gamma frequency band activity -fig, ax = plt.subplots(figsize=(5, 5)) -ax.imshow(im) -ax.set_axis_off() -paths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200, - cmap=cmap, vmin=vmin, vmax=vmax) -fig.colorbar(paths, ax=ax) -ax.set_title('Gamma frequency over time (Hilbert transform)', - size='large') - -# avoid edge artifacts and decimate, showing just a short chunk -sl = slice(100, 150) -show_power = gamma_power_t[:, sl] -anim = animation.FuncAnimation(fig, animate, init_func=init, - fargs=(show_power,), - frames=show_power.shape[1], - interval=100, blit=True) - -############################################################################### -# Alternatively, we can project the sensor data to the nearest locations on -# the pial surface and visualize that: - -# sphinx_gallery_thumbnail_number = 4 - -evoked = mne.EvokedArray( - gamma_power_t[:, sl], raw.info, tmin=raw.times[sl][0]) -stc = mne.stc_near_sensors(evoked, trans, subject, subjects_dir=subjects_dir) -clim = dict(kind='value', lims=[vmin * 0.9, vmin, vmax]) -brain = stc.plot(surface='pial', hemi='both', initial_time=0.68, - colormap='viridis', clim=clim, views='parietal', - subjects_dir=subjects_dir, size=(500, 500)) - -# You can save a movie like the one on our documentation website with: -# brain.save_movie(time_dilation=50, interpolation='linear', framerate=10, -# time_viewer=True) diff --git a/tutorials/misc/plot_modifying_data_inplace.py b/tutorials/misc/plot_modifying_data_inplace.py deleted file mode 100644 index 1201fe8eac9..00000000000 --- a/tutorials/misc/plot_modifying_data_inplace.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -.. _tut_modifying_data_inplace: - -Modifying data in-place -======================= - -It is often necessary to modify data once you have loaded it into memory. -Common examples of this are signal processing, feature extraction, and data -cleaning. Some functionality is pre-built into MNE-python, though it is also -possible to apply an arbitrary function to the data. -""" - -import mne -import os.path as op -import numpy as np -from matplotlib import pyplot as plt - -############################################################################### -# Load an example dataset, the preload flag loads the data into memory now -data_path = op.join(mne.datasets.sample.data_path(), 'MEG', - 'sample', 'sample_audvis_raw.fif') -raw = mne.io.read_raw_fif(data_path, preload=True) -raw = raw.crop(0, 10) -print(raw) - -############################################################################### -# Signal processing -# ----------------- -# -# Most MNE objects have in-built methods for filtering: - -filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)] -_, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10)) -data, times = raw[0] -_ = ax.plot(data[0]) -for fmin, fmax in filt_bands: - raw_filt = raw.copy() - raw_filt.filter(fmin, fmax, fir_design='firwin') - _ = ax2.plot(raw_filt[0][0][0]) -ax2.legend(filt_bands) -ax.set_title('Raw data') -ax2.set_title('Band-pass filtered data') - -############################################################################### -# In addition, there are functions for applying the Hilbert transform, which is -# useful to calculate phase / amplitude of your signal. 
- -# Filter signal with a fairly steep filter, then take hilbert transform - -raw_band = raw.copy() -raw_band.filter(12, 18, l_trans_bandwidth=2., h_trans_bandwidth=2., - fir_design='firwin') -raw_hilb = raw_band.copy() -hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True) -raw_hilb.apply_hilbert(hilb_picks) -print(raw_hilb[0][0].dtype) - -############################################################################### -# Finally, it is possible to apply arbitrary functions to your data to do -# what you want. Here we will use this to take the amplitude and phase of -# the hilbert transformed data. -# -# .. note:: You can also use ``envelope=True`` in the call to -# :meth:`mne.io.Raw.apply_hilbert` to do this automatically. -# - -# Take the amplitude and phase -raw_amp = raw_hilb.copy() -raw_amp.apply_function(np.abs, hilb_picks) -raw_phase = raw_hilb.copy() -raw_phase.apply_function(np.angle, hilb_picks) - -_, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10)) -a1.plot(raw_band[hilb_picks[0]][0][0].real) -a1.plot(raw_amp[hilb_picks[0]][0][0].real) -a2.plot(raw_phase[hilb_picks[0]][0][0].real) -a1.set_title('Amplitude of frequency band') -a2.set_title('Phase of frequency band') diff --git a/tutorials/preprocessing/plot_10_preprocessing_overview.py b/tutorials/preprocessing/10_preprocessing_overview.py similarity index 99% rename from tutorials/preprocessing/plot_10_preprocessing_overview.py rename to tutorials/preprocessing/10_preprocessing_overview.py index 68623bb865a..f183580c999 100644 --- a/tutorials/preprocessing/plot_10_preprocessing_overview.py +++ b/tutorials/preprocessing/10_preprocessing_overview.py @@ -8,10 +8,6 @@ This tutorial covers the basics of artifact detection, and introduces the artifact detection tools available in MNE-Python. -.. contents:: Page contents - :local: - :depth: 2 - We begin as always by importing the necessary Python modules and loading some :ref:`example data `: """ diff --git a/tutorials/preprocessing/plot_15_handling_bad_channels.py b/tutorials/preprocessing/15_handling_bad_channels.py similarity index 96% rename from tutorials/preprocessing/plot_15_handling_bad_channels.py rename to tutorials/preprocessing/15_handling_bad_channels.py index 37077ea65ab..b13910c38ff 100644 --- a/tutorials/preprocessing/plot_15_handling_bad_channels.py +++ b/tutorials/preprocessing/15_handling_bad_channels.py @@ -2,16 +2,12 @@ """ .. _tut-bad-channels: -Interpolating bad channels +Handling bad channels ========================== This tutorial covers manual marking of bad channels and reconstructing bad channels based on good signals at other sensors. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need, and loading some example data: """ @@ -167,7 +163,7 @@ # can lead to too many epochs being discarded based on signal amplitude # rejection thresholds, which in turn can lead to less robust estimation of the # noise covariance across sensors. Noisy channels can also interfere with -# :term:`SSP ` computations, because the projectors will be +# :term:`SSP` computations, because the projectors will be # spatially biased in the direction of the noisy channel, which can cause # adjacent good channels to be suppressed. ICA is corrupted by noisy channels # for similar reasons. 
On the other hand, when performing machine learning @@ -194,7 +190,8 @@ # ~~~~~~~~~~~~~~~~~~~~~~~ # # Interpolation of EEG channels in MNE-Python is done using the spherical -# spline method [1]_, which projects the sensor locations onto a unit sphere +# spline method :footcite:`PerrinEtAl1989`, which projects the sensor +# locations onto a unit sphere # and interpolates the signal at the bad sensor locations based on the signals # at the good locations. Mathematical details are presented in # :ref:`channel-interpolation`. Interpolation of MEG channels uses the field @@ -259,9 +256,7 @@ # References # ^^^^^^^^^^ # -# .. [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989). -# Spherical splines for scalp potential and current density mapping. -# *Electroencephalography Clinical Neurophysiology* 72(2):184-187. +# .. footbibliography:: # # # .. LINKS diff --git a/tutorials/preprocessing/plot_20_rejecting_bad_data.py b/tutorials/preprocessing/20_rejecting_bad_data.py similarity index 99% rename from tutorials/preprocessing/plot_20_rejecting_bad_data.py rename to tutorials/preprocessing/20_rejecting_bad_data.py index bbf439904d2..1b8151709a9 100644 --- a/tutorials/preprocessing/plot_20_rejecting_bad_data.py +++ b/tutorials/preprocessing/20_rejecting_bad_data.py @@ -7,10 +7,6 @@ This tutorial covers manual marking of bad spans of data, and automated rejection of data spans based on signal amplitude. -.. contents:: Page contents - :local: - :depth: 2 - We begin as always by importing the necessary Python modules and loading some :ref:`example data `; to save memory we'll use a pre-filtered and downsampled version of the example data, and we'll also load an events diff --git a/tutorials/discussions/plot_background_filtering.py b/tutorials/preprocessing/25_background_filtering.py similarity index 91% rename from tutorials/discussions/plot_background_filtering.py rename to tutorials/preprocessing/25_background_filtering.py index a2f35b8227a..39c90da6914 100644 --- a/tutorials/discussions/plot_background_filtering.py +++ b/tutorials/preprocessing/25_background_filtering.py @@ -9,23 +9,31 @@ Here we give some background information on filtering in general, and how it is done in MNE-Python in particular. Recommended reading for practical applications of digital -filter design can be found in Parks & Burrus (1987) [1]_ and -Ifeachor & Jervis (2002) [2]_, and for filtering in an -M/EEG context we recommend reading Widmann *et al.* (2015) [7]_. -To see how to use the default filters in MNE-Python on actual data, see -the :ref:`tut-filter-resample` tutorial. +filter design can be found in +Parks & Burrus (1987) :footcite:`ParksBurrus1987` +and Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, +and for filtering in an M/EEG context we recommend reading +Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. + +.. note:: + + This tutorial goes pretty deep into the mathematics of filtering and the + design decisions that go into choosing a filter. If you just want to know + how to apply the default filters in MNE-Python to your data, skip this + tutorial and read :ref:`tut-filter-resample` instead (but someday, you + should come back and read this one too 🙂). -.. 
contents:: - :local: Problem statement ================= Practical issues with filtering electrophysiological data are covered -in Widmann *et al.* (2012) [6]_, where they conclude with this statement: +in Widmann *et al.* (2012) :footcite:`WidmannSchroger2012`, where they +conclude with this statement: Filtering can result in considerable distortions of the time course - (and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_]. + (and amplitude) of a signal as demonstrated by VanRullen (2011) + :footcite:`VanRullen2011`. Thus, filtering should not be used lightly. However, if effects of filtering are cautiously considered and filter artifacts are minimized, a valid interpretation of the temporal dynamics of filtered @@ -80,8 +88,8 @@ IIR filters depend on the previous input and output values, and thus can have effectively infinite impulse responses. -As outlined in Parks & Burrus (1987) [1]_, FIR and IIR have different -trade-offs: +As outlined in Parks & Burrus (1987) :footcite:`ParksBurrus1987`, +FIR and IIR have different trade-offs: * A causal FIR filter can be linear-phase -- i.e., the same time delay across all frequencies -- whereas a causal IIR filter cannot. The phase @@ -92,11 +100,12 @@ accumulating error (due to its recursive calculations). In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.* -(2015) [7]_: +(2015) :footcite:`WidmannEtAl2015`: Despite IIR filters often being considered as computationally more efficient, they are recommended only when high throughput and sharp - cutoffs are required (Ifeachor and Jervis, 2002 [[2]_], p. 321)... + cutoffs are required + (Ifeachor and Jervis, 2002 :footcite:`IfeachorJervis2002`, p. 321)... FIR filters are easier to control, are always stable, have a well-defined passband, can be corrected to zero-phase without additional computations, and can be converted to minimum-phase. @@ -609,13 +618,15 @@ def plot_signal(x, offset): # # Filters in general, especially those that are non-causal (zero-phase), can # make activity appear to occur earlier or later than it truly did. As -# mentioned in VanRullen (2011) [3]_, investigations of commonly (at the time) +# mentioned in VanRullen (2011) :footcite:`VanRullen2011`, +# investigations of commonly (at the time) # used low-pass filters created artifacts when they were applied to simulated # data. However, such deleterious effects were minimal in many real-world -# examples in Rousselet (2012) [5]_. +# examples in Rousselet (2012) :footcite:`Rousselet2012`. # -# Perhaps more revealing, it was noted in Widmann & Schröger (2012) [6]_ that -# the problematic low-pass filters from VanRullen (2011) [3]_: +# Perhaps more revealing, it was noted in Widmann & Schröger (2012) +# :footcite:`WidmannSchroger2012` that the problematic low-pass filters from +# VanRullen (2011) :footcite:`VanRullen2011`: # # 1. Used a least-squares design (like :func:`scipy.signal.firls`) that # included "do-not-care" transition regions, which can lead to @@ -629,20 +640,22 @@ def plot_signal(x, offset): # ------------------ # # When it comes to high-pass filtering, using corner frequencies above 0.1 Hz -# were found in Acunzo *et al.* (2012) [4]_ to: +# were found in Acunzo *et al.* (2012) :footcite:`AcunzoEtAl2012` to: # # "... generate a systematic bias easily leading to misinterpretations of # neural activity.” # -# In a related paper, Widmann *et al.* (2015) [7]_ also came to suggest a -# 0.1 Hz highpass. More evidence followed in Tanner *et al.* (2015) [8]_ of -# such distortions. 
Using data from language ERP studies of semantic and +# In a related paper, Widmann *et al.* (2015) :footcite:`WidmannEtAl2015` +# also came to suggest a 0.1 Hz highpass. More evidence followed in +# Tanner *et al.* (2015) :footcite:`TannerEtAl2015` of such distortions. +# Using data from language ERP studies of semantic and # syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz # caused significant effects to be introduced implausibly early when compared # to the unfiltered data. From this, the authors suggested the optimal # high-pass value for language processing to be 0.1 Hz. # -# We can recreate a problematic simulation from Tanner *et al.* (2015) [8]_: +# We can recreate a problematic simulation from +# Tanner *et al.* (2015) :footcite:`TannerEtAl2015`: # # "The simulated component is a single-cycle cosine wave with an amplitude # of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The @@ -698,7 +711,8 @@ def plot_signal(x, offset): plt.show() ############################################################################### -# Similarly, in a P300 paradigm reported by Kappenman & Luck (2010) [12]_, +# Similarly, in a P300 paradigm reported by +# Kappenman & Luck (2010) :footcite:`KappenmanLuck2010`, # they found that applying a 1 Hz high-pass decreased the probability of # finding a significant difference in the N100 response, likely because # the P300 response was smeared (and inverted) in time by the high-pass @@ -719,12 +733,13 @@ def plot_signal(x, offset): # Baseline problems (or solutions?) # --------------------------------- # -# In an evolving discussion, Tanner *et al.* (2015) [8]_ suggest using baseline -# correction to remove slow drifts in data. However, Maess *et al.* (2016) [9]_ +# In an evolving discussion, Tanner *et al.* (2015) :footcite:`TannerEtAl2015` +# suggest using baseline correction to remove slow drifts in data. However, +# Maess *et al.* (2016) :footcite:`MaessEtAl2016` # suggest that baseline correction, which is a form of high-passing, does # not offer substantial advantages over standard high-pass filtering. -# Tanner *et al.* (2016) [10]_ rebutted that baseline correction can correct -# for problems with filtering. +# Tanner *et al.* (2016) :footcite:`TannerEtAl2016` +# rebutted that baseline correction can correct for problems with filtering. # # To see what they mean, consider again our old simulated signal ``x`` from # before: @@ -757,7 +772,8 @@ def baseline_plot(x): baseline_plot(x) ############################################################################### -# In response, Maess *et al.* (2016) [11]_ note that these simulations do not +# In response, Maess *et al.* (2016) :footcite:`MaessEtAl2016a` +# note that these simulations do not # address cases of pre-stimulus activity that is shared across conditions, as # applying baseline correction will effectively copy the topology outside the # baseline period. We can see this if we give our signal ``x`` with some @@ -804,7 +820,8 @@ def baseline_plot(x): # and thus :func:`mne.io.Raw.filter` is used. This function under the hood # (among other things) calls :func:`mne.filter.filter_data` to actually # filter the data, which by default applies a zero-phase FIR filter designed -# using :func:`scipy.signal.firwin`. In Widmann *et al.* (2015) [7]_, they +# using :func:`scipy.signal.firwin`. 
+# In Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, they # suggest a specific set of parameters to use for high-pass filtering, # including: # @@ -854,7 +871,8 @@ def baseline_plot(x): # ``fir_design='firwin2'`` mode. # # .. note:: For ``fir_design='firwin2'``, the multiplicative factors are -# doubled compared to what is given in Ifeachor & Jervis (2002) [2]_ +# doubled compared to what is given in +# Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002` # (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect # on the frequency response, which we compensate for by # increasing the filter length. This is why @@ -899,7 +917,8 @@ def baseline_plot(x): # Defaults in other software # -------------------------- # A good but possibly outdated comparison of filtering in various software -# packages is available in Widmann *et al.* (2015) [7]_. Briefly: +# packages is available in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. +# Briefly: # # * EEGLAB # MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB @@ -915,7 +934,8 @@ def baseline_plot(x): # # Reporting Filters # ================= -# On page 45 in Widmann *et al.* (2015) [7]_, there is a convenient list of +# On page 45 in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, +# there is a convenient list of # important filter parameters that should be reported with each publication: # # 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR) @@ -985,7 +1005,7 @@ def baseline_plot(x): # When use standard :func:`scipy.signal.firwin` design (as for FIR filters in # MNE), the passband ripple and stopband attenuation are dependent upon the # window used in design. For standard windows the values are listed in this -# table (see Ifeachor & Jervis (2002) [2]_, p. 357): +# table (see Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, p. 357): # # +-------------------------+-----------------+----------------------+ # | Name of window function | Passband ripple | Stopband attenuation | @@ -1026,45 +1046,7 @@ def baseline_plot(x): # # References # ========== -# -# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design. -# New York: Wiley-Interscience. -# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing: -# A Practical Approach. Prentice Hall. -# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping -# the time course of recognition. Perception Science, 2, 365. -# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012). -# Systematic biases in early ERP and ERF components as a result -# of high-pass filtering. Journal of Neuroscience Methods, -# 209(1), 212–218. https://doi.org/10.1016/j.jneumeth.2012.06.011 -# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying -# ERP time-courses? Frontiers in Psychology, 3(131) -# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter -# artifacts in the analysis of electrophysiological data. -# Perception Science, 233. -# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter -# design for electrophysiological data – a practical approach. -# Journal of Neuroscience Methods, 250, 34–46. -# https://doi.org/10.1016/j.jneumeth.2014.08.002 -# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015). -# How inappropriate high-pass filters can produce artifactual effects -# and incorrect conclusions in ERP studies of language and cognition. -# Psychophysiology, 52(8), 997–1009. https://doi.org/10.1111/psyp.12437 -# .. 
[9] Maess, B., Schröger, E., & Widmann, A. (2016). -# High-pass filters and baseline correction in M/EEG analysis. -# Commentary on: “How inappropriate high-pass filters can produce -# artifacts and incorrect conclusions in ERP studies of language -# and cognition.” Journal of Neuroscience Methods, 266, 164–165. -# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016). -# On high-pass filter artifacts (they’re real) and baseline correction -# (it’s a good idea) in ERP/ERMF analysis. -# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016). -# High-pass filters and baseline correction in M/EEG analysis-continued -# discussion. Journal of Neuroscience Methods, 266, 171–172. -# Journal of Neuroscience Methods, 266, 166–170. -# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data -# quality and statistical significance in ERP recordings. -# Psychophysiology, 47, 888-904. +# .. footbibliography:: # # .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response # .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response diff --git a/tutorials/preprocessing/plot_30_filtering_resampling.py b/tutorials/preprocessing/30_filtering_resampling.py similarity index 99% rename from tutorials/preprocessing/plot_30_filtering_resampling.py rename to tutorials/preprocessing/30_filtering_resampling.py index b6695cede8b..a63e04192c1 100644 --- a/tutorials/preprocessing/plot_30_filtering_resampling.py +++ b/tutorials/preprocessing/30_filtering_resampling.py @@ -8,10 +8,6 @@ This tutorial covers filtering and resampling, and gives examples of how filtering can be used for artifact repair. -.. contents:: Page contents - :local: - :depth: 2 - We begin as always by importing the necessary Python modules and loading some :ref:`example data `. We'll also crop the data to 60 seconds (to save memory on the documentation server): diff --git a/tutorials/preprocessing/plot_35_artifact_correction_regression.py b/tutorials/preprocessing/35_artifact_correction_regression.py similarity index 99% rename from tutorials/preprocessing/plot_35_artifact_correction_regression.py rename to tutorials/preprocessing/35_artifact_correction_regression.py index 9fd2122f2ac..c63e1477179 100644 --- a/tutorials/preprocessing/plot_35_artifact_correction_regression.py +++ b/tutorials/preprocessing/35_artifact_correction_regression.py @@ -8,10 +8,6 @@ This tutorial covers removal of artifacts using regression as in Gratton et al. (1983) :footcite:`GrattonEtAl1983`. -.. contents:: Page contents - :local: - :depth: 2 - Generally speaking, artifacts that result in time waveforms on the sensors that are accurately reflected by some reference signal can be removed by regression. Blink artifacts captured by bipolar EOG channels serve as a good diff --git a/tutorials/preprocessing/plot_40_artifact_correction_ica.py b/tutorials/preprocessing/40_artifact_correction_ica.py similarity index 78% rename from tutorials/preprocessing/plot_40_artifact_correction_ica.py rename to tutorials/preprocessing/40_artifact_correction_ica.py index c5b9f3c92d9..ef8e88c8208 100644 --- a/tutorials/preprocessing/plot_40_artifact_correction_ica.py +++ b/tutorials/preprocessing/40_artifact_correction_ica.py @@ -7,11 +7,9 @@ This tutorial covers the basics of independent components analysis (ICA) and shows how ICA can be used for artifact repair; an extended example illustrates -repair of ocular and heartbeat artifacts. - -.. contents:: Page contents - :local: - :depth: 2 +repair of ocular and heartbeat artifacts. 
For conceptual background on ICA, see +:ref:`this scikit-learn tutorial +`. We begin as always by importing the necessary Python modules and loading some :ref:`example data `. Because ICA can be computationally @@ -20,7 +18,6 @@ and classes from that submodule: """ - import os import mne from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs, @@ -69,12 +66,13 @@ # # If you want to perform ICA with *no* dimensionality reduction (other than # the number of Independent Components (ICs) given in ``n_components``, and -# any subsequent exclusion of ICs you specify in ``ICA.exclude``), pass -# ``n_pca_components=None`` (this is the default value). +# any subsequent exclusion of ICs you specify in ``ICA.exclude``), simply +# pass ``n_components``. # # However, if you *do* want to reduce dimensionality, consider this -# example: if you have 300 sensor channels and you set -# ``n_pca_components=None`` and ``n_components=50``, then the the first 50 +# example: if you have 300 sensor channels and you set ``n_components=50`` +# during instantiation and pass ``n_pca_components=None`` to +# `~mne.preprocessing.ICA.apply`, then the the first 50 # PCs are sent to the ICA algorithm (yielding 50 ICs), and during # reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs # plus PCs number 51-300 (the full PCA residual). If instead you specify @@ -84,16 +82,16 @@ # # **If you have previously been using EEGLAB**'s ``runica()`` and are # looking for the equivalent of its ``'pca', n`` option to reduce -# dimensionality via PCA before the ICA step, set ``n_components=n`` -# during initialization and pass ``n_pca_components=n`` to -# `~mne.preprocessing.ICA.apply`. +# dimensionality, set ``n_components=n`` during initialization and pass +# ``n_pca_components=n`` to `~mne.preprocessing.ICA.apply`. # # MNE-Python implements three different ICA algorithms: ``fastica`` (the # default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly # widespread use; Picard is a newer (2017) algorithm that is expected to # converge faster than FastICA and Infomax, and is more robust than other # algorithms in cases where the sources are not completely independent, which -# typically happens with real EEG/MEG data. See [1]_ for more information. +# typically happens with real EEG/MEG data. See +# :footcite:`AblinEtAl2018` for more information. # # The ICA interface in MNE-Python is similar to the interface in # `scikit-learn`_: some general parameters are specified when creating an @@ -163,7 +161,7 @@ ############################################################################### # We can get a summary of how the ocular artifact manifests across each channel -# type using :func:`~mne.preprocessing.create_eog_epochs` like we did in the +# type using `~mne.preprocessing.create_eog_epochs` like we did in the # :ref:`tut-artifact-overview` tutorial: eog_evoked = create_eog_epochs(raw).average() @@ -172,7 +170,7 @@ ############################################################################### # Now we'll do the same for the heartbeat artifacts, using -# :func:`~mne.preprocessing.create_ecg_epochs`: +# `~mne.preprocessing.create_ecg_epochs`: ecg_evoked = create_ecg_epochs(raw).average() ecg_evoked.apply_baseline(baseline=(None, -0.2)) @@ -190,9 +188,10 @@ # higher values), making it harder for the algorithm to find an accurate # solution. A high-pass filter with 1 Hz cutoff frequency is recommended. 
# However, because filtering is a linear operation, the ICA solution found from -# the filtered signal can be applied to the unfiltered signal (see [2]_ for +# the filtered signal can be applied to the unfiltered signal (see +# :footcite:`WinklerEtAl2015` for # more information), so we'll keep a copy of the unfiltered -# :class:`~mne.io.Raw` object around so we can apply the ICA solution to it +# `~mne.io.Raw` object around so we can apply the ICA solution to it # later. filt_raw = raw.copy() @@ -206,10 +205,16 @@ # # The ICA algorithms implemented in MNE-Python find patterns across # channels, but ignore the time domain. This means you can compute ICA on -# discontinuous :class:`~mne.Epochs` or :class:`~mne.Evoked` objects (not -# just continuous :class:`~mne.io.Raw` objects), or only use every Nth +# discontinuous `~mne.Epochs` or `~mne.Evoked` objects (not +# just continuous `~mne.io.Raw` objects), or only use every Nth # sample by passing the ``decim`` parameter to ``ICA.fit()``. # +# .. note:: `~mne.Epochs` used for fitting ICA should not be +# baseline-corrected. Because cleaning the data via ICA may +# introduce DC offsets, we suggest to baseline correct your data +# **after** cleaning (and not before), should you require +# baseline correction. +# # Now we're ready to set up and fit the ICA. Since we know (from observing our # raw data) that the EOG and ECG artifacts are fairly strong, we would expect # those artifacts to be captured in the first few dimensions of the PCA @@ -228,21 +233,21 @@ # we'll also specify a `random seed`_ so that we get identical results each # time this tutorial is built by our web servers. -ica = ICA(n_components=15, random_state=97) +ica = ICA(n_components=15, max_iter='auto', random_state=97) ica.fit(filt_raw) ############################################################################### # Some optional parameters that we could have passed to the -# :meth:`~mne.preprocessing.ICA.fit` method include ``decim`` (to use only +# `~mne.preprocessing.ICA.fit` method include ``decim`` (to use only # every Nth sample in computing the ICs, which can yield a considerable # speed-up) and ``reject`` (for providing a rejection dictionary for maximum # acceptable peak-to-peak amplitudes for each channel type, just like we used # when creating epoched data in the :ref:`tut-overview` tutorial). # # Now we can examine the ICs to see what they captured. -# :meth:`~mne.preprocessing.ICA.plot_sources` will show the time series of the -# ICs. Note that in our call to :meth:`~mne.preprocessing.ICA.plot_sources` we -# can use the original, unfiltered :class:`~mne.io.Raw` object: +# `~mne.preprocessing.ICA.plot_sources` will show the time series of the +# ICs. Note that in our call to `~mne.preprocessing.ICA.plot_sources` we +# can use the original, unfiltered `~mne.io.Raw` object: raw.load_data() ica.plot_sources(raw, show_scrollbars=False) @@ -253,7 +258,7 @@ # like `a heartbeat `_ (for more info on visually identifying Independent # Components, `this EEGLAB tutorial`_ is a good resource). We can also # visualize the scalp field distribution of each component using -# :meth:`~mne.preprocessing.ICA.plot_components`. These are interpolated based +# `~mne.preprocessing.ICA.plot_components`. These are interpolated based # on the values in the ICA mixing matrix: # sphinx_gallery_thumbnail_number = 9 @@ -262,18 +267,18 @@ ############################################################################### # .. 
note:: # -# :meth:`~mne.preprocessing.ICA.plot_components` (which plots the scalp +# `~mne.preprocessing.ICA.plot_components` (which plots the scalp # field topographies for each component) has an optional ``inst`` parameter -# that takes an instance of :class:`~mne.io.Raw` or :class:`~mne.Epochs`. +# that takes an instance of `~mne.io.Raw` or `~mne.Epochs`. # Passing ``inst`` makes the scalp topographies interactive: clicking one -# will bring up a diagnostic :meth:`~mne.preprocessing.ICA.plot_properties` +# will bring up a diagnostic `~mne.preprocessing.ICA.plot_properties` # window (see below) for that component. # # In the plots above it's fairly obvious which ICs are capturing our EOG and # ECG artifacts, but there are additional ways visualize them anyway just to # be sure. First, we can plot an overlay of the original signal against the # reconstructed signal with the artifactual ICs excluded, using -# :meth:`~mne.preprocessing.ICA.plot_overlay`: +# `~mne.preprocessing.ICA.plot_overlay`: # blinks ica.plot_overlay(raw, exclude=[0], picks='eeg') @@ -282,7 +287,7 @@ ############################################################################### # We can also plot some diagnostics of each IC using -# :meth:`~mne.preprocessing.ICA.plot_properties`: +# `~mne.preprocessing.ICA.plot_properties`: ica.plot_properties(raw, picks=[0, 1]) @@ -299,16 +304,16 @@ # channels, merely setting ``ica.exclude`` doesn't do anything immediately (it # just adds the excluded ICs to a list that will get used later when it's # needed). Once the exclusions have been set, ICA methods like -# :meth:`~mne.preprocessing.ICA.plot_overlay` will exclude those component(s) +# `~mne.preprocessing.ICA.plot_overlay` will exclude those component(s) # even if no ``exclude`` parameter is passed, and the list of excluded -# components will be preserved when using :meth:`mne.preprocessing.ICA.save` -# and :func:`mne.preprocessing.read_ica`. +# components will be preserved when using `mne.preprocessing.ICA.save` +# and `mne.preprocessing.read_ica`. ica.exclude = [0, 1] # indices chosen based on various plots above ############################################################################### # Now that the exclusions have been set, we can reconstruct the sensor signals -# with artifacts removed using the :meth:`~mne.preprocessing.ICA.apply` method +# with artifacts removed using the `~mne.preprocessing.ICA.apply` method # (remember, we're applying the ICA solution from the *filtered* data to the # original *unfiltered* signal). Plotting the original raw data alongside the # reconstructed data shows that the heartbeat and blink artifacts are repaired. @@ -332,9 +337,9 @@ # a tedious, rate-limiting step in the analysis pipeline. One alternative is to # use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and # automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here -# we'll use :meth:`~mne.preprocessing.ICA.find_bads_eog` to automatically find +# we'll use `~mne.preprocessing.ICA.find_bads_eog` to automatically find # the ICs that best match the EOG signal, then use -# :meth:`~mne.preprocessing.ICA.plot_scores` along with our other plotting +# `~mne.preprocessing.ICA.plot_scores` along with our other plotting # functions to see which ICs it picked. 
We'll start by resetting # ``ica.exclude`` back to an empty list: @@ -356,10 +361,10 @@ ica.plot_sources(eog_evoked) ############################################################################### -# Note that above we used :meth:`~mne.preprocessing.ICA.plot_sources` on both -# the original :class:`~mne.io.Raw` instance and also on an -# :class:`~mne.Evoked` instance of the extracted EOG artifacts. This can be -# another way to confirm that :meth:`~mne.preprocessing.ICA.find_bads_eog` has +# Note that above we used `~mne.preprocessing.ICA.plot_sources` on both +# the original `~mne.io.Raw` instance and also on an +# `~mne.Evoked` instance of the extracted EOG artifacts. This can be +# another way to confirm that `~mne.preprocessing.ICA.find_bads_eog` has # identified the correct components. # # @@ -367,19 +372,20 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If you don't have an EOG channel, -# :meth:`~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that +# `~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that # you can use as a proxy for EOG. You can use a single channel, or create a # bipolar reference from frontal EEG sensors and use that as virtual EOG # channel. This carries a risk however: you must hope that the frontal EEG # channels only reflect EOG and not brain dynamics in the prefrontal cortex (or # you must not care about those prefrontal signals). # -# For ECG, it is easier: :meth:`~mne.preprocessing.ICA.find_bads_ecg` can use +# For ECG, it is easier: `~mne.preprocessing.ICA.find_bads_ecg` can use # cross-channel averaging of magnetometer or gradiometer channels to construct # a virtual ECG channel, so if you have MEG channels it is usually not # necessary to pass a specific channel name. -# :meth:`~mne.preprocessing.ICA.find_bads_ecg` also has two options for its -# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics [3]_) and +# `~mne.preprocessing.ICA.find_bads_ecg` also has two options for its +# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics +# :footcite:`DammersEtAl2008`) and # ``'correlation'`` (Pearson correlation between data and ECG channel). ica.exclude = [] @@ -404,14 +410,14 @@ # The last of these plots is especially useful: it shows us that the heartbeat # artifact is coming through on *two* ICs, and we've only caught one of them. # In fact, if we look closely at the output of -# :meth:`~mne.preprocessing.ICA.plot_sources` (online, you can right-click → +# `~mne.preprocessing.ICA.plot_sources` (online, you can right-click → # "view image" to zoom in), it looks like ``ICA014`` has a weak periodic # component that is in-phase with ``ICA001``. It might be worthwhile to re-run # the ICA with more components to see if that second heartbeat artifact # resolves out a little better: # refit the ICA with 30 components this time -new_ica = ICA(n_components=30, random_state=97) +new_ica = ICA(n_components=30, max_iter='auto', random_state=97) new_ica.fit(filt_raw) # find which ICs match the ECG pattern @@ -435,11 +441,11 @@ # Much better! Now we've captured both ICs that are reflecting the heartbeat # artifact (and as a result, we got two diagnostic plots: one for each IC that # reflects the heartbeat). This demonstrates the value of checking the results -# of automated approaches like :meth:`~mne.preprocessing.ICA.find_bads_ecg` +# of automated approaches like `~mne.preprocessing.ICA.find_bads_ecg` # before accepting them. 
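###############################################################################
# A fitted `~mne.preprocessing.ICA` object (including its ``exclude`` list,
# as noted above for `mne.preprocessing.ICA.save` and
# `mne.preprocessing.read_ica`) can be written to disk and read back later,
# which avoids re-fitting in a new session. A minimal sketch, assuming the
# ECG components found above are stored in ``new_ica.exclude`` (the filename
# is arbitrary, but MNE expects it to end in ``-ica.fif``):

from mne.preprocessing import read_ica

new_ica.save('sample_ecg-ica.fif')
restored_ica = read_ica('sample_ecg-ica.fif')
print(restored_ica.exclude)  # the excluded components survive the round-trip
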
# clean up memory before moving on -del raw, filt_raw, ica, new_ica +del raw, ica, new_ica ############################################################################### # Selecting ICA components using template matching @@ -448,17 +454,19 @@ # When dealing with multiple subjects, it is also possible to manually select # an IC for exclusion on one subject, and then use that component as a # *template* for selecting which ICs to exclude from other subjects' data, -# using :func:`mne.preprocessing.corrmap` [4]_. The idea behind -# :func:`~mne.preprocessing.corrmap` is that the artifact patterns are similar +# using `mne.preprocessing.corrmap` :footcite:`CamposViolaEtAl2009`. +# The idea behind +# `~mne.preprocessing.corrmap` is that the artifact patterns are similar # enough across subjects that corresponding ICs can be identified by # correlating the ICs from each ICA solution with a common template, and # picking the ICs with the highest correlation strength. -# :func:`~mne.preprocessing.corrmap` takes a list of ICA solutions, and a +# `~mne.preprocessing.corrmap` takes a list of ICA solutions, and a # ``template`` parameter that specifies which ICA object and which component # within it to use as a template. # # Since our sample dataset only contains data from one subject, we'll use a -# different dataset with multiple subjects: the EEGBCI dataset [5]_ [6]_. The +# different dataset with multiple subjects: the EEGBCI dataset +# :footcite:`SchalkEtAl2004,GoldbergerEtAl2000`. The # dataset has 109 subjects, we'll just download one run (a left/right hand # movement task) from each of the first 4 subjects: @@ -488,14 +496,16 @@ # remove trailing `.` from channel names so we can set montage raw.rename_channels(mapping) raw.set_montage('standard_1005') + # high-pass filter + raw_filt = raw.copy().load_data().filter(l_freq=1., h_freq=None) # fit ICA - ica = ICA(n_components=30, random_state=97) - ica.fit(raw) + ica = ICA(n_components=30, max_iter='auto', random_state=97) + ica.fit(raw_filt) raws.append(raw) icas.append(ica) ############################################################################### -# Now let's run :func:`~mne.preprocessing.corrmap`: +# Now let's run `~mne.preprocessing.corrmap`: # use the first subject as template; use Fpz as proxy for EOG raw = raws[0] @@ -521,14 +531,14 @@ # Notice that subject 1 *does* seem to have an IC that looks like it reflects # blink artifacts (component ``ICA000``). Notice also that subject 3 appears to # have *two* components that are reflecting ocular artifacts (``ICA000`` and -# ``ICA002``), but only one was caught by :func:`~mne.preprocessing.corrmap`. +# ``ICA002``), but only one was caught by `~mne.preprocessing.corrmap`. # Let's try setting the threshold manually: corrmap(icas, template=(0, eog_inds[0]), threshold=0.9) ############################################################################### # Now we get the message ``At least 1 IC detected for each subject`` (which is -# good). At this point we'll re-run :func:`~mne.preprocessing.corrmap` with +# good). At this point we'll re-run `~mne.preprocessing.corrmap` with # parameters ``label='blink', plot=False`` to *label* the ICs from each subject # that capture the blink artifacts (without plotting them again). @@ -539,11 +549,11 @@ ############################################################################### # Notice that the first subject has 3 different labels for the IC at index 0: # "eog/0/Fpz", "eog", and "blink". 
The first two were added by -# :meth:`~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by -# the last call to :func:`~mne.preprocessing.corrmap`. Notice also that each +# `~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by +# the last call to `~mne.preprocessing.corrmap`. Notice also that each # subject has at least one IC index labelled "blink", and subject 3 has two # components (0 and 2) labelled "blink" (consistent with the plot of IC sources -# above). The ``labels_`` attribute of :class:`~mne.preprocessing.ICA` objects +# above). The ``labels_`` attribute of `~mne.preprocessing.ICA` objects # can also be manually edited to annotate the ICs with custom labels. They also # come in handy when plotting: @@ -553,10 +563,10 @@ ############################################################################### # As a final note, it is possible to extract ICs numerically using the -# :meth:`~mne.preprocessing.ICA.get_components` method of -# :class:`~mne.preprocessing.ICA` objects. This will return a :class:`NumPy +# `~mne.preprocessing.ICA.get_components` method of +# `~mne.preprocessing.ICA` objects. This will return a :class:`NumPy # array ` that can be passed to -# :func:`~mne.preprocessing.corrmap` instead of the :class:`tuple` of +# `~mne.preprocessing.corrmap` instead of the :class:`tuple` of # ``(subject_index, component_index)`` we passed before, and will yield the # same result: @@ -567,48 +577,11 @@ ############################################################################### # An advantage of using this numerical representation of an IC to capture a # particular artifact pattern is that it can be saved and used as a template -# for future template-matching tasks using :func:`~mne.preprocessing.corrmap` +# for future template-matching tasks using `~mne.preprocessing.corrmap` # without having to load or recompute the ICA solution that yielded the # template originally. Put another way, when the template is a NumPy array, the -# :class:`~mne.preprocessing.ICA` object containing the template does not need -# to be in the list of ICAs provided to :func:`~mne.preprocessing.corrmap`. -# -# -# References -# ^^^^^^^^^^ -# -# .. [1] Ablin P, Cardoso J, Gramfort A (2018). Faster Independent Component -# Analysis by Preconditioning With Hessian Approximations. *IEEE -# Transactions on Signal Processing* 66:4040–4049. -# https://doi.org/10.1109/TSP.2018.2844203 -# -# .. [2] Winkler I, Debener S, Müller K-R, Tangermann M (2015). On the -# influence of high-pass filtering on ICA-based artifact reduction in -# EEG-ERP. Proceedings of EMBC-2015, 4101–4105. -# https://doi.org/10.1109/EMBC.2015.7319296 -# -# .. [3] Dammers J, Schiek M, Boers F, Silex C, Zvyagintsev M, Pietrzyk U, -# Mathiak K (2008). Integration of amplitude and phase statistics for -# complete artifact removal in independent components of neuromagnetic -# recordings. *IEEE Transactions on Biomedical Engineering* -# 55(10):2353–2362. https://doi.org/10.1109/TBME.2008.926677 -# -# .. [4] Viola FC, Thorne J, Edmonds B, Schneider T, Eichele T, Debener S -# (2009). Semi-automatic identification of independent components -# representing EEG artifact. *Clinical Neurophysiology* 120(5):868–877. -# https://doi.org/10.1016/j.clinph.2009.01.015 -# -# .. [5] Schalk G, McFarland DJ, Hinterberger T, Birbaumer N, Wolpaw JR (2004). -# BCI2000: A General-Purpose Brain-Computer Interface (BCI) System. -# *IEEE Transactions on Biomedical Engineering* 51(6):1034-1043. 
-# https://doi.org/10.1109/TBME.2004.827072 -# -# .. [6] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG, -# Mietus JE, Moody GB, Peng C-K, Stanley HE (2000). PhysioBank, -# PhysioToolkit, and PhysioNet: Components of a New Research Resource -# for Complex Physiologic Signals. *Circulation* 101(23):e215-e220. -# https://doi.org/10.1161/01.CIR.101.23.e215 -# +# `~mne.preprocessing.ICA` object containing the template does not need +# to be in the list of ICAs provided to `~mne.preprocessing.corrmap`. # # .. LINKS # @@ -621,3 +594,63 @@ # .. _`regular expression`: https://www.regular-expressions.info/ # .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex # .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels + + +############################################################################### +# Compute ICA components on Epochs +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# ICA is now fit to epoched MEG data instead of the raw data. +# We assume that the non-stationary EOG artifacts have already been removed. +# The sources matching the ECG are automatically found and displayed. +# +# .. note:: +# This example is computationally intensive, so it might take a few minutes +# to complete. +# +# Read and preprocess the data. Preprocessing consists of: +# +# - MEG channel selection +# - 1-30 Hz band-pass filter +# - epoching -0.2 to 0.5 seconds with respect to events +# - rejection based on peak-to-peak amplitude +# +# Note that we don't baseline correct the epochs here – we'll do this after +# cleaning with ICA is completed. Baseline correction before ICA is not +# recommended by the MNE-Python developers, as it doesn't guarantee optimal +# results. + +filt_raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data() +filt_raw.filter(1, 30, fir_design='firwin') + +# peak-to-peak amplitude rejection parameters +reject = dict(grad=4000e-13, mag=4e-12) +# create longer and more epochs for more artifact exposure +events = mne.find_events(filt_raw, stim_channel='STI 014') +# don't baseline correct epochs +epochs = mne.Epochs(filt_raw, events, event_id=None, tmin=-0.2, tmax=0.5, + reject=reject, baseline=None) + +############################################################################### +# Fit ICA model using the FastICA algorithm, detect and plot components +# explaining ECG artifacts. + +ica = ICA(n_components=15, method='fastica', max_iter="auto").fit(epochs) + +ecg_epochs = create_ecg_epochs(filt_raw, tmin=-.5, tmax=.5) +ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto') + +ica.plot_components(ecg_inds) + +############################################################################### +# Plot the properties of the ECG components: +ica.plot_properties(epochs, picks=ecg_inds) + +############################################################################### +# Plot the estimated sources of detected ECG related components: +ica.plot_sources(filt_raw, picks=ecg_inds) + +############################################################################### +# References +# ^^^^^^^^^^ +# .. 
footbibliography:: diff --git a/tutorials/preprocessing/plot_45_projectors_background.py b/tutorials/preprocessing/45_projectors_background.py similarity index 99% rename from tutorials/preprocessing/plot_45_projectors_background.py rename to tutorials/preprocessing/45_projectors_background.py index 29bdacf7b84..4b347e46361 100644 --- a/tutorials/preprocessing/plot_45_projectors_background.py +++ b/tutorials/preprocessing/45_projectors_background.py @@ -10,10 +10,6 @@ projectors from Raw objects, the difference between "applied" and "unapplied" projectors, and at what stages MNE-Python applies projectors automatically. -.. contents:: Page contents - :local: - :depth: 2 - We'll start by importing the Python modules we need; we'll also define a short function to make it easier to make several plots that look similar: """ @@ -207,7 +203,7 @@ def setup_3d_axes(): # .. sidebar:: Terminology # # In MNE-Python, the matrix used to project a raw signal into a subspace is -# usually called a :term:`projector ` or a *projection +# usually called a :term:`projector` or a *projection # operator* — these terms are interchangeable with the term *projection # matrix* used above. # diff --git a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py b/tutorials/preprocessing/50_artifact_correction_ssp.py similarity index 89% rename from tutorials/preprocessing/plot_50_artifact_correction_ssp.py rename to tutorials/preprocessing/50_artifact_correction_ssp.py index d64f6e65c69..61eba3c38f3 100644 --- a/tutorials/preprocessing/plot_50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/50_artifact_correction_ssp.py @@ -10,10 +10,6 @@ of SSP for environmental noise reduction, and for repair of ocular and heartbeat artifacts. -.. contents:: Page contents - :local: - :depth: 2 - We begin as always by importing the necessary Python modules. To save ourselves from repeatedly typing ``mne.preprocessing`` we'll directly import a handful of functions from that submodule: @@ -63,7 +59,7 @@ # The :ref:`example data ` was recorded on a Neuromag system, # which stores SSP projectors for environmental noise removal in the system # configuration (so that reasonably clean raw data can be viewed in real-time -# during acquisition). For this reason, all the :class:`~mne.io.Raw` data in +# during acquisition). For this reason, all the `~mne.io.Raw` data in # the example dataset already includes SSP projectors, which are noted in the # output when loading the data: @@ -109,14 +105,14 @@ # Creating the empty-room projectors # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -# We create the SSP vectors using :func:`~mne.compute_proj_raw`, and control +# We create the SSP vectors using `~mne.compute_proj_raw`, and control # the number of projectors with parameters ``n_grad`` and ``n_mag``. Once # created, the field pattern of the projectors can be easily visualized with -# :func:`~mne.viz.plot_projs_topomap`. We include the parameter +# `~mne.viz.plot_projs_topomap`. We include the parameter # ``vlim='joint'`` so that the colormap is computed jointly for all projectors # of a given channel type; this makes it easier to compare their relative # smoothness. 
Note that for the function to know the types of channels in a -# projector, you must also provide the corresponding :class:`~mne.Info` object: +# projector, you must also provide the corresponding `~mne.Info` object: # sphinx_gallery_thumbnail_number = 3 empty_room_projs = mne.compute_proj_raw(empty_room_raw, n_grad=3, n_mag=3) @@ -143,8 +139,8 @@ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We could visualize the different effects these have on the data by applying -# each set of projectors to different copies of the :class:`~mne.io.Raw` object -# using :meth:`~mne.io.Raw.apply_proj`. However, the :meth:`~mne.io.Raw.plot` +# each set of projectors to different copies of the `~mne.io.Raw` object +# using `~mne.io.Raw.apply_proj`. However, the `~mne.io.Raw.plot` # method has a ``proj`` parameter that allows us to *temporarily* apply # projectors while plotting, so we can use this to visualize the difference # without needing to copy the data. Because the projectors are so similar, we @@ -160,7 +156,7 @@ ############################################################################### # The effect is sometimes easier to see on averaged data. Here we use an -# interactive feature of :func:`mne.Evoked.plot_topomap` to turn projectors on +# interactive feature of `mne.Evoked.plot_topomap` to turn projectors on # and off to see the effect on the data. Of course, the interactivity won't # work on the tutorial website, but you can download the tutorial and try it # locally: @@ -207,8 +203,8 @@ # # MNE-Python provides several functions for detecting and removing heartbeats # from EEG and MEG data. As we saw in :ref:`tut-artifact-overview`, -# :func:`~mne.preprocessing.create_ecg_epochs` can be used to both detect and -# extract heartbeat artifacts into an :class:`~mne.Epochs` object, which can +# `~mne.preprocessing.create_ecg_epochs` can be used to both detect and +# extract heartbeat artifacts into an `~mne.Epochs` object, which can # be used to visualize how the heartbeat artifacts manifest across the sensors: ecg_evoked = create_ecg_epochs(raw).average() @@ -223,20 +219,21 @@ ############################################################################### # To compute SSP projectors for the heartbeat artifact, you can use -# :func:`~mne.preprocessing.compute_proj_ecg`, which takes a -# :class:`~mne.io.Raw` object as input and returns the requested number of +# `~mne.preprocessing.compute_proj_ecg`, which takes a +# `~mne.io.Raw` object as input and returns the requested number of # projectors for magnetometers, gradiometers, and EEG channels (default is two # projectors for each channel type). -# :func:`~mne.preprocessing.compute_proj_ecg` also returns an :term:`events` -# array containing the sample numbers corresponding to the onset of each -# detected heartbeat. +# `~mne.preprocessing.compute_proj_ecg` also returns an :term:`events` +# array containing the sample numbers corresponding to the peak of the +# `R wave `__ of each detected +# heartbeat. 
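###############################################################################
# As an optional sanity check (a sketch only -- it assumes the
# ``projs, events = compute_proj_ecg(...)`` call below has already been run),
# that events array follows the usual MNE convention of one row per event
# with the sample index in the first column, so the implied average heart
# rate is easy to inspect::
#
#     import numpy as np
#     beat_times = events[:, 0] / raw.info['sfreq']  # samples -> seconds
#     print('~%0.0f bpm' % (60. / np.diff(beat_times).mean(),))
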
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=1, reject=None) ############################################################################### # The first line of output tells us that -# :func:`~mne.preprocessing.compute_proj_ecg` found three existing projectors -# already in the :class:`~mne.io.Raw` object, and will include those in the +# `~mne.preprocessing.compute_proj_ecg` found three existing projectors +# already in the `~mne.io.Raw` object, and will include those in the # list of projectors that it returns (appending the new ECG projectors to the # end of the list). If you don't want that, you can change that behavior with # the boolean ``no_proj`` parameter. Since we've already run the computation, @@ -254,24 +251,24 @@ ############################################################################### # Since no dedicated ECG sensor channel was detected in the -# :class:`~mne.io.Raw` object, by default -# :func:`~mne.preprocessing.compute_proj_ecg` used the magnetometers to +# `~mne.io.Raw` object, by default +# `~mne.preprocessing.compute_proj_ecg` used the magnetometers to # estimate the ECG signal (as stated on the third line of output, above). You # can also supply the ``ch_name`` parameter to restrict which channel to use # for ECG artifact detection; this is most useful when you had an ECG sensor -# but it is not labeled as such in the :class:`~mne.io.Raw` file. +# but it is not labeled as such in the `~mne.io.Raw` file. # # The next few lines of the output describe the filter used to isolate ECG # events. The default settings are usually adequate, but the filter can be # customized via the parameters ``ecg_l_freq``, ``ecg_h_freq``, and # ``filter_length`` (see the documentation of -# :func:`~mne.preprocessing.compute_proj_ecg` for details). +# `~mne.preprocessing.compute_proj_ecg` for details). # # .. TODO what are the cases where you might need to customize the ECG filter? # infants? Heart murmur? # # Once the ECG events have been identified, -# :func:`~mne.preprocessing.compute_proj_ecg` will also filter the data +# `~mne.preprocessing.compute_proj_ecg` will also filter the data # channels before extracting epochs around each heartbeat, using the parameter # values given in ``l_freq``, ``h_freq``, ``filter_length``, ``filter_method``, # and ``iir_params``. Here again, the default parameter values are usually @@ -301,7 +298,7 @@ ############################################################################### # Finally, note that above we passed ``reject=None`` to the -# :func:`~mne.preprocessing.compute_proj_ecg` function, meaning that all +# `~mne.preprocessing.compute_proj_ecg` function, meaning that all # detected ECG epochs would be used when computing the projectors (regardless # of signal quality in the data sensors during those epochs). The default # behavior is to reject epochs based on signal amplitude: epochs with @@ -318,23 +315,23 @@ # # .. note:: # -# :func:`~mne.preprocessing.compute_proj_ecg` has a similar parameter +# `~mne.preprocessing.compute_proj_ecg` has a similar parameter # ``flat`` for specifying the *minimum* acceptable peak-to-peak amplitude # for each channel type. # -# While :func:`~mne.preprocessing.compute_proj_ecg` conveniently combines +# While `~mne.preprocessing.compute_proj_ecg` conveniently combines # several operations into a single function, MNE-Python also provides functions # for performing each part of the process. 
Specifically: # -# - :func:`mne.preprocessing.find_ecg_events` for detecting heartbeats in a -# :class:`~mne.io.Raw` object and returning a corresponding :term:`events` +# - `mne.preprocessing.find_ecg_events` for detecting heartbeats in a +# `~mne.io.Raw` object and returning a corresponding :term:`events` # array # -# - :func:`mne.preprocessing.create_ecg_epochs` for detecting heartbeats in a -# :class:`~mne.io.Raw` object and returning an :class:`~mne.Epochs` object +# - `mne.preprocessing.create_ecg_epochs` for detecting heartbeats in a +# `~mne.io.Raw` object and returning an `~mne.Epochs` object # -# - :func:`mne.compute_proj_epochs` for creating projector(s) from any -# :class:`~mne.Epochs` object +# - `mne.compute_proj_epochs` for creating projector(s) from any +# `~mne.Epochs` object # # See the documentation of each function for further details. # @@ -352,12 +349,12 @@ ############################################################################### # Just like we did with the heartbeat artifact, we can compute SSP projectors -# for the ocular artifact using :func:`~mne.preprocessing.compute_proj_eog`, -# which again takes a :class:`~mne.io.Raw` object as input and returns the +# for the ocular artifact using `~mne.preprocessing.compute_proj_eog`, +# which again takes a `~mne.io.Raw` object as input and returns the # requested number of projectors for magnetometers, gradiometers, and EEG # channels (default is two projectors for each channel type). This time, we'll # pass ``no_proj`` parameter (so we get back only the new EOG projectors, not -# also the existing projectors in the :class:`~mne.io.Raw` object), and we'll +# also the existing projectors in the `~mne.io.Raw` object), and we'll # ignore the events array by assigning it to ``_`` (the conventional way of # handling unwanted return elements in Python). @@ -426,7 +423,7 @@ # source amplitudes. However, for sensor space analyses, it can be useful to # visualize the extent to which SSP projection has biased the data. This can be # explored by using ``proj='reconstruct'`` in evoked plotting functions, for -# example via :meth:`evoked.plot() `: +# example via `evoked.plot() `: evoked = epochs.average() # Apply the average ref first: diff --git a/tutorials/preprocessing/plot_55_setting_eeg_reference.py b/tutorials/preprocessing/55_setting_eeg_reference.py similarity index 91% rename from tutorials/preprocessing/plot_55_setting_eeg_reference.py rename to tutorials/preprocessing/55_setting_eeg_reference.py index e6dab483b6a..6f2a6ec4434 100644 --- a/tutorials/preprocessing/plot_55_setting_eeg_reference.py +++ b/tutorials/preprocessing/55_setting_eeg_reference.py @@ -7,10 +7,6 @@ This tutorial describes how to set or change the EEG reference in MNE-Python. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need, loading some :ref:`example data `, and cropping it to save memory. 
Since this tutorial deals specifically with EEG, we'll also restrict the dataset to @@ -80,6 +76,9 @@ # use average of mastoid channels as reference # raw.set_eeg_reference(ref_channels=['M1', 'M2']) +# use a bipolar reference (contralateral) +# raw.set_bipolar_reference(anode='[F3'], cathode=['F4']) + ############################################################################### # If a scalp electrode was used as reference but was not saved alongside the # raw data (reference channels often aren't), you may wish to add it back to @@ -192,6 +191,25 @@ fig.subplots_adjust(top=0.9) fig.suptitle('{} reference'.format(title), size='xx-large', weight='bold') +############################################################################### +# Using a bipolar reference +# ^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# To create a bipolar reference, you can use :meth:`~mne.set_bipolar_reference` +# along with the respective channel names for ``anode`` and ``cathode`` which +# creates a new virtual channel that takes the difference between two +# specified channels (anode and cathode) and drops the original channels by +# default. The new virtual channel will be annotated with the channel info of +# the anode with location set to ``(0, 0, 0)`` and coil type set to +# ``EEG_BIPOLAR`` by default. Here we use a contralateral/transverse bipolar +# reference between channels ``EEG 054`` and ``EEG 055`` as described in +# :footcite:`YaoEtAl2019` which creates a new virtual channel +# named ``EEG 054-EEG 055``. + +raw_bip_ref = mne.set_bipolar_reference(raw, anode=['EEG 054'], + cathode=['EEG 055']) +raw_bip_ref.plot() + ############################################################################### # EEG reference and source modeling # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tutorials/preprocessing/plot_59_head_positions.py b/tutorials/preprocessing/59_head_positions.py similarity index 100% rename from tutorials/preprocessing/plot_59_head_positions.py rename to tutorials/preprocessing/59_head_positions.py diff --git a/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py b/tutorials/preprocessing/60_maxwell_filtering_sss.py similarity index 99% rename from tutorials/preprocessing/plot_60_maxwell_filtering_sss.py rename to tutorials/preprocessing/60_maxwell_filtering_sss.py index 563bdb36a4b..bc019139b51 100644 --- a/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py +++ b/tutorials/preprocessing/60_maxwell_filtering_sss.py @@ -7,10 +7,6 @@ This tutorial covers reducing environmental noise and compensating for head movement with SSS and Maxwell filtering. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need, loading some :ref:`example data `, and cropping it to save on memory: """ @@ -294,7 +290,7 @@ # ^^^^^^^^^^^^^^^^^^^^^ # # If you have information about subject head position relative to the sensors -# (i.e., continuous head position indicator coils, or :term:`cHPI `), SSS +# (i.e., continuous head position indicator coils, or :term:`cHPI`), SSS # can take that into account when projecting sensor data onto the internal # subspace. 
Head position data can be computed using # :func:`mne.chpi.compute_chpi_locs` and :func:`mne.chpi.compute_head_pos`, diff --git a/tutorials/preprocessing/plot_70_fnirs_processing.py b/tutorials/preprocessing/70_fnirs_processing.py similarity index 99% rename from tutorials/preprocessing/plot_70_fnirs_processing.py rename to tutorials/preprocessing/70_fnirs_processing.py index 36c3ccfaa7f..34734b18b9c 100644 --- a/tutorials/preprocessing/plot_70_fnirs_processing.py +++ b/tutorials/preprocessing/70_fnirs_processing.py @@ -8,10 +8,6 @@ (fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and deoxyhaemoglobin (HbR) concentration. -.. contents:: Page contents - :local: - :depth: 2 - Here we will work with the :ref:`fNIRS motor data `. """ diff --git a/tutorials/preprocessing/README.txt b/tutorials/preprocessing/README.txt index 81caaf6f2de..b9089799a38 100644 --- a/tutorials/preprocessing/README.txt +++ b/tutorials/preprocessing/README.txt @@ -1,5 +1,5 @@ Preprocessing -============= +------------- These tutorials cover various preprocessing techniques for continuous data, as well as some diagnostic plotting methods. diff --git a/tutorials/raw/plot_10_raw_overview.py b/tutorials/raw/10_raw_overview.py similarity index 99% rename from tutorials/raw/plot_10_raw_overview.py rename to tutorials/raw/10_raw_overview.py index 08fbaa81b60..6c59ba6f75f 100644 --- a/tutorials/raw/plot_10_raw_overview.py +++ b/tutorials/raw/10_raw_overview.py @@ -13,10 +13,6 @@ from simulated data in a :class:`NumPy array `, see :ref:`tut_creating_data_structures`. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need: """ @@ -283,9 +279,9 @@ # inaccurate, you can change the type of any channel with the # :meth:`~mne.io.Raw.set_channel_types` method. The method takes a # :class:`dictionary ` mapping channel names to types; allowed types are -# ``ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, hbo, -# hbr``. A common use case for changing channel type is when using frontal EEG -# electrodes as makeshift EOG channels: +# ``ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, ecog, +# hbo, hbr``. A common use case for changing channel type is when using frontal +# EEG electrodes as makeshift EOG channels: raw.set_channel_types({'EEG_001': 'eog'}) print(raw.copy().pick_types(meg=False, eog=True).ch_names) diff --git a/tutorials/raw/plot_20_event_arrays.py b/tutorials/raw/20_event_arrays.py similarity index 99% rename from tutorials/raw/plot_20_event_arrays.py rename to tutorials/raw/20_event_arrays.py index 1edb6d0fe29..f8e4824101a 100644 --- a/tutorials/raw/plot_20_event_arrays.py +++ b/tutorials/raw/20_event_arrays.py @@ -8,10 +8,6 @@ This tutorial describes event representation and how event arrays are used to subselect data. -.. 
contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need, loading some :ref:`example data `, and cropping the :class:`~mne.io.Raw` object to just 60 seconds before loading it into RAM to save memory: diff --git a/tutorials/raw/plot_30_annotate_raw.py b/tutorials/raw/30_annotate_raw.py similarity index 98% rename from tutorials/raw/plot_30_annotate_raw.py rename to tutorials/raw/30_annotate_raw.py index 53851a93bbc..993c6441187 100644 --- a/tutorials/raw/plot_30_annotate_raw.py +++ b/tutorials/raw/30_annotate_raw.py @@ -8,10 +8,6 @@ This tutorial describes adding annotations to a `~mne.io.Raw` object, and how annotations are used in later stages of data processing. -.. contents:: Page contents - :local: - :depth: 1 - As usual we'll start by importing the modules we need, loading some :ref:`example data `, and (since we won't actually analyze the raw data in this tutorial) cropping the `~mne.io.Raw` object to just 60 @@ -49,8 +45,8 @@ # you can even pass lists or arrays to the `~mne.Annotations` # constructor to annotate multiple spans at once: -my_annot = mne.Annotations(onset=[3, 5, 7], - duration=[1, 0.5, 0.25], +my_annot = mne.Annotations(onset=[3, 5, 7], # in seconds + duration=[1, 0.5, 0.25], # in seconds, too description=['AAA', 'BBB', 'CCC']) print(my_annot) diff --git a/tutorials/raw/plot_40_visualize_raw.py b/tutorials/raw/40_visualize_raw.py similarity index 99% rename from tutorials/raw/plot_40_visualize_raw.py rename to tutorials/raw/40_visualize_raw.py index 1ae0029081c..30b56e6ffd6 100644 --- a/tutorials/raw/plot_40_visualize_raw.py +++ b/tutorials/raw/40_visualize_raw.py @@ -9,10 +9,6 @@ the spectral density of continuous data, and how to plot the sensor locations and projectors stored in `~mne.io.Raw` objects. -.. contents:: Page contents - :local: - :depth: 2 - As usual we'll start by importing the modules we need, loading some :ref:`example data `, and cropping the `~mne.io.Raw` object to just 60 seconds before loading it into RAM to save memory: diff --git a/tutorials/raw/README.txt b/tutorials/raw/README.txt index 2ce3974744c..8c90802418b 100644 --- a/tutorials/raw/README.txt +++ b/tutorials/raw/README.txt @@ -1,5 +1,5 @@ Working with continuous data -============================ +---------------------------- These tutorials cover the basics of loading EEG/MEG data into MNE-Python, and how to query, manipulate, annotate, plot, and export continuous data in the diff --git a/tutorials/sample-datasets/README.txt b/tutorials/sample-datasets/README.txt deleted file mode 100644 index bc7d16a3c3f..00000000000 --- a/tutorials/sample-datasets/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -Sample datasets -=============== - -These tutorials illustrate some of the sample datasets available through -MNE-Python. diff --git a/tutorials/simulation/plot_creating_data_structures.py b/tutorials/simulation/10_array_objs.py similarity index 98% rename from tutorials/simulation/plot_creating_data_structures.py rename to tutorials/simulation/10_array_objs.py index f249e221e21..2952eef340d 100644 --- a/tutorials/simulation/plot_creating_data_structures.py +++ b/tutorials/simulation/10_array_objs.py @@ -7,10 +7,6 @@ This tutorial shows how to create MNE-Python's core data structures using an existing :class:`NumPy array ` of (real or synthetic) data. -.. 
contents:: Page contents - :local: - :depth: 1 - We begin by importing the necessary Python modules: """ @@ -99,7 +95,7 @@ # # The expected units for the different channel types are: # -# - Volts: eeg, eog, seeg, emg, ecg, bio, ecog +# - Volts: eeg, eog, seeg, dbs, emg, ecg, bio, ecog # - Teslas: mag # - Teslas/meter: grad # - Molar: hbo, hbr diff --git a/tutorials/simulation/plot_point_spread.py b/tutorials/simulation/70_point_spread.py similarity index 100% rename from tutorials/simulation/plot_point_spread.py rename to tutorials/simulation/70_point_spread.py diff --git a/tutorials/simulation/plot_dics.py b/tutorials/simulation/80_dics.py similarity index 99% rename from tutorials/simulation/plot_dics.py rename to tutorials/simulation/80_dics.py index 387c8460a0c..330ccffae52 100644 --- a/tutorials/simulation/plot_dics.py +++ b/tutorials/simulation/80_dics.py @@ -187,7 +187,8 @@ def coh_signal_gen(): # Plot some of the channels of the simulated data that are situated above one # of our simulated sources. -picks = mne.pick_channels(epochs.ch_names, mne.read_selection('Left-frontal')) +picks = mne.pick_channels(epochs.ch_names, + mne.read_vectorview_selection('Left-frontal')) epochs.plot(picks=picks) ############################################################################### diff --git a/tutorials/simulation/README.txt b/tutorials/simulation/README.txt index 9126414be89..531c2feac4f 100644 --- a/tutorials/simulation/README.txt +++ b/tutorials/simulation/README.txt @@ -1,5 +1,5 @@ Simulation -========== +---------- These tutorials describe how to populate MNE-Python data structures with arbitrary data, using the array-based constructors and the simulation diff --git a/tutorials/source-modeling/plot_eeg_no_mri.py b/tutorials/source-modeling/plot_eeg_no_mri.py deleted file mode 100644 index 15d78888b74..00000000000 --- a/tutorials/source-modeling/plot_eeg_no_mri.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -""" -.. _tut-eeg-fsaverage-source-modeling: - -EEG forward operator with a template MRI -======================================== - -This tutorial explains how to compute the forward operator from EEG data -using the standard template MRI subject ``fsaverage``. - -.. caution:: Source reconstruction without an individual T1 MRI from the - subject will be less accurate. Do not over interpret - activity locations which can be off by multiple centimeters. - -.. contents:: This tutorial covers: - :local: - :depth: 2 - -""" -# Authors: Alexandre Gramfort -# Joan Massich -# -# License: BSD Style. - -import os.path as op - -import mne -from mne.datasets import eegbci -from mne.datasets import fetch_fsaverage - -# Download fsaverage files -fs_dir = fetch_fsaverage(verbose=True) -subjects_dir = op.dirname(fs_dir) - -# The files live in: -subject = 'fsaverage' -trans = 'fsaverage' # MNE has a built-in fsaverage transformation -src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif') -bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif') - -############################################################################## -# Load the data -# ------------- -# -# We use here EEG data from the BCI dataset. -# -# .. note:: See :ref:`plot_montage` to view all the standard EEG montages -# available in MNE-Python. 
- -raw_fname, = eegbci.load_data(subject=1, runs=[6]) -raw = mne.io.read_raw_edf(raw_fname, preload=True) - -# Clean channel names to be able to use a standard 1005 montage -new_names = dict( - (ch_name, - ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp')) - for ch_name in raw.ch_names) -raw.rename_channels(new_names) - -# Read and set the EEG electrode locations -montage = mne.channels.make_standard_montage('standard_1005') -raw.set_montage(montage) -raw.set_eeg_reference(projection=True) # needed for inverse modeling - -# Check that the locations of EEG electrodes is correct with respect to MRI -mne.viz.plot_alignment( - raw.info, src=src, eeg=['original', 'projected'], trans=trans, - show_axes=True, mri_fiducials=True, dig='fiducials') - -############################################################################## -# Setup source space and compute forward -# -------------------------------------- - -fwd = mne.make_forward_solution(raw.info, trans=trans, src=src, - bem=bem, eeg=True, mindist=5.0, n_jobs=1) -print(fwd) - -# Use fwd to compute the sensitivity map for illustration purposes -eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed') -brain = eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir, - clim=dict(lims=[5, 50, 100])) diff --git a/tutorials/source-modeling/plot_fix_bem_in_blender.py b/tutorials/source-modeling/plot_fix_bem_in_blender.py deleted file mode 100644 index 61e9bc483e5..00000000000 --- a/tutorials/source-modeling/plot_fix_bem_in_blender.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -Editing BEM surfaces in Blender -=============================== - -Sometimes when creating a BEM model the surfaces need manual correction because -of a series of problems that can arise (e.g. intersection between surfaces). -Here, we will see how this can be achieved by exporting the surfaces to the 3D -modeling program `Blender `_, editing them, and -re-importing them. - -This tutorial is based on https://github.com/ezemikulan/blender_freesurfer by -Ezequiel Mikulan. -""" - -# Authors: Marijn van Vliet -# Ezequiel Mikulan -# -# License: BSD (3-clause) - -# sphinx_gallery_thumbnail_path = '_static/blender_import_obj/blender_import_obj2.jpg' # noqa - -import os -import os.path as op -import shutil -import mne - -data_path = mne.datasets.sample.data_path() -subjects_dir = op.join(data_path, 'subjects') -bem_dir = op.join(subjects_dir, 'sample', 'bem') - -############################################################################### -# Exporting surfaces to Blender -# ----------------------------- -# -# In this tutorial, we are working with the MNE-Sample set, for which the -# surfaces have no issues. To demonstrate how to fix problematic surfaces, we -# are going to manually place one of the inner-skull vertices outside the -# outer-skill mesh. -# -# We then convert the surfaces to `.obj -# `_ files and create a new -# folder called ``conv`` inside the FreeSurfer subject folder to keep them in. - -# Put the converted surfaces in a separate 'conv' folder -conv_dir = op.join(subjects_dir, 'sample', 'conv') -os.makedirs(conv_dir, exist_ok=True) - -# Load the inner skull surface and create a problem -coords, faces = mne.read_surface(op.join(bem_dir, 'inner_skull.surf')) -coords[0] *= 1.1 # Move the first vertex outside the skull - -# Write the inner skull surface as an .obj file that can be imported by -# Blender. -mne.write_surface(op.join(conv_dir, 'inner_skull.obj'), coords, faces, - overwrite=True) - -# Also convert the outer skull surface. 
-coords, faces = mne.read_surface(op.join(bem_dir, 'outer_skull.surf')) -mne.write_surface(op.join(conv_dir, 'outer_skull.obj'), coords, faces, - overwrite=True) - -############################################################################### -# Editing in Blender -# ------------------ -# -# We can now open Blender and import the surfaces. Go to *File > Import > -# Wavefront (.obj)*. Navigate to the ``conv`` folder and select the file you -# want to import. Make sure to select the *Keep Vert Order* option. You can -# also select the *Y Forward* option to load the axes in the correct direction -# (RAS): -# -# .. image:: ../../_static/blender_import_obj/blender_import_obj1.jpg -# :width: 800 -# :alt: Importing .obj files in Blender -# -# For convenience, you can save these settings by pressing the ``+`` button -# next to *Operator Presets*. -# -# Repeat the procedure for all surfaces you want to import (e.g. inner_skull -# and outer_skull). -# -# You can now edit the surfaces any way you like. See the -# `Beginner Blender Tutorial Series -# `_ -# to learn how to use Blender. Specifically, `part 2 -# `_ will teach you how to -# use the basic editing tools you need to fix the surface. -# -# .. image:: ../../_static/blender_import_obj/blender_import_obj2.jpg -# :width: 800 -# :alt: Editing surfaces in Blender -# -# Using the fixed surfaces in MNE-Python -# -------------------------------------- -# -# In Blender, you can export a surface as an .obj file by selecting it and go -# to *File > Export > Wavefront (.obj)*. You need to again select the *Y -# Forward* option and check the *Keep Vertex Order* box. -# -# .. image:: ../../_static/blender_import_obj/blender_import_obj3.jpg -# :width: 200 -# :alt: Exporting .obj files in Blender -# -# -# Each surface needs to be exported as a separate file. We recommend saving -# them in the ``conv`` folder and ending the file name with ``_fixed.obj``, -# although this is not strictly necessary. -# -# In order to be able to run this tutorial script top to bottom, we here -# simulate the edits you did manually in Blender using Python code: - -coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull.obj')) -coords[0] /= 1.1 # Move the first vertex back inside the skull -mne.write_surface(op.join(conv_dir, 'inner_skull_fixed.obj'), coords, faces, - overwrite=True) - -############################################################################### -# Back in Python, you can read the fixed .obj files and save them as -# FreeSurfer .surf files. For the :func:`mne.make_bem_model` function to find -# them, they need to be saved using their original names in the ``surf`` -# folder, e.g. ``surf/inner_skull.surf``. Be sure to first backup the original -# surfaces in case you make a mistake! - -# Read the fixed surface -coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull_fixed.obj')) - -# Backup the original surface -shutil.copy(op.join(bem_dir, 'inner_skull.surf'), - op.join(bem_dir, 'inner_skull_orig.surf')) - -# Overwrite the original surface with the fixed version -mne.write_surface(op.join(bem_dir, 'inner_skull.surf'), coords, faces, - overwrite=True) - -############################################################################### -# That's it! You are ready to continue with your analysis pipeline (e.g. -# running :func:`mne.make_bem_model`). 
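The array-creation tutorial renamed above (``10_array_objs.py``) lists the units MNE-Python expects for each channel type (volts for ``eeg``/``eog``/``seeg``/``dbs``/``emg``/``ecg``/``bio``/``ecog``, teslas for ``mag``, teslas/meter for ``grad``, molar for ``hbo``/``hbr``). As a minimal sketch of that convention, using synthetic data and made-up channel names rather than code from this diff: amplitudes recorded in microvolts have to be rescaled to volts before constructing the object.

import numpy as np
import mne

sfreq = 250.  # sampling frequency in Hz
rng = np.random.default_rng(0)

# two synthetic EEG channels, 10 s long, amplitudes expressed in microvolts
data_uv = rng.standard_normal((2, int(sfreq * 10))) * 20.
data_v = data_uv * 1e-6  # convert to volts, the unit MNE expects for EEG

info = mne.create_info(ch_names=['EEG 001', 'EEG 002'], sfreq=sfreq,
                       ch_types='eeg')
raw = mne.io.RawArray(data_v, info)
print(raw)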
diff --git a/tutorials/discussions/plot_background_statistics.py b/tutorials/stats-sensor-space/10_background_stats.py similarity index 97% rename from tutorials/discussions/plot_background_statistics.py rename to tutorials/stats-sensor-space/10_background_stats.py index 11e1c0f7d21..bed07a9560b 100644 --- a/tutorials/discussions/plot_background_statistics.py +++ b/tutorials/stats-sensor-space/10_background_stats.py @@ -8,11 +8,6 @@ Here we will briefly cover multiple concepts of inferential statistics in an introductory manner, and demonstrate how to use some MNE statistical functions. - -.. contents:: Topics - :local: - :depth: 3 - """ # Authors: Eric Larson @@ -46,9 +41,9 @@ # some probability (e.g., p < 0.05). This probability is also called the # significance level :math:`\alpha`. # To think about what this means, let's follow the illustrative example from -# [1]_ and construct a toy dataset consisting of a 40 x 40 square with a -# "signal" present in the center with white noise added and a Gaussian -# smoothing kernel applied. +# :footcite:`RidgwayEtAl2012` and construct a toy dataset consisting of a +# 40 x 40 square with a "signal" present in the center with white noise added +# and a Gaussian smoothing kernel applied. width = 40 n_subjects = 10 @@ -173,7 +168,8 @@ def plot_t_p(t, p, title, mcc, axes=None): # "Hat" variance adjustment # ~~~~~~~~~~~~~~~~~~~~~~~~~ # The "hat" technique regularizes the variance values used in the t-test -# calculation [1]_ to compensate for implausibly small variances. +# calculation :footcite:`RidgwayEtAl2012` to compensate for implausibly small +# variances. ts.append(ttest_1samp_no_p(X, sigma=sigma)) ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2) titles.append(r'$\mathrm{t_{hat}}$') @@ -465,7 +461,7 @@ def plot_t_p(t, p, title, mcc, axes=None): # "Hat" variance adjustment # ~~~~~~~~~~~~~~~~~~~~~~~~~ # This method can also be used in this context to correct for small -# variances [1]_: +# variances :footcite:`RidgwayEtAl2012`: titles.append(r'$\mathbf{C_{hat}}$') stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma) t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test( @@ -487,7 +483,8 @@ def plot_t_p(t, p, title, mcc, axes=None): # TFCE eliminates the free parameter initial ``threshold`` value that # determines which points are included in clustering by approximating # a continuous integration across possible threshold values with a standard -# `Riemann sum `__ [2]_. +# `Riemann sum `__ +# :footcite:`SmithNichols2009`. # This requires giving a starting threshold ``start`` and a step # size ``step``, which in MNE is supplied as a dict. # The smaller the ``step`` and closer to 0 the ``start`` value, @@ -657,12 +654,6 @@ def plot_t_p(t, p, title, mcc, axes=None): # # References # ---------- -# .. [1] Ridgway et al. 2012, "The problem of low variance voxels in -# statistical parametric mapping; a new hat avoids a 'haircut'", -# NeuroImage. 2012 Feb 1;59(3):2131-41. -# -# .. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement: -# addressing problems of smoothing, threshold dependence, and -# localisation in cluster inference", NeuroImage 44 (2009) 83-98. +# .. footbibliography:: # # .. 
include:: ../../links.inc diff --git a/tutorials/stats-sensor-space/plot_stats_cluster_erp.py b/tutorials/stats-sensor-space/20_erp_stats.py similarity index 91% rename from tutorials/stats-sensor-space/plot_stats_cluster_erp.py rename to tutorials/stats-sensor-space/20_erp_stats.py index 6734476683d..fefd6becf5c 100644 --- a/tutorials/stats-sensor-space/plot_stats_cluster_erp.py +++ b/tutorials/stats-sensor-space/20_erp_stats.py @@ -9,8 +9,8 @@ permutation approaches (here with Threshold-Free Cluster Enhancement); and how to visualise the results. -The underlying data comes from [1]_; we contrast long vs. short words. -TFCE is described in [2]_. +The underlying data comes from :footcite:`DufauEtAl2015`; we contrast long vs. +short words. TFCE is described in :footcite:`SmithNichols2009`. """ import numpy as np @@ -123,9 +123,4 @@ ############################################################################### # References # ---------- -# .. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand -# words are worth a picture: Snapshots of printed-word processing in an -# event-related potential megastudy. Psychological Science, 2015 -# .. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement: -# addressing problems of smoothing, threshold dependence, and -# localisation in cluster inference", NeuroImage 44 (2009) 83-98. +# .. footbibliography:: diff --git a/tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py similarity index 98% rename from tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py rename to tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py index a82887101de..8b59f35687a 100644 --- a/tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py +++ b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py @@ -54,7 +54,7 @@ baseline=(None, 0), preload=True, reject=dict(grad=4000e-13, eog=150e-6)) # just use right temporal sensors for speed -epochs.pick_channels(mne.read_selection('Right-temporal')) +epochs.pick_channels(mne.read_vectorview_selection('Right-temporal')) evoked = epochs.average() # Factor to down-sample the temporal dimension of the TFR computed by diff --git a/tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py b/tutorials/stats-sensor-space/50_cluster_between_time_freq.py similarity index 100% rename from tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py rename to tutorials/stats-sensor-space/50_cluster_between_time_freq.py diff --git a/tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py similarity index 100% rename from tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py rename to tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py diff --git a/tutorials/stats-sensor-space/README.txt b/tutorials/stats-sensor-space/README.txt index 1e4c7f4407d..e6ab89b2399 100644 --- a/tutorials/stats-sensor-space/README.txt +++ b/tutorials/stats-sensor-space/README.txt @@ -1,5 +1,5 @@ Statistical analysis of sensor data -=================================== +----------------------------------- These tutorials describe some approaches to statistical analysis of sensor-level data. 
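The background-statistics tutorial above notes that TFCE replaces the single cluster-forming threshold with an approximate integration over thresholds, configured in MNE by passing a dict with ``start`` and ``step`` instead of a scalar. A minimal sketch of that call on synthetic data (the array shape, random seed, and permutation count are arbitrary illustration choices, not values from the tutorial):

import numpy as np
from mne.stats import permutation_cluster_1samp_test

rng = np.random.default_rng(42)
X = rng.standard_normal((10, 20, 20))  # subjects x space x space, pure noise
X[:, 8:12, 8:12] += 1.                 # add a small square "signal"

# passing a dict as threshold requests TFCE; a scalar gives ordinary clustering
threshold_tfce = dict(start=0, step=0.2)
t_obs, clusters, cluster_pv, h0 = permutation_cluster_1samp_test(
    X, threshold=threshold_tfce, n_permutations=256, tail=1)
print(cluster_pv.min())  # smallest TFCE-corrected p-value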
diff --git a/tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py b/tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py similarity index 99% rename from tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py rename to tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py index 2f9733e1c1e..0a1d9726625 100644 --- a/tutorials/stats-source-space/plot_stats_cluster_spatio_temporal.py +++ b/tutorials/stats-source-space/20_cluster_1samp_spatiotemporal.py @@ -179,7 +179,7 @@ subject='fsaverage') # Let's actually plot the first "time point" in the SourceEstimate, which -# shows all the clusters, weighted by duration +# shows all the clusters, weighted by duration. subjects_dir = op.join(data_path, 'subjects') # blue blobs are for condition A < condition B, red for A > B brain = stc_all_cluster_vis.plot( diff --git a/tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py b/tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py similarity index 100% rename from tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_2samp.py rename to tutorials/stats-source-space/30_cluster_ftest_spatiotemporal.py diff --git a/tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py b/tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py similarity index 100% rename from tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py rename to tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py diff --git a/tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py b/tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py similarity index 100% rename from tutorials/stats-source-space/plot_stats_cluster_time_frequency_repeated_measures_anova.py rename to tutorials/stats-source-space/70_cluster_rmANOVA_time_freq.py diff --git a/tutorials/stats-source-space/README.txt b/tutorials/stats-source-space/README.txt index bdb72800e4d..20403bd86d4 100644 --- a/tutorials/stats-source-space/README.txt +++ b/tutorials/stats-source-space/README.txt @@ -1,5 +1,5 @@ Statistical analysis of source estimates -======================================== +---------------------------------------- These tutorials cover within-subject statistical analysis of source estimates. diff --git a/tutorials/time-freq/plot_sensors_time_frequency.py b/tutorials/time-freq/20_sensors_time_frequency.py similarity index 96% rename from tutorials/time-freq/plot_sensors_time_frequency.py rename to tutorials/time-freq/20_sensors_time_frequency.py index a8f205ce39c..66a46924d1a 100644 --- a/tutorials/time-freq/plot_sensors_time_frequency.py +++ b/tutorials/time-freq/20_sensors_time_frequency.py @@ -136,7 +136,13 @@ # To this we'll use the function :func:`mne.time_frequency.tfr_morlet` # but you can also use :func:`mne.time_frequency.tfr_multitaper` # or :func:`mne.time_frequency.tfr_stockwell`. - +# +# .. note:: +# The ``decim`` parameter reduces the sampling rate of the time-frequency +# decomposition by the defined factor. This is usually done to reduce +# memory usage. For more information refer to the documentation of +# :func:`mne.time_frequency.tfr_morlet`. +# # define frequencies of interest (log-spaced) freqs = np.logspace(*np.log10([6, 35]), num=8) n_cycles = freqs / 2. 
# different number of cycle per frequency
diff --git a/tutorials/time-freq/50_ssvep.py b/tutorials/time-freq/50_ssvep.py
new file mode 100644
index 00000000000..dbdf9d16695
--- /dev/null
+++ b/tutorials/time-freq/50_ssvep.py
@@ -0,0 +1,675 @@
+"""
+.. _tut-ssvep:
+
+==========================================================
+Frequency-tagging: Basic analysis of an SSVEP/vSSR dataset
+==========================================================
+
+In this tutorial we compute the frequency spectrum and quantify signal-to-noise
+ratio (SNR) at a target frequency in EEG data recorded during fast periodic
+visual stimulation (FPVS) at 12 Hz and 15 Hz in different trials.
+Extracting SNR at stimulation frequency is a simple way to quantify frequency
+tagged responses in MEEG (a.k.a. steady state visually evoked potentials,
+SSVEP, or visual steady-state responses, vSSR in the visual domain,
+or auditory steady-state responses, ASSR in the auditory domain).
+
+For a general introduction to the method see
+`Norcia et al. (2015) `_ for the visual domain,
+and `Picton et al. (2003) `_ for
+the auditory domain.
+
+**Data and outline:**
+
+We use a simple example dataset with frequency tagged visual stimulation:
+N=2 participants observed checkerboard patterns inverting with a constant
+frequency of either 12.0 Hz or 15.0 Hz.
+32-channel wet EEG was recorded
+(see :ref:`ssvep-dataset` for more information).
+
+We will visualize both the power-spectral density (PSD) and the SNR
+spectrum of the epoched data,
+extract SNR at stimulation frequency,
+plot the topography of the response,
+and statistically separate 12 Hz and 15 Hz responses in the different trials.
+Since the evoked response is mainly generated in early visual areas of the
+brain, the statistical analysis will be carried out on an occipital
+ROI.
+
+.. contents:: Outline
+   :depth: 2
+"""  # noqa: E501
+# Authors: Dominik Welke
+#          Evgenii Kalenkovich
+#
+# License: BSD (3-clause)
+
+import matplotlib.pyplot as plt
+import mne
+import numpy as np
+from scipy.stats import ttest_rel

+###############################################################################
+# Data preprocessing
+# ------------------
+# Due to a generally high SNR in SSVEP/vSSR, typical preprocessing steps
+# are considered optional. This doesn't mean that proper cleaning would not
+# increase your signal quality!
+#
+# * Raw data have FCz reference, so we will apply common-average rereferencing.
+#
+# * We will apply a 0.1 Hz highpass filter.
+#
+# * Lastly, we will cut the data in 20 s epochs corresponding to the trials.
+#
+#

+# Load raw data
+data_path = mne.datasets.ssvep.data_path()
+bids_fname = data_path + '/sub-02/ses-01/eeg/sub-02_ses-01_task-ssvep_eeg.vhdr'
+
+raw = mne.io.read_raw_brainvision(bids_fname, preload=True, verbose=False)
+raw.info['line_freq'] = 50.
+
+# Set montage
+montage = mne.channels.make_standard_montage('easycap-M1')
+raw.set_montage(montage, verbose=False)
+
+# Set common average reference
+raw.set_eeg_reference('average', projection=False, verbose=False)
+
+# Apply bandpass filter
+raw.filter(l_freq=0.1, h_freq=None, fir_design='firwin', verbose=False)
+
+# Construct epochs
+event_id = {
+    '12hz': 255,
+    '15hz': 155
+}
+events, _ = mne.events_from_annotations(raw, verbose=False)
+raw.info["events"] = events
+tmin, tmax = -1., 20.
# in s +baseline = None +epochs = mne.Epochs( + raw, events=events, + event_id=[event_id['12hz'], event_id['15hz']], tmin=tmin, + tmax=tmax, baseline=baseline, verbose=False) + +############################################################################### +# Frequency analysis +# ------------------ +# Now we compute the frequency spectrum of the EEG data. +# You will already see the peaks at the stimulation frequencies and some of +# their harmonics, without any further processing. +# +# The 'classical' PSD plot will be compared to a plot of the SNR spectrum. +# SNR will be computed as a ratio of the power in a given frequency bin +# to the average power in its neighboring bins. +# This procedure has two advantages over using the raw PSD: +# +# * it normalizes the spectrum and accounts for 1/f power decay. +# +# * power modulations which are not very narrow band will disappear. +# +# Calculate power spectral density (PSD) +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# The frequency spectrum will be computed using Fast Fourier transform (FFT). +# This seems to be common practice in the steady-state literature and is +# based on the exact knowledge of the stimulus and the assumed response - +# especially in terms of it's stability over time. +# For a discussion see e.g. +# `Bach & Meigen (1999) `_ +# +# We will exclude the first second of each trial from the analysis: +# +# * steady-state response often take a while to stabilize, and the +# transient phase in the beginning can distort the signal estimate. +# +# * this section of data is expected to be dominated by responses related to +# the stimulus onset, and we are not interested in this. +# +# In MNE we call plain FFT as a special case of Welch's method, with only a +# single Welch window spanning the entire trial and no specific windowing +# function (i.e. applying a boxcar window). +# + +tmin = 1. +tmax = 20. +fmin = 1. +fmax = 90. +sfreq = epochs.info['sfreq'] + +psds, freqs = mne.time_frequency.psd_welch( + epochs, + n_fft=int(sfreq * (tmax - tmin)), + n_overlap=0, n_per_seg=None, + tmin=tmin, tmax=tmax, + fmin=fmin, fmax=fmax, + window='boxcar', + verbose=False) + + +############################################################################### +# Calculate signal to noise ratio (SNR) +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# SNR - as we define it here - is a measure of relative power: +# it's the ratio of power in a given frequency bin - the 'signal' - +# to a 'noise' baseline - the average power in the surrounding frequency bins. +# This approach was initially proposed by +# `Meigen & Bach (1999) `_ +# +# Hence, we need to set some parameters for this baseline - how many +# neighboring bins should be taken for this computation, and do we want to skip +# the direct neighbors (this can make sense if the stimulation frequency is not +# super constant, or frequency bands are very narrow). +# +# The function below does what we want. +# + +def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1): + """Compute SNR spectrum from PSD spectrum using convolution. + + Parameters + ---------- + psd : ndarray, shape ([n_trials, n_channels,] n_frequency_bins) + Data object containing PSD values. Works with arrays as produced by + MNE's PSD functions or channel/trial subsets. + noise_n_neighbor_freqs : int + Number of neighboring frequencies used to compute noise level. 
+ increment by one to add one frequency bin ON BOTH SIDES + noise_skip_neighbor_freqs : int + set this >=1 if you want to exclude the immediately neighboring + frequency bins in noise level calculation + + Returns + ------- + snr : ndarray, shape ([n_trials, n_channels,] n_frequency_bins) + Array containing SNR for all epochs, channels, frequency bins. + NaN for frequencies on the edges, that do not have enough neighbors on + one side to calculate SNR. + """ + # Construct a kernel that calculates the mean of the neighboring + # frequencies + averaging_kernel = np.concatenate(( + np.ones(noise_n_neighbor_freqs), + np.zeros(2 * noise_skip_neighbor_freqs + 1), + np.ones(noise_n_neighbor_freqs))) + averaging_kernel /= averaging_kernel.sum() + + # Calculate the mean of the neighboring frequencies by convolving with the + # averaging kernel. + mean_noise = np.apply_along_axis( + lambda psd_: np.convolve(psd_, averaging_kernel, mode='valid'), + axis=-1, arr=psd + ) + + # The mean is not defined on the edges so we will pad it with nas. The + # padding needs to be done for the last dimension only so we set it to + # (0, 0) for the other ones. + edge_width = noise_n_neighbor_freqs + noise_skip_neighbor_freqs + pad_width = [(0, 0)] * (mean_noise.ndim - 1) + [(edge_width, edge_width)] + mean_noise = np.pad( + mean_noise, pad_width=pad_width, constant_values=np.nan + ) + + return psd / mean_noise + + +############################################################################### +# Now we call the function to compute our SNR spectrum. +# +# As described above, we have to define two parameters. +# +# * how many noise bins do we want? +# +# * do we want to skip the n bins directly next to the target bin? +# +# +# Tweaking these parameters *can* drastically impact the resulting spectrum, +# but mainly if you choose extremes. +# E.g. if you'd skip very many neighboring bins, broad band power modulations +# (such as the alpha peak) should reappear in the SNR spectrum. +# On the other hand, if you skip none you might miss or smear peaks if the +# induced power is distributed over two or more frequency bins (e.g. if the +# stimulation frequency isn't perfectly constant, or you have very narrow +# bins). +# +# Here, we want to compare power at each bin with average power of the +# **three neighboring bins** (on each side) and **skip one bin** directly next +# to it. +# + + +snrs = snr_spectrum(psds, noise_n_neighbor_freqs=3, + noise_skip_neighbor_freqs=1) + +############################################################################## +# Plot PSD and SNR spectra +# ^^^^^^^^^^^^^^^^^^^^^^^^ +# Now we will plot grand average PSD (in blue) and SNR (in red) ± sd +# for every frequency bin. +# PSD is plotted on a log scale. 
+# + +fig, axes = plt.subplots(2, 1, sharex='all', sharey='none', figsize=(8, 5)) +freq_range = range(np.where(np.floor(freqs) == 1.)[0][0], + np.where(np.ceil(freqs) == fmax - 1)[0][0]) + +psds_plot = 10 * np.log10(psds) +psds_mean = psds_plot.mean(axis=(0, 1))[freq_range] +psds_std = psds_plot.std(axis=(0, 1))[freq_range] +axes[0].plot(freqs[freq_range], psds_mean, color='b') +axes[0].fill_between( + freqs[freq_range], psds_mean - psds_std, psds_mean + psds_std, + color='b', alpha=.2) +axes[0].set(title="PSD spectrum", ylabel='Power Spectral Density [dB]') + +# SNR spectrum +snr_mean = snrs.mean(axis=(0, 1))[freq_range] +snr_std = snrs.std(axis=(0, 1))[freq_range] + +axes[1].plot(freqs[freq_range], snr_mean, color='r') +axes[1].fill_between( + freqs[freq_range], snr_mean - snr_std, snr_mean + snr_std, + color='r', alpha=.2) +axes[1].set( + title="SNR spectrum", xlabel='Frequency [Hz]', + ylabel='SNR', ylim=[-2, 30], xlim=[fmin, fmax]) +fig.show() + +############################################################################### +# You can see that the peaks at the stimulation frequencies (12 Hz, 15 Hz) +# and their harmonics are visible in both plots (just as the line noise at +# 50 Hz). +# Yet, the SNR spectrum shows them more prominently as peaks from a +# noisy but more or less constant baseline of SNR = 1. +# You can further see that the SNR processing removes any broad-band power +# differences (such as the increased power in alpha band around 10 Hz), +# and also removes the 1/f decay in the PSD. +# +# Note, that while the SNR plot implies the possibility of values below 0 +# (mean minus sd) such values do not make sense. +# Each SNR value is a ratio of positive PSD values, and the lowest possible PSD +# value is 0 (negative Y-axis values in the upper panel only result from +# plotting PSD on a log scale). +# Hence SNR values must be positive and can minimally go towards 0. +# +# Extract SNR values at the stimulation frequency +# ----------------------------------------------- +# +# Our processing yielded a large array of many SNR values for each trial x +# channel x frequency-bin of the PSD array. +# +# For statistical analysis we obviously need to define specific subsets of this +# array. First of all, we are only interested in SNR at the stimulation +# frequency, but we also want to restrict the analysis to a spatial ROI. +# Lastly, answering your interesting research questions will probably rely on +# comparing SNR in different trials. +# +# Therefore we will have to find the indices of trials, channels, etc. +# Alternatively, one could subselect the trials already at the epoching step, +# using MNE's event information, and process different epoch structures +# separately. +# +# Let's only have a look at the trials with 12 Hz stimulation, for now. +# + +# define stimulation frequency +stim_freq = 12. + +############################################################################### +# Get index for the stimulation frequency (12 Hz) +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Ideally, there would be a bin with the stimulation frequency exactly in its +# center. However, depending on your Spectral decomposition this is not +# always the case. We will find the bin closest to it - this one should contain +# our frequency tagged response. 
+# + +# find index of frequency bin closest to stimulation frequency +i_bin_12hz = np.argmin(abs(freqs - stim_freq)) +# could be updated to support multiple frequencies + +# for later, we will already find the 15 Hz bin and the 1st and 2nd harmonic +# for both. +i_bin_24hz = np.argmin(abs(freqs - 24)) +i_bin_36hz = np.argmin(abs(freqs - 36)) +i_bin_15hz = np.argmin(abs(freqs - 15)) +i_bin_30hz = np.argmin(abs(freqs - 30)) +i_bin_45hz = np.argmin(abs(freqs - 45)) + +############################################################################### +# Get indices for the different trial types +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +i_trial_12hz = np.where(epochs.events[:, 2] == event_id['12hz'])[0] +i_trial_15hz = np.where(epochs.events[:, 2] == event_id['15hz'])[0] + +############################################################################### +# Get indices of EEG channels forming the ROI +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +# Define different ROIs +roi_vis = ['POz', 'Oz', 'O1', 'O2', 'PO3', 'PO4', 'PO7', + 'PO8', 'PO9', 'PO10', 'O9', 'O10'] # visual roi + +# Find corresponding indices using mne.pick_types() +picks_roi_vis = mne.pick_types(epochs.info, eeg=True, stim=False, + exclude='bads', selection=roi_vis) + +############################################################################### +# Apply the subset, and check the result +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# Now we simply need to apply our selection and yield a result. Therefore, +# we typically report grand average SNR over the subselection. +# +# In this tutorial we don't verify the presence of a neural response. +# This is commonly done in the ASSR literature where SNR is +# often lower. An F-test or Hotelling T² would be +# appropriate for this purpose. + +snrs_target = snrs[i_trial_12hz, :, i_bin_12hz][:, picks_roi_vis] +print("sub 2, 12 Hz trials, SNR at 12 Hz") +print(f'average SNR (occipital ROI): {snrs_target.mean()}') + +############################################################################## +# Topography of the vSSR +# ---------------------- +# But wait... +# As described in the intro, we have decided *a priori* to work with average +# SNR over a subset of occipital channels - a visual region of interest (ROI) +# - because we expect SNR to be higher on these channels than in other +# channels. +# +# Let's check out, whether this was a good decision! +# +# Here we will plot average SNR for each channel location as a topoplot. +# Then we will do a simple paired T-test to check, whether average SNRs over +# the two sets of channels are significantly different. 
+#
+
+# get average SNR at 12 Hz for ALL channels
+snrs_12hz = snrs[i_trial_12hz, :, i_bin_12hz]
+snrs_12hz_chaverage = snrs_12hz.mean(axis=0)
+
+# plot SNR topography
+fig, ax = plt.subplots(1)
+mne.viz.plot_topomap(snrs_12hz_chaverage, epochs.info, vmin=1., axes=ax)
+
+print("sub 2, 12 Hz trials, SNR at 12 Hz")
+print("average SNR (all channels): %f" % snrs_12hz_chaverage.mean())
+print("average SNR (occipital ROI): %f" % snrs_target.mean())
+
+tstat_roi_vs_scalp = \
+    ttest_rel(snrs_target.mean(axis=1), snrs_12hz.mean(axis=1))
+print("12 Hz SNR in occipital ROI is significantly larger than 12 Hz SNR over "
+      "all channels: t = %.3f, p = %f" % tstat_roi_vs_scalp)
+
+##############################################################################
+# We can see that 1) this participant indeed exhibits a cluster of channels
+# with high SNR in the occipital region and 2) that the average SNR over all
+# channels is smaller than the average of the visual ROI computed above.
+# The difference is statistically significant. Great!
+#
+# Such a topography plot can be a nice tool to explore and play with your data
+# - e.g. you could check how changing the reference affects the spatial
+# distribution of SNR values.
+#
+# However, we also wanted to show this plot to point at a potential
+# problem with frequency-tagged (or any other brain imaging) data:
+# there are many channels and somewhere you will likely find some
+# statistically significant effect.
+# It is very easy - even unintended - to end up double-dipping or p-hacking.
+# So if you want to work with an ROI or individual channels, ideally select
+# them *a priori* - before collecting or looking at the data - and preregister
+# this decision so people will believe you.
+# If you end up selecting an ROI or individual channel for reporting *because
+# this channel or ROI shows an effect*, e.g. in an explorative analysis, this
+# is also fine, but make it transparent and correct for multiple comparisons.
+#
+# Statistical separation of 12 Hz and 15 Hz vSSR
+# ----------------------------------------------
+# After this little detour into open science, let's move on and
+# do the analyses we actually wanted to do:
+#
+# We will show that we can easily detect and discriminate the brain responses
+# in the trials with different stimulation frequencies.
+#
+# In the frequency and SNR spectrum plot above, we had all trials mixed up.
+# Now we will extract 12 and 15 Hz SNR in both types of trials individually,
+# and compare the values with a simple t-test.
+# We will also extract SNR at the 1st and 2nd harmonics for both stimulation
+# frequencies. These are often reported as well and can show interesting
+# interactions.
+#
+
+snrs_roi = snrs[:, picks_roi_vis, :].mean(axis=1)
+
+freq_plot = [12, 15, 24, 30, 36, 45]
+color_plot = [
+    'darkblue', 'darkgreen', 'mediumblue', 'green', 'blue', 'seagreen'
+]
+xpos_plot = [-5. / 12, -3. / 12, -1. / 12, 1. / 12, 3. / 12, 5.
/ 12] +fig, ax = plt.subplots() +labels = ['12 Hz trials', '15 Hz trials'] +x = np.arange(len(labels)) # the label locations +width = 0.6 # the width of the bars +res = dict() + +# loop to plot SNRs at stimulation frequencies and harmonics +for i, f in enumerate(freq_plot): + # extract snrs + stim_12hz_tmp = \ + snrs_roi[i_trial_12hz, np.argmin(abs(freqs - f))] + stim_15hz_tmp = \ + snrs_roi[i_trial_15hz, np.argmin(abs(freqs - f))] + SNR_tmp = [stim_12hz_tmp.mean(), stim_15hz_tmp.mean()] + # plot (with std) + ax.bar( + x + width * xpos_plot[i], SNR_tmp, width / len(freq_plot), + yerr=np.std(SNR_tmp), + label='%i Hz SNR' % f, color=color_plot[i]) + # store results for statistical comparison + res['stim_12hz_snrs_%ihz' % f] = stim_12hz_tmp + res['stim_15hz_snrs_%ihz' % f] = stim_15hz_tmp + +# Add some text for labels, title and custom x-axis tick labels, etc. +ax.set_ylabel('SNR') +ax.set_title('Average SNR at target frequencies') +ax.set_xticks(x) +ax.set_xticklabels(labels) +ax.legend(['%i Hz' % f for f in freq_plot], title='SNR at:') +ax.set_ylim([0, 70]) +ax.axhline(1, ls='--', c='r') +fig.show() + +############################################################################### +# As you can easily see there are striking differences between the trials. +# Let's verify this using a series of two-tailed paired T-Tests. +# + +# Compare 12 Hz and 15 Hz SNR in trials after averaging over channels + +tstat_12hz_trial_stim = \ + ttest_rel(res['stim_12hz_snrs_12hz'], res['stim_12hz_snrs_15hz']) +print("12 Hz Trials: 12 Hz SNR is significantly higher than 15 Hz SNR" + ": t = %.3f, p = %f" % tstat_12hz_trial_stim) + +tstat_12hz_trial_1st_harmonic = \ + ttest_rel(res['stim_12hz_snrs_24hz'], res['stim_12hz_snrs_30hz']) +print("12 Hz Trials: 24 Hz SNR is significantly higher than 30 Hz SNR" + ": t = %.3f, p = %f" % tstat_12hz_trial_1st_harmonic) + +tstat_12hz_trial_2nd_harmonic = \ + ttest_rel(res['stim_12hz_snrs_36hz'], res['stim_12hz_snrs_45hz']) +print("12 Hz Trials: 36 Hz SNR is significantly higher than 45 Hz SNR" + ": t = %.3f, p = %f" % tstat_12hz_trial_2nd_harmonic) + +print() +tstat_15hz_trial_stim = \ + ttest_rel(res['stim_15hz_snrs_12hz'], res['stim_15hz_snrs_15hz']) +print("15 Hz trials: 12 Hz SNR is significantly lower than 15 Hz SNR" + ": t = %.3f, p = %f" % tstat_15hz_trial_stim) + +tstat_15hz_trial_1st_harmonic = \ + ttest_rel(res['stim_15hz_snrs_24hz'], res['stim_15hz_snrs_30hz']) +print("15 Hz trials: 24 Hz SNR is significantly lower than 30 Hz SNR" + ": t = %.3f, p = %f" % tstat_15hz_trial_1st_harmonic) + +tstat_15hz_trial_2nd_harmonic = \ + ttest_rel(res['stim_15hz_snrs_36hz'], res['stim_15hz_snrs_45hz']) +print("15 Hz trials: 36 Hz SNR is significantly lower than 45 Hz SNR" + ": t = %.3f, p = %f" % tstat_15hz_trial_2nd_harmonic) + +############################################################################## +# Debriefing +# ---------- +# So that's it, we hope you enjoyed our little tour through this example +# dataset. +# +# As you could see, frequency-tagging is a very powerful tool that can yield +# very high signal to noise ratios and effect sizes that enable you to detect +# brain responses even within a single participant and single trials of only +# a few seconds duration. +# +# Bonus exercises +# --------------- +# For the overly motivated amongst you, let's see what else we can show with +# these data. +# +# Using the PSD function as implemented in MNE makes it very easy to change +# the amount of data that is actually used in the spectrum +# estimation. 
+# +# Here we employ this to show you some features of frequency +# tagging data that you might or might not have already intuitively expected: +# +# Effect of trial duration on SNR +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# First we will simulate shorter trials by taking only the first x s of our 20s +# trials (2, 4, 6, 8, ..., 20 s), and compute the SNR using a FFT window +# that covers the entire epoch: +# + +stim_bandwidth = .5 + +# shorten data and welch window +window_lengths = [i for i in range(2, 21, 2)] +window_snrs = [[]] * len(window_lengths) +for i_win, win in enumerate(window_lengths): + # compute spectrogram + windowed_psd, windowed_freqs = mne.time_frequency.psd_welch( + epochs[str(event_id['12hz'])], + n_fft=int(sfreq * win), + n_overlap=0, n_per_seg=None, + tmin=0, tmax=win, + window='boxcar', + fmin=fmin, fmax=fmax, verbose=False) + # define a bandwidth of 1 Hz around stimfreq for SNR computation + bin_width = windowed_freqs[1] - windowed_freqs[0] + skip_neighbor_freqs = \ + round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if ( + bin_width < stim_bandwidth) else 0 + n_neighbor_freqs = \ + int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11) + ) - 1 - 2 * skip_neighbor_freqs) / 2) + # compute snr + windowed_snrs = \ + snr_spectrum( + windowed_psd, + noise_n_neighbor_freqs=n_neighbor_freqs if ( + n_neighbor_freqs > 0 + ) else 1, + noise_skip_neighbor_freqs=skip_neighbor_freqs) + window_snrs[i_win] = \ + windowed_snrs[ + :, picks_roi_vis, + np.argmin( + abs(windowed_freqs - 12.))].mean(axis=1) + +fig, ax = plt.subplots(1) +ax.boxplot(window_snrs, labels=window_lengths, vert=True) +ax.set(title='Effect of trial duration on 12 Hz SNR', + ylabel='Average SNR', xlabel='Trial duration [s]') +ax.axhline(1, ls='--', c='r') +fig.show() + + +############################################################################## +# You can see that the signal estimate / our SNR measure increases with the +# trial duration. +# +# This should be easy to understand: in longer recordings there is simply +# more signal (one second of additional stimulation adds, in our case, 12 +# cycles of signal) while the noise is (hopefully) stochastic and not locked +# to the stimulation frequency. +# In other words: with more data the signal term grows faster than the noise +# term. +# +# We can further see that the very short trials with FFT windows < 2-3s are not +# great - here we've either hit the noise floor and/or the transient response +# at the trial onset covers too much of the trial. +# +# Again, this tutorial doesn't statistically test for the presence of a neural +# response, but an F-test or Hotelling T² would be appropriate for this +# purpose. +# +# Time resolved SNR +# ^^^^^^^^^^^^^^^^^ +# ..and finally we can trick MNE's PSD implementation to make it a +# sliding window analysis and come up with a time resolved SNR measure. +# This will reveal whether a participant blinked or scratched their head.. +# +# Each of the ten trials is coded with a different color in the plot below. 
+#
+
+# 4 s sliding window
+window_length = 4
+window_starts = [i for i in range(20 - window_length)]
+window_snrs = [[]] * len(window_starts)
+
+for i_win, win in enumerate(window_starts):
+    # compute spectrogram
+    windowed_psd, windowed_freqs = mne.time_frequency.psd_welch(
+        epochs[str(event_id['12hz'])],
+        n_fft=int(sfreq * window_length) - 1,
+        n_overlap=0, n_per_seg=None,
+        window='boxcar',
+        tmin=win, tmax=win + window_length,
+        fmin=fmin, fmax=fmax,
+        verbose=False)
+    # define a bandwidth of 1 Hz around stimfreq for SNR computation
+    bin_width = windowed_freqs[1] - windowed_freqs[0]
+    skip_neighbor_freqs = \
+        round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if (
+            bin_width < stim_bandwidth) else 0
+    n_neighbor_freqs = \
+        int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11)
+                 ) - 1 - 2 * skip_neighbor_freqs) / 2)
+    # compute snr
+    windowed_snrs = snr_spectrum(
+        windowed_psd,
+        noise_n_neighbor_freqs=n_neighbor_freqs if (
+            n_neighbor_freqs > 0) else 1,
+        noise_skip_neighbor_freqs=skip_neighbor_freqs)
+    window_snrs[i_win] = \
+        windowed_snrs[:, picks_roi_vis, np.argmin(
+            abs(windowed_freqs - 12.))].mean(axis=1)
+
+fig, ax = plt.subplots(1)
+colors = plt.get_cmap('Greys')(np.linspace(0, 1, 10))
+for i in range(10):
+    ax.plot(window_starts, np.array(window_snrs)[:, i], color=colors[i])
+ax.set(title='Time resolved 12 Hz SNR - %is sliding window' % window_length,
+       ylabel='Average SNR', xlabel='t0 of analysis window [s]')
+ax.axhline(1, ls='--', c='r')
+ax.legend(['individual trials in greyscale'])
+fig.show()
+##############################################################################
+# Well.. turns out this was a bit too optimistic ;)
+#
+# But seriously: this was a nice idea, but we've reached the limit of
+# what's possible with this single-subject example dataset.
+# However, there might be data, applications, or research questions
+# where such an analysis makes sense.
+#
diff --git a/tutorials/time-freq/README.txt b/tutorials/time-freq/README.txt
index a8978bf09d8..dc78183b8ff 100644
--- a/tutorials/time-freq/README.txt
+++ b/tutorials/time-freq/README.txt
@@ -1,5 +1,5 @@
 Time-frequency analysis
-=======================
+-----------------------
 
 These tutorials cover frequency and time-frequency analysis of neural
 signals.
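To close the loop on the SNR-spectrum idea used throughout the new SSVEP tutorial (power in one frequency bin divided by the average power of its neighbors, skipping the bins immediately adjacent), here is a self-contained toy sketch. It mirrors the logic of the tutorial's ``snr_spectrum`` helper on a made-up flat spectrum with a single elevated bin; the bin values and parameters are illustration choices only.

import numpy as np


def toy_snr(psd, n_neighbors=3, n_skip=1):
    # average the n_neighbors bins on each side, skipping n_skip direct
    # neighbors, then divide the PSD by that local noise estimate
    kernel = np.concatenate((np.ones(n_neighbors),
                             np.zeros(2 * n_skip + 1),
                             np.ones(n_neighbors)))
    kernel /= kernel.sum()
    noise = np.convolve(psd, kernel, mode='valid')
    pad = n_neighbors + n_skip
    noise = np.pad(noise, pad, constant_values=np.nan)  # undefined at edges
    return psd / noise


psd = np.ones(100)   # flat "noise floor"
psd[40] = 8.         # a frequency-tagged "response" in a single bin
snr = toy_snr(psd)
print(np.nanmax(snr), np.nanargmax(snr))  # 8.0 at bin 40
print(np.nanmedian(snr))                  # ~1 everywhere else

On a flat spectrum the ratio stays near 1, so a bin that rises clearly above 1 marks a frequency-locked response; that is the same criterion the tutorial applies at 12 Hz, 15 Hz and their harmonics.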