diff --git a/content/_config.yml b/content/_config.yml index cced1c9..d8dd5a3 100644 --- a/content/_config.yml +++ b/content/_config.yml @@ -5,7 +5,7 @@ ####################################################################################### # Book settings -title : "Introduction to brain encoding and decoding in fMRI" # The title of the book. Will be placed in the left navbar. +title : "Introduction to brain decoding in fMRI" # The title of the book. Will be placed in the left navbar. author : # The author of the book copyright : "2022" # Copyright year to be placed in the footer logo : "../images/neurolibre-logo.png" # A path to the book logo @@ -30,7 +30,8 @@ html: google_analytics_id : "" # A GA id that can be used to track book views. home_page_in_navbar : true # Whether to include your home page in the left Navigation Bar baseurl : "https://main-educational.github.io/brain_encoding_decoding/" # The base URL where your book will be hosted. Used for creating image previews and social links. e.g.: https://mypage.com/mybook/ - + comments: + hypothesis: true ####################################################################################### # Launch button settings launch_buttons: @@ -55,6 +56,39 @@ sphinx: config: html_js_files: - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js + nb_custom_formats: + .Rmd: + - jupytext.reads + - fmt: Rmd + mathjax_config: + TeX: + Macros: + "N": "\\mathbb{N}" + "floor": ["\\lfloor#1\\rfloor", 1] + "bmat": ["\\left[\\begin{array}"] + "emat": ["\\end{array}\\right]"] + latex_elements: + preamble: | + \newcommand\N{\mathbb{N}} + \newcommand\floor[1]{\lfloor#1\rfloor} + \newcommand{\bmat}{\left[\begin{array}} + \newcommand{\emat}{\end{array}\right]} + intersphinx_mapping: + ebp: + - "https://executablebooks.org/en/latest/" + - null + myst-parser: + - "https://myst-parser.readthedocs.io/en/latest/" + - null + myst-nb: + - "https://myst-nb.readthedocs.io/en/latest/" + - null + sphinx: + - "https://www.sphinx-doc.org/en/master" + - null + nbformat: + - "https://nbformat.readthedocs.io/en/latest" + - null ####################################################################################### # parse settings diff --git a/content/conf.py b/content/conf.py index 1f5356a..dbea9d2 100644 --- a/content/conf.py +++ b/content/conf.py @@ -54,7 +54,7 @@ # html_title = "" html_theme = "sphinx_book_theme" -html_logo = "_static/cog_com_neuro_ml_dl.png" +html_logo = "images/neurolibre_logo.png" html_theme_options = { "github_url": "https://github.com/main-educational/brain_encoding_decoding", "repository_url": "https://github.com/main-educational/brain_encoding_decoding", diff --git a/content/graphics/decoding_pipeline_example.png b/content/graphics/decoding_pipeline_example.png new file mode 100644 index 0000000..6e9fdea Binary files /dev/null and b/content/graphics/decoding_pipeline_example.png differ diff --git a/content/haxby_data.md b/content/haxby_data.md index c969c1f..96d0673 100644 --- a/content/haxby_data.md +++ b/content/haxby_data.md @@ -8,94 +8,322 @@ jupytext: format_version: 0.13 jupytext_version: 1.11.5 kernelspec: - display_name: Python 3 (ipykernel) + display_name: main_edu_2022 language: python - name: python3 + name: main_edu_2022 --- (haxby-dataset)= -# The Haxby dataset +# An overview of the Haxby dataset -## Downloading the data - In the field of functional magnetic resonance imaging (fMRI), one of the first studies which have demonstrated the feasibility of brain decoding was the study by Haxby and colleagues (2001) 
{cite:p}`Haxby2001-vt`. Subjects were presented with various images drawn from different categories. In this tutorial, we will try to decode the category of the image presented to the subject from brain data. We are first going to use nilearn to download one subject (number 4) from the Haxby dataset.
+This part of the `tutorial` aims to make `participants` familiar with the `dataset` we are going to use during this session and also introduce/recap some important aspects concerning `datasets` within `machine learning`/`decoding`. The objectives 📍 are:
-```{code-cell} python3
-:tags: ["remove-output"]
+- (re-)familiarize everyone with important `dataset` aspects
+
+- explore and understand the `tutorial dataset`
+
+
+## A short primer on datasets
+
+We wanted to avoid "just talking" about `brain decoding` in theory and instead also showcase how the respective `models` and workflows can be implemented and run, to give you some first hands-on experience. Even though we would have loved to have everyone bring their own data and directly apply the things we talk about, it's unfortunately a bit too time-consuming for this setting. Thus, we decided to utilize an `example dataset` that is ready to go and "small enough" to run `decoding models` locally, i.e. on laptops. You might think "One of those tutorials again...it works with the example dataset but I have little or no chance of running it on/adapting it to my data." and, based on workshops we ran ourselves, we would agree.
+
+However, we tried our best to address this here by utilizing `software` whose `workflows` and `processing steps` are rather agnostic and implemented via a `high-level API` that _should_ allow a comparably straightforward application to different kinds of `data`. This specifically refers to a set of core aspects concerning the dataset's structure and the information entailed therein. How about a brief recap?
+
+```{figure} graphics/decoding_pipeline_example.png
+---
+width: 800px
+name: decoding_pipeline_example
+---
+
+A schematic representation of a standard `decoding workflow`/`pipeline`. The `input` (`data`) is prepared and potentially `preprocessed` before being submitted to a `model` that is then evaluated via a certain `metric` to provide a certain `output`.
+```
+
+Here, we are going to focus on the `input` (`data`). As you heard before, it is usually expected to be structured as `samples` X `features`.
+
+```{admonition} What could samples X features refer to/entail?
+:class: tip, dropdown
+
+A `sample` could be considered an `observation`/`data point`/one distinct entity or part of the `dataset`. For example, if you want to `predict` what a participant perceived based on their `brain activation`/`response`, the `samples` could entail the `fMRI` `scans` or estimated `contrast images` of that `participant`. If you want to `predict` whether a `participant` exhibited a certain `behavior`, e.g. as captured by a `clinical measure`, then the `samples` could comprise different `participants`.
+
+A `feature`, on the other hand, would entail/describe certain aspects of a given `sample`. For example, if you want to `predict` what a participant perceived based on their `brain activation`/`response`, the `features` could entail the `voxel pattern` within a certain `ROI` for the given `sample`.
+```
+
+Thus, in order to make a given `dataset` "ready" for `machine learning`/`decoding`, we need to get it into the respective structure, as illustrated by the short sketch below.
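+
+To make this more concrete, here is a minimal toy sketch of the `samples` X `features` format (all names and values below are made up purely for illustration and are not part of the `tutorial dataset`):
+
+```{code-cell} ipython3
+import numpy as np
+import pandas as pd
+
+# toy example: 6 samples (rows, e.g. fMRI scans) X 4 features (columns, e.g. voxels)
+rng = np.random.default_rng(0)
+toy_data = pd.DataFrame(rng.normal(size=(6, 4)),
+                        columns=[f'voxel_{i}' for i in range(4)])
+toy_data.index.name = 'sample'
+toy_data
+```
+
+Each `row` is one `observation` and each `column` one descriptive aspect of it — exactly the layout the `decoding models` in the later sections expect.
+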
Lucky for us, the tools we are going to explore, specifically `nilearn`, incorporate this aspect and make the corresponding process rather easy. What you need to run `machine learning`/`decoding` on your `dataset` is:
+
+- know what your `samples` are (e.g. `time series`, `statistical maps`, etc.)
+- know what your `features` are (e.g. the `voxel pattern` of an `ROI`, `annotations`, etc.)
+- get the `dataset` into the form `samples` X `features`, i.e. `samples` are `rows` and `features` are `columns`
+
+While exploring the `tutorial dataset` we will come back to this to make it clearer.
+
+```{admonition} Bonus question: ever heard of the "small-n-high-p" (p >> n) problem?
+:class: tip, dropdown
+
+"Classical" `machine learning`/`decoding` models and the underlying algorithms operate on the assumption that there are more `samples` than there are `predictors` or `features`. In fact, many more. Why is that?
+Consider a high-dimensional `space` whose `dimensions` are defined by the number of `features` (e.g. `10 features` would result in a `space` with `10 dimensions`). The resulting `volume` of this `space` covers the range of `samples` that could be drawn from the `domain`, and the `samples` you actually have need to cover enough of this `space` to address your `learning problem`, i.e. the `decoding` outcome. That is why folks say "get more data" and call `machine learning` `data`-hungry: our `sample` needs to be as representative of the high-dimensional `domain` as possible. Thus, as the number of `features` increases, so should the number of `samples`, so as to capture enough of the `space` for the `decoding model` at hand.
+
+This is referred to as the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) and poses a major problem in many fields that aim to utilize `machine learning`/`decoding` on unsuitable data. Why is that?
+Just imagine we have way more `features` than `samples`, i.e. `50 features` and `10 samples`. Instead of having a large amount of `samples` within the `space`, allowing us to achieve a sufficient coverage of the latter, we now have a very high-dimensional `space` (`50 dimensions`) and only very few `samples` therein, not allowing us to capture nearly as much of the `space` as we would need to. This can result in unexpected outcomes, misleading results or even complete `model` failure. Furthermore, respective `datasets` often lead to `models` that are `overfitted` and don't `generalize` well.
+
+However, there are a few things that can be done to address this, including `feature selection`, `projection` into `lower-dimensional` `spaces`/`representations`, or `regularization`.
+
+Question for everyone: what kind of `datasets` do we usually have in `neuroscience`, especially `neuroimaging`?
+```
+
++++
+
+## Downloading & exploring the `Haxby dataset`
+
+In the field of `functional magnetic resonance imaging` (`fMRI`), one of the first studies to demonstrate the feasibility of `brain decoding` was the study by Haxby and colleagues (2001) {cite:p}`Haxby2001-vt`. `Subjects` were presented with various `images` drawn from different `categories`, and a `decoding model` was subsequently used to `predict` the presented `categories` based on the `brain activity`/`responses`. In the respective parts of this session, we will try to do the same!
+
+We are going to start with one `subject`, number `4`. To get the `data`, we can simply use [nilearn's dataset module](https://nilearn.github.io/stable/modules/datasets.html). First, we need to import the respective `modules`.
+
+```{code-cell} ipython3
 import os
 from nilearn import datasets
-# We are fetching the data for subject 4
+```
+
+Next, we get the `data` and save it in a directory called `data`. Depending on your machine and internet connection, this might take a minute or so.
+
+```{code-cell} ipython3
 data_dir = os.path.join('..', 'data')
-sub_no = 4
-haxby_dataset = datasets.fetch_haxby(subjects=[sub_no], fetch_stimuli=True, data_dir=data_dir)
-func_file = haxby_dataset.func[0]
+haxby_dataset = datasets.fetch_haxby(subjects=[4], fetch_stimuli=True, data_dir=data_dir)
 ```
-## Visualizing the data
-The data includes `nii` files, which contains images of brain volumes, either anatomical or functional. We can examine one functional volume using nilearn's plotting tools. Because fmri data are 4D we use [nilearn.image.mean_img](https://nilearn.github.io/modules/generated/nilearn.image.mean_img.html#nilearn.image.mean_img) to extract the average brain volume.
-```{code-cell} python3
-from nilearn import plotting
-from nilearn.image import mean_img
-plotting.view_img(mean_img(func_file), threshold=None)
+
+What do we have now? Let's have a look!
+
+```{code-cell} ipython3
+haxby_dataset
 ```
-Note that it is very hard to see the anatomy of the brain with that type of image. But it is not designed to capture brain anatomy, but rather changes of brain activity over time, through the coupling of neuronal activity with the oxygenation of blood vessels.
-```{admonition} Interactive viewer
+As you can see, we get a `python dictionary` and there's quite a bit in it. This includes:
+
+- the `anatomical data` under `anat`
+- the `functional data` under `func`
+- an annotation of when `participants` perceived what `category`
+- several `masks` under `mask*`
+- a `dataset` `description`
+- `stimuli categories` and respective `stimuli`
+
++++
+
+`````{admonition} Thinking about input data again...
 :class: tip
-The viewer `plotting.view_img` is interactive. You can click on the brain volume to explore different slices. You can learn about the three anatomical planes: sagittal (`x`), coronal (`y`) and axial (`z`) in the [wikipedia article](https://en.wikipedia.org/wiki/Anatomical_plane).
+What would be our `samples` and `features`?
+`````
+
++++
+
+## The data in more depth
+
+After getting a first idea of what our `dataset` entails, we should spend a bit more time exploring it in more depth, starting with the neuroimaging files.
+
+### Neuroimaging files
+
+As seen above, the data includes several `nii` files, which contain `images` of `brain volumes`, either `anatomical` or `functional` `scans`, as well as (`binary`) `masks`. Let's have a look at the `anatomical` `image` first.
+Using `nilearn`, we can either `load` and then `plot` it or directly `plot` it. Here we are going to do the first option as it will allow us to check the properties of the `image`.
+
+```{code-cell} ipython3
+from nilearn.image import load_img
```
-## Preparing the fMRI data
-```{figure} haxby_data/masker.png
----
-width: 800px
-name: masker-fig
----
-A `Masker` object is used to convert a 4D volume (space + time) into a data array, where each column is a voxel or brain region (features) and each row is a time point (samples). Figure from the [nilearn documentation](https://nilearn.github.io/stable/manipulating_images/masker_objects.html).
+```{code-cell} ipython3 +anat_image = load_img(haxby_dataset.anat) ``` -Instead of keeping the fMRI data as a 4D array (3D spatial coordinates + time), we are going to extract the time series associated with a mask of the ventral temporal cortex. This mask has been generated as part of the Haxby et al. (2001) study, and highlights a part of the brain specialized in the processing of visual information, and which contains areas sensitive to different types of image categories. -```{code-cell} python3 -mask_filename = haxby_dataset.mask_vt[0] -# Let's visualize it, using the subject's anatomical image as a -# background -plotting.plot_roi(mask_filename, bg_img=haxby_dataset.anat[0], - cmap='Paired') +Now we can access basically all parts of the `image`, including the `header` + +```{code-cell} ipython3 +print(anat_image.header) ``` -We use one of the nilearn maskers to extract the fMRI time series associated just with this mask: -```{code-cell} python3 -from nilearn.input_data import NiftiMasker -masker = NiftiMasker(mask_img=mask_filename, standardize=True, detrend=True) -# Selecting data -X = masker.fit_transform(func_file) -print(X.shape) +and actual `data`. + +```{code-cell} ipython3 +anat_image.dataobj +``` + +```{code-cell} ipython3 +anat_image.dataobj.shape +``` + +As you can see, this is basically a `numpy array` that has the same `dimensions` as our `image` and the `data` reflect `values` for a given `voxel`. So far so good but how does it actually look? We can make use of one of `nilearn`'s many [plotting functions](https://nilearn.github.io/stable/modules/plotting.html). + +```{code-cell} ipython3 +from nilearn import plotting +``` + +```{code-cell} ipython3 +plotting.plot_anat(anat_image) +``` + +We can even create an `interactive plot`: + +```{code-cell} ipython3 +plotting.view_img(anat_image, symmetric_cmap=False, cmap='Greys_r', colorbar=False) +``` + +Comparably, we can do the same things with the `functional` `image`. That is `load`ing the `image`: + +```{code-cell} ipython3 +func_image = load_img(haxby_dataset.func) +``` + +and inspect its `header`: + +```{code-cell} ipython3 +print(func_image.header) +``` + +and `data`: + +```{code-cell} ipython3 +func_image.get_data() ``` -We can see that the dataset has 1452 time samples (number of rows) and 675 voxels in the mask (number of columns). -```{admonition} Nilearn maskers +```{code-cell} ipython3 +func_image.dataobj.shape +``` + +`````{admonition} We already noticed something... :class: tip -Nilearn maskers are very versatile and offer many approaches to extract a time series array from 4D data, as well as reshape back an array into a series of brain volumes. You can learn more about nilearn maskers in this [documentation](https://nilearn.github.io/stable/manipulating_images/masker_objects.html). +The `data` of the `anatomical` and `functional` `image` are quite different. Do you know why and which we would use for our planned `decoding` analyses? +````` + ++++ + +As we have a `4D` `image`, that is `brain volumes` acquired over time (the `4th dimension`), we need to adapt the `plotting` a bit. More precisely, we need to either `plot` a `3D image` at a given `time point` or e.g. compute the `mean image` over `time` and `plot` that. The latter might be more informative and additional shows you how easy this can be done using `nilearn`'s [image functions](https://nilearn.github.io/stable/modules/image.html). 
For the `mean image`, we first `import` the respective `function` and compute it:
+
+```{code-cell} ipython3
+from nilearn.image import mean_img
+```
+
+```{code-cell} ipython3
+func_image_mean = mean_img(func_image)
+```
+
+We can check if this worked via the approach we followed above, i.e. checking the `data`:
+
+```{code-cell} ipython3
+func_image_mean.dataobj.shape
+```
+
+That seems about right, and we can give the plot a try:
+
+```{code-cell} ipython3
+plotting.plot_epi(func_image_mean, cmap='magma')
+```
+
+and of course, this also works for `interactive` plots.
+
+```{code-cell} ipython3
+plotting.view_img(func_image_mean, cmap='magma', symmetric_cmap=False)
+```
+
+The last type of `neuroimaging` file we need to check is the (`binary`) `mask`, so let's do it for one example `mask`: the `ventral temporal cortex`. This mask has been generated as part of the Haxby et al. (2001) study {cite:p}`Haxby2001-vt`, and highlights a part of the brain specialized in the processing of visual information, which contains areas sensitive to different types of image categories {cite:p}`grill-spector_functional_2014`. As with the types before, we can `load`,
+
+```{code-cell} ipython3
+vt_mask = load_img(haxby_dataset.mask_vt)
+```
+
+`inspect`
+
+```{code-cell} ipython3
+print(vt_mask.header)
+```
+
+```{code-cell} ipython3
+vt_mask.get_fdata()
+```
+
+```{code-cell} ipython3
+vt_mask.dataobj.shape
+```
+
+and `visualize` it (here, we are going to plot it as an overlay on the `anatomical image`).
+
+```{code-cell} ipython3
+plotting.plot_roi(vt_mask, bg_img=anat_image,
+                  cmap='Paired')
+```
+
+With that, we had a quick look at all `neuroimaging` `file types` present in the `dataset` and can continue with the other `file types` (and the information therein) required to apply a `decoding model`.
+
++++
+
-## Preparing the cognitive annotations
+### Labels and stimulus annotations
+
+As mentioned in prior sessions (e.g. [Supervised learning using scikit-learn](https://main-educational.github.io/material.html#supervised-learning-using-scikit-learn)) and hinted at the [beginning of this session](#A-short-primer-on-datasets), when working on a `supervised learning problem`, we also need the `ground truth`/`true labels` for each `sample`. Why? Because we need to evaluate how a given `model` performs by comparing the `labels` it `predicted` to the `true labels`. What these `labels` refer to can be manifold and of course depends on the `task` at hand.
+
+For example, a `supervised learning problem` in the `dataset` at hand could entail `training` a `model` to `recognize` and `predict` what `category` `participants` perceived based on their `brain activation`. Thus, we would need to know what `category` was shown when during the acquisition of the `data` (or which `category` resulted in which `estimated` `brain activity`).
+
-Now, we are going to extract cognitive annotations, that is values which tell us what type of images the subject was viewing at each time point. Let's look at the first 20 annotations:
+Within our `tutorial dataset`, this information is included in the `session_target` file.
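+
+Before loading it properly, we can quickly peek at the raw file itself — an optional sanity check (assuming the plain-text, space-separated format that `fetch_haxby` downloads):
+
+```{code-cell} ipython3
+# print the first lines of the raw annotation file;
+# session_target is a list of file paths, one entry per fetched subject
+with open(haxby_dataset.session_target[0]) as f:
+    for line in f.readlines()[:5]:
+        print(line.strip())
+```
+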
Using [pandas](https://pandas.pydata.org/pandas-docs/stable/index.html) we can easily `load` and `inspect` this file:
+
+```{code-cell} ipython3
 import pandas as pd
-behavioral = pd.read_csv(haxby_dataset.session_target[0], delimiter=' ')
-display(behavioral.iloc[0:20])
+stimulus_annotations = pd.read_csv(haxby_dataset.session_target[0], delimiter=' ')
 ```
-So let's extract the labels for each time points. We can check that the number of annotations match exactly the number of samples we had in `X`. We can also check all the annotation categories available.
-```{code-cell} python3
-y = behavioral['labels']
-categories = y.unique()
-print(categories)
-print(y.shape)
-print(X.shape)
+
+```{code-cell} ipython3
+stimulus_annotations.head(n=40)
 ```
-These annotations correspond to the category of the image subjects were watching at each time point. Samples of images for each category are shown below:
-```{code-cell} python3
-:tags: ["hide-input"]
+While this is already informative, let's plot it to get a better intuition.
+
+```{code-cell} ipython3
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+ax = sns.scatterplot(x=stimulus_annotations.index, y=stimulus_annotations['labels'],
+                     hue=stimulus_annotations['labels'], legend=False, palette='colorblind')
+plt.title('Categories shown across time')
+ax.set_xlabel('Time point/fMRI scan')
+sns.despine(offset=5)
+```
+
+As we can see, the information provided indicates what `category` `participants` perceived at which `sample` or `fMRI image acquisition`, i.e. at which point in time during the experiment. With that, we have the needed `labels` for our `samples` (i.e. our `Y`) and can thus tackle a `supervised learning problem`.
+
++++
+
+## Summary
+
+This already concludes this section of the session, within which we went through basic `dataset` concepts again and afterwards explored the `tutorial dataset` which we are going to use during the remaining sections of this session, i.e. [Decoding via SVM](), [Decoding using MLPs]() and [Decoding using GCNs]().
+
+Within this section you should have learned:
+
+- important aspects of `datasets`
+  - structured input in the form `samples X features`
+  - the `small n high p` problem
+
+
+- the `tutorial dataset`
+  - background
+  - file types and information therein
+  - `neuroimaging` files
+  - `stimulus annotations`
+
+If you have any questions, please don't hesitate to ask us. Thank you very much for your attention and see you in the next section.
+
++++
+
+## References
+
+```{bibliography}
+:filter: docname in docnames
+```
+
++++
+
+## Bonus: checking the stimuli
+
+As you saw above, our `tutorial dataset` actually also contains the `stimuli` utilized in the experiment. This is pretty unique (because of e.g. copyright problems) but really cool, as we could use the `stimuli` for certain analyses, e.g. [encoding]() and/or comparing their processing in `biological` and `artificial neural networks`. However, this is unfortunately outside the scope of this session. Thus, we are just going to plot a few of them so you get an impression.
+
++++
+
+Let's load the `stimuli` and plot a few examples from each `category` using `matplotlib`.
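+
+Before plotting, it can help to peek at how the fetched `stimuli` are organized — a quick, optional check (assuming the dictionary-like structure that `fetch_haxby` returns with `fetch_stimuli=True`; the plotting code below builds on the same structure):
+
+```{code-cell} ipython3
+# the stimuli entry maps category names to collections of image file paths
+list(haxby_dataset.stimuli.keys())
+```
+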
+ +```{code-cell} ipython3 import matplotlib.pyplot as plt from nilearn import datasets @@ -120,10 +348,10 @@ for stim_type in stimulus_information: show() ``` -Note that for each image category, a number of scrambled images were also presented. -```{code-cell} python3 -:tags: ["hide-input"] +Please note that for each `image` `category`, a number of `scrambled images` were also presented. + +```{code-cell} ipython3 for stim_num in range(len(stimulus_information['controls'])): stim_type = stimulus_information['controls'][stim_num][0] file_names = stimulus_information['controls'][stim_num][1] @@ -139,10 +367,3 @@ for stim_num in range(len(stimulus_information['controls'])): show() ``` - -## References - -```{bibliography} -:filter: docname in docnames -``` -``` diff --git a/content/intro.md b/content/intro.md index 6c2bb9c..e38cfa1 100644 --- a/content/intro.md +++ b/content/intro.md @@ -1,9 +1,9 @@ # Welcome -**"Introduction to brain encoding and decoding in fMRI"** +**"Introduction to brain decoding in fMRI"** -This `jupyter book` presents an introduction to `brain encoding` and `decoding` using `fMRI`. It was developed within the [educational courses](https://main-educational.github.io), conducted as part of the [Montreal AI and Neuroscience (MAIN) conference](https://www.main2022.org/) in November 2022. +This `jupyter book` presents an introduction to `brain decoding` using `fMRI`. It was developed within the [educational courses](https://main-educational.github.io), conducted as part of the [Montreal AI and Neuroscience (MAIN) conference](https://www.main2022.org/) in November 2022. [![Jupyter Book Badge](https://jupyterbook.org/badge.svg)](https://main-educational.github.io/brain_encoding_decoding/intro.html) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/main-educational/brain_encoding_decoding/HEAD) @@ -14,7 +14,7 @@ This `jupyter book` presents an introduction to `brain encoding` and `decoding` [![License](https://img.shields.io/github/license/main-educational/brain_encoding_decoding)](https://github.com/main-educational/brain_encoding_decoding) [![CONP](https://img.shields.io/badge/Supported%20by-%20CONP%2FPCNO-red)](https://conp.ca/) -Building upon the prior sections of the [educational courses](https://main-educational.github.io), the here presented resources aim to provide an overview of how `encoding` and `decoding` `models` can be applied to `fMRI` data in order to investigate `brain function`. Importantly, these methods cannot only be utilized to analyze data from `biological agents` (e.g. `humans`, `non-human primates`, etc.) but also `artificial neural networks`, as well as presenting the opportunity to compare processing in both. They are thus core approaches that are prominently used at the intersection of `neuroscience` and `AI`. +Building upon the prior sections of the [educational courses](https://main-educational.github.io), the here presented resources aim to provide an overview of how `decoding` `models` can be applied to `fMRI` data in order to investigate `brain function`. Importantly, the respective methods cannot only be utilized to analyze data from `biological agents` (e.g. `humans`, `non-human primates`, etc.) but also `artificial neural networks`, as well as presenting the opportunity to compare processing in both. They are thus core approaches that are prominently used at the intersection of `neuroscience` and `AI`. 
```{figure} haxby_data/brain-encoding-decoding.png

@@ -27,12 +27,12 @@

To test the consistency of representations in artificial neural networks (ANNs)
```

 The tutorials make heavy use of [nilearn](https://nilearn.github.io/stable/index.html) concerning
-manipulating and processing `fMRI` data, as well as [scikit-learn](https://scikit-learn.org/stable/) and [pytorch](https://pytorch.org/) to apply `encoding` and `decoding` on the data.
+manipulating and processing `fMRI` data, as well as [scikit-learn](https://scikit-learn.org/stable/) and [pytorch](https://pytorch.org/) to apply `decoding models` on the data.

 We used the [Jupyter Book](https://jupyterbook.org/en/stable/intro.html) framework to provide all materials in an open, structured and interactive manner. All pages and sections you see here are built from `markdown` files or `jupyter notebooks`, allowing you to read through the materials and/or run them, locally or in the cloud. The three symbols on the top right allow you to enable full screen mode, link to the underlying [GitHub repository](https://github.com/main-educational/brain_encoding_decoding) and download the respective sections as a `pdf` or `jupyter notebook`, respectively. Some sections will additionally have a little rocket in that row which will allow you to interactively rerun certain parts via cloud computing (please see the [Binder](#Binder) section for more information).

-## Brain encoding and decoding
+## Brain decoding vs. encoding

 In short, `encoding` and `decoding` entail contrary operations that can yet be utilized in a complementary manner. `Encoding models` applied to `brain data`, e.g. `fMRI`, aim to predict `brain responses`/`activity` based on `annotations` or `features` of the `stimuli` perceived by the `participant`. These can be obtained from a multitude of options, including `artificial neural networks`, which would allow us to relate their `processing` of the `stimuli` to that of `biological agents`, i.e. `brains`. `Decoding models`, on the other hand, comprise `models` with which we aim to `estimate`/`predict` what a `participant` is `perceiving` or `doing` based on `recordings` of `brain responses`/`activity`, e.g. `fMRI`.

@@ -46,10 +46,12 @@ name: brain_encoding_decoding_example_fig

 `Encoding` and `decoding` present contrary, yet complementary operations. While the former targets the prediction of `brain activity`/`responses` based on stimulus percepts/features (e.g. vision & audition), cognitive states or behavior, the latter aims to predict those aspects based on `brain activity`/`responses`.
 ```

-More information and their application can be found in the respective sections of this resources. You can either use the `ToC` on the left or the links below to navigate accordingly.
+For the tutorial on `encoding models`, please have a look at the respective session [here]().
+As noted above, this part of the educational course focuses on `decoding models`. More information on them and their application can be found in the respective sections of this resource. You can either use the `ToC` on the left or the links below to navigate accordingly.

-
-::::{card-carousel} 3
+
+
+::::{card-carousel} 4

 :::{card}
 :margin: 3
@@ -100,10 +102,7 @@
 Brain decoding using a basic artificial neural network.
 +++
 Explore this book {fas}`arrow-right`
 :::
-::::
-
-::::{card-carousel} 2

 :::{card}
 :margin: 3
 :class-body: text-center
@@ -120,23 +119,6 @@
 Graph convolutional networks for brain decoding.
+++ Explore this tutorial {fas}`arrow-right` ::: - -:::{card} -:margin: 3 -:class-body: text-center -:class-header: bg-light text-center -:link: https://main-educational.github.io/brain_encoding_decoding/encoding.html - -**Brain encoding with regression** -^^^ -```{image} https://main-educational.github.io/brain_encoding_decoding/_images/encoding_87_0.png -:height: 100 -``` - -Conducting brain encoding analyses using regression models. -+++ -Explore this tutorial {fas}`arrow-right` -::: :::: diff --git a/content/references.bib b/content/references.bib index e631896..f3a5b14 100644 --- a/content/references.bib +++ b/content/references.bib @@ -1,3 +1,18 @@ +@ARTICLE{grill-spector_functional_2014, + title = "The functional architecture of the ventral temporal cortex and its role in categorization", + volume = 15, + issn = "1471-0048", + url = "https://doi.org/10.1038/nrn3747", + doi = "10.1038/nrn3747", + abstract = "Understanding information processing in the visual system requires an understanding of the interplay among the system's computational goals and representations, and their physical implementation in the brain.Recent results indicate a consistent topology of functional representations relative to each other and anatomical landmarks in high-level visual cortex.The consistent topology of functional representations reveals that axes of representational spaces are physically implemented as axes in cortical space.Anatomical constraints might determine the topology of functional representations in the brain, which would explain the correspondence between representational and anatomical axes in the ventral temporal cortex (VTC).Superimposition and topology generate predictable spatial convergences and divergences among functional representations, which in turn enable information integration and parallel processing, respectively.Superimposition and topological organization in the VTC generates a series of nested functional representations, the arrangements of which generate a spatial hierarchy of category information.The spatial scale of functional representations may be tied to the level of category abstractness in which more abstract information is represented in larger spatial scales across the VTC.", + number = 8, + journal = "Nature Reviews Neuroscience", + author = "Grill-Spector, Kalanit and Weiner, Kevin S.", + month = aug, + year = 2014, + pages = "536--548", +} + @ARTICLE{Haxby2001-vt, title = "Distributed and overlapping representations of faces and objects