diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index d374bbad52c..f84ccc03e4f 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -25,6 +25,7 @@ Enhancements ~~~~~~~~~~~~ - Add ``cmap`` argument for the :func:`mne.viz.plot_sensors` (:gh:`11720` by :newcontrib:`Gennadiy Belonosov`) - When failing to locate a file, we now print the full path in quotation marks to help spot accidentally added trailing spaces (:gh:`11718` by `Richard Höchenberger`_) +- Added :class:`mne.preprocessing.eyetracking.Calibration` to store eye-tracking calibration info, and :func:`mne.preprocessing.eyetracking.read_eyelink_calibration` to read calibration data from EyeLink systems (:gh:`11719` by `Scott Huberty`_) Bugs ~~~~ @@ -35,7 +36,9 @@ Bugs - Fix bug with PySide6 compatibility (:gh:`11721` by `Eric Larson`_) - Fix hanging interpreter with matplotlib figures using ``mne/viz/_mpl_figure.py`` in spyder console and jupyter notebooks (:gh:`11696` by `Mathieu Scheltienne`_) - Fix bug with overlapping text for :meth:`mne.Evoked.plot` (:gh:`11698` by `Alex Rockhill`_) +- For :func:`mne.io.read_raw_eyelink`, the default value of the ``gap_description`` parameter is now ``'BAD_ACQ_SKIP'``, following MNE convention (:gh:`11719` by `Scott Huberty`_) API changes ~~~~~~~~~~~ - The ``baseline`` argument can now be array-like (e.g. ``list``, ``tuple``, ``np.ndarray``, ...) 
instead of only a ``tuple`` (:gh:`11713` by `Clemens Brunner`_) +- Deprecated ``gap_description`` keyword argument of :func:`mne.io.read_raw_eyelink`, which will be removed in mne version 1.6, in favor of using :meth:`mne.Annotations.rename` (:gh:`11719` by `Scott Huberty`_) \ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py index e205439bf01..b30d777292f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -398,6 +398,7 @@ r"\.fromkeys", r"\.items", r"\.keys", + r"\.move_to_end", r"\.pop", r"\.popitem", r"\.setdefault", @@ -735,6 +736,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): ("py:class", "(k, v), remove and return some (key, value) pair as a"), ("py:class", "_FuncT"), # type hint used in @verbose decorator ("py:class", "mne.utils._logging._FuncT"), + ("py:class", "None. Remove all items from od."), ] nitpick_ignore_regex = [ ("py:.*", r"mne\.io\.BaseRaw.*"), diff --git a/doc/file_io.rst b/doc/file_io.rst index 4ddcf7d1d01..c7957fb8468 100644 --- a/doc/file_io.rst +++ b/doc/file_io.rst @@ -61,4 +61,4 @@ Base class: :toctree: generated :template: autosummary/class_no_members.rst - BaseEpochs + BaseEpochs \ No newline at end of file diff --git a/doc/preprocessing.rst b/doc/preprocessing.rst index 0ed960be4b9..7028e7ab307 100644 --- a/doc/preprocessing.rst +++ b/doc/preprocessing.rst @@ -153,6 +153,8 @@ Projections: .. 
autosummary:: :toctree: generated/ + Calibration + read_eyelink_calibration set_channel_types_eyetrack EEG referencing: diff --git a/mne/io/eyelink/__init__.py b/mne/io/eyelink/__init__.py index 77ee7ebc9ef..e8f09e1aee5 100644 --- a/mne/io/eyelink/__init__.py +++ b/mne/io/eyelink/__init__.py @@ -1,6 +1,7 @@ """Module for loading Eye-Tracker data.""" -# Author: Dominik Welke +# Authors: Dominik Welke +# Scott Huberty # # License: BSD-3-Clause diff --git a/mne/io/eyelink/_utils.py b/mne/io/eyelink/_utils.py new file mode 100644 index 00000000000..3e6cf76e2fe --- /dev/null +++ b/mne/io/eyelink/_utils.py @@ -0,0 +1,113 @@ +"""Helper functions for reading eyelink ASCII files.""" +# Authors: Scott Huberty +# License: BSD-3-Clause + +import re +import numpy as np + + +def _find_recording_start(lines): + """Return the first START line in an SR Research EyeLink ASCII file. + + Parameters + ---------- + lines: A list of strings, which are The lines in an eyelink ASCII file. + + Returns + ------- + The line that contains the info on the start of the recording. + """ + for line in lines: + if line.startswith("START"): + return line + raise ValueError("Could not find the start of the recording.") + + +def _parse_validation_line(line): + """Parse a single line of eyelink validation data. + + Parameters + ---------- + line: A string containing a line of validation data from an eyelink + ASCII file. + + Returns + ------- + A list of tuples containing the validation data. + """ + tokens = line.split() + xy = tokens[-6].strip("[]").split(",") # e.g. '960, 540' + xy_diff = tokens[-2].strip("[]").split(",") # e.g. '-1.5, -2.8' + vals = [float(v) for v in [*xy, tokens[-4], *xy_diff]] + vals[3] += vals[0] # pos_x + eye_x i.e. 960 + -1.5 + vals[4] += vals[1] # pos_y + eye_y + + return tuple(vals) + + +def _parse_calibration( + lines, screen_size=None, screen_distance=None, screen_resolution=None +): + """Parse the lines in the given list and returns a list of Calibration instances. 
+ + Parameters + ---------- + lines: A list of strings, which are The lines in an eyelink ASCII file. + + Returns + ------- + A list containing one or more Calibration instances, + one for each calibration that was recorded in the eyelink ASCII file + data. + """ + from ...preprocessing.eyetracking.calibration import Calibration + + regex = re.compile(r"\d+") # for finding numeric characters + calibrations = list() + rec_start = float(_find_recording_start(lines).split()[1]) + + for line_number, line in enumerate(lines): + if ( + "!CAL VALIDATION " in line and "ABORTED" not in line + ): # Start of a calibration + tokens = line.split() + model = tokens[4] # e.g. 'HV13' + this_eye = tokens[6].lower() # e.g. 'left' + timestamp = float(tokens[1]) + onset = (timestamp - rec_start) / 1000.0 # in seconds + avg_error = float(line.split("avg.")[0].split()[-1]) # e.g. 0.3 + max_error = float(line.split("max")[0].split()[-1]) # e.g. 0.9 + + n_points = int(regex.search(model).group()) # e.g. 13 + n_points *= 2 if "LR" in line else 1 # one point per eye if "LR" + # The next n_point lines contain the validation data + points = [] + for validation_index in range(n_points): + subline = lines[line_number + validation_index + 1] + if "!CAL VALIDATION" in subline: + continue # for bino mode, skip the second eye's validation summary + subline_eye = subline.split("at")[0].split()[-1].lower() # e.g. 
'left' + if subline_eye != this_eye: + continue # skip the validation lines for the other eye + point_info = _parse_validation_line(subline) + points.append(point_info) + # Convert the list of validation data into a numpy array + positions = np.array([point[:2] for point in points]) + offsets = np.array([point[2] for point in points]) + gaze = np.array([point[3:] for point in points]) + # create the Calibration instance + calibration = Calibration( + onset=max(0.0, onset), # 0 if calibrated before recording + model=model, + eye=this_eye, + avg_error=avg_error, + max_error=max_error, + positions=positions, + offsets=offsets, + gaze=gaze, + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + ) + calibrations.append(calibration) + return calibrations diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py index e01f46a30b7..5321ddc136d 100644 --- a/mne/io/eyelink/eyelink.py +++ b/mne/io/eyelink/eyelink.py @@ -1,3 +1,5 @@ +"""SR Research Eyelink Load Function.""" + # Authors: Dominik Welke # Scott Huberty # Christian O'Reilly @@ -12,7 +14,7 @@ from ..base import BaseRaw from ..meas_info import create_info from ...annotations import Annotations -from ...utils import logger, verbose, fill_doc, _check_pandas_installed +from ...utils import _check_fname, _check_pandas_installed, fill_doc, logger, verbose EYELINK_COLS = { "timestamp": ("time",), @@ -293,13 +295,13 @@ def read_raw_eyelink( apply_offsets=False, find_overlaps=False, overlap_threshold=0.05, - gap_description="bad_rec_gap", + gap_description=None, ): """Reader for an Eyelink .asc file. Parameters ---------- - fname : str + fname : path-like Path to the eyelink file (.asc). %(preload)s %(verbose)s @@ -318,15 +320,20 @@ def read_raw_eyelink( saccades) if their start times and their stop times are both not separated by more than overlap_threshold. overlap_threshold : float (default 0.05) - Time in seconds. 
Threshold of allowable time-gap between the start and - stop times of the left and right eyes. If gap is larger than threshold, - the :class:`mne.Annotations` will be kept separate (i.e. "blink_L", - "blink_R"). If the gap is smaller than the threshold, the - :class:`mne.Annotations` will be merged (i.e. "blink_both"). - gap_description : str (default 'bad_rec_gap') - If there are multiple recording blocks in the file, the description of + Time in seconds. Threshold of allowable time-gap between both the start and + stop times of the left and right eyes. If the gap is larger than the threshold, + the :class:`mne.Annotations` will be kept separate (i.e. ``"blink_L"``, + ``"blink_R"``). If the gap is smaller than the threshold, the + :class:`mne.Annotations` will be merged and labeled as ``"blink_both"``. + Defaults to ``0.05`` seconds (50 ms), meaning that if the blink start times of + the left and right eyes are separated by less than 50 ms, and the blink stop + times of the left and right eyes are separated by less than 50 ms, then the + blink will be merged into a single :class:`mne.Annotations`. + gap_description : str (default 'BAD_ACQ_SKIP') + This parameter is deprecated and will be removed in 1.6. + Use :meth:`mne.Annotations.rename` instead. This is the description of the annotation that will span across the gap period between the - blocks. Uses 'bad_rec_gap' by default so that these time periods will + blocks. Uses ``'BAD_ACQ_SKIP'`` by default so that these time periods will be considered bad by MNE and excluded from operations like epoching. Returns ------- raw : instance of RawEyelink A Raw object containing eyelink 'eyegaze' channel data. See Also -------- mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + It is common for SR Research Eyelink eye trackers to only record data during trials.
+ To avoid frequent data discontinuities and to ensure that the data is continuous + so that it can be aligned with EEG and MEG data (if applicable), this reader will + preserve the times between recording trials and annotate them with + ``'BAD_ACQ_SKIP'``. """ - extension = Path(fname).suffix + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + extension = fname.suffix if extension not in ".asc": raise ValueError( "This reader can only read eyelink .asc files." - f" Got extension {extension} instead. consult eyelink" - " manual for converting eyelink data format (.edf)" + f" Got extension {extension} instead. consult EyeLink" + " manual for converting EyeLink data format (.edf)" " files to .asc format." ) - return RawEyelink( + raw_eyelink = RawEyelink( fname, preload=preload, verbose=verbose, @@ -357,6 +373,7 @@ def read_raw_eyelink( overlap_threshold=overlap_threshold, gap_desc=gap_description, ) + return raw_eyelink @fill_doc @@ -365,7 +382,7 @@ class RawEyelink(BaseRaw): Parameters ---------- - fname : str + fname : path-like Path to the data file (.XXX). create_annotations : bool | list (default True) Whether to create mne.Annotations from occular events @@ -387,11 +404,15 @@ class RawEyelink(BaseRaw): the :class:`mne.Annotations` will be kept separate (i.e. "blink_L", "blink_R"). If the gap is smaller than the threshold, the :class:`mne.Annotations` will be merged (i.e. "blink_both"). - gap_desc : str (default 'bad_rec_gap') + gap_desc : str If there are multiple recording blocks in the file, the description of the annotation that will span across the gap period between the - blocks. Uses 'bad_rec_gap' by default so that these time periods will - be considered bad by MNE and excluded from operations like epoching. + blocks. Default is ``None``, which uses 'BAD_ACQ_SKIP' by default so that these + timeperiods will be considered bad by MNE and excluded from operations like + epoching. 
Note that this parameter is deprecated and will be removed in 1.6. + Use ``mne.annotations.rename`` instead. + + %(preload)s %(verbose)s @@ -402,23 +423,6 @@ class RawEyelink(BaseRaw): dataframes : dict Dictionary of pandas DataFrames. One for eyetracking samples, and one for each type of eyelink event (blinks, messages, etc) - _sample_lines : list - List of lists, each list is one sample containing eyetracking - X/Y and pupil channel data (+ other channels, if they exist) - _event_lines : dict - Each key contains a list of lists, for an event-type that occurred - during the recording period. Events can vary, from occular events - (blinks, saccades, fixations), to messages from the stimulus - presentation software, or info from a response controller. - _system_lines : list - List of tab delimited strings. Each string is a system message, - that in most cases aren't needed. System messages occur for - Eyelinks DataViewer application. - _tracking_mode : str - Whether whether a single eye was tracked ('monocular'), or both - ('binocular'). 
- _gap_desc : str - The description to be used for annotations returned by _make_gap_annots See Also -------- mne.io.Raw : Documentation of attribute and methods. @@ -435,17 +439,25 @@ def __init__( self, fname, create_annotations=True, preload=False, verbose=None, apply_offsets=False, find_overlaps=False, overlap_threshold=0.05, - gap_desc="bad_rec_gap", + gap_desc=None, ): logger.info("Loading {}".format(fname)) self.fname = Path(fname) - self._sample_lines = None - self._event_lines = None - self._system_lines = None + self._sample_lines = None # sample lines from file + self._event_lines = None # event messages from file + self._system_lines = None # unparsed lines of system messages from file self._tracking_mode = None # assigned in self._infer_col_names self._meas_date = None self._rec_info = None + if gap_desc is None: + gap_desc = "BAD_ACQ_SKIP" + else: + logger.warning( + "gap_description is deprecated in 1.5 and will be removed in 1.6, " + "use raw.annotations.rename to use a description other than " + "'BAD_ACQ_SKIP'" + ) self._gap_desc = gap_desc self.dataframes = {} diff --git a/mne/io/eyelink/tests/test_eyelink.py b/mne/io/eyelink/tests/test_eyelink.py index 51d64ea5ed5..c16970a26dc 100644 --- a/mne/io/eyelink/tests/test_eyelink.py +++ b/mne/io/eyelink/tests/test_eyelink.py @@ -28,6 +28,7 @@ def test_eyetrack_not_data_ch(): @pytest.mark.parametrize( "fname, create_annotations, find_overlaps", [ + (fname, False, False), (fname, False, False), (fname, True, False), (fname, True, True), @@ -37,7 +38,9 @@ def test_eyelink(fname, create_annotations, find_overlaps): """Test reading eyelink asc files.""" raw = read_raw_eyelink( - fname, create_annotations=create_annotations, find_overlaps=find_overlaps + fname, + create_annotations=create_annotations, + find_overlaps=find_overlaps, ) # First, tests that shouldn't change based on function arguments diff --git a/mne/preprocessing/eyetracking/__init__.py b/mne/preprocessing/eyetracking/__init__.py index 7c7f5f42765..c232475b2fc 100644 --- a/mne/preprocessing/eyetracking/__init__.py
+++ b/mne/preprocessing/eyetracking/__init__.py @@ -5,3 +5,4 @@ # License: BSD-3-Clause from .eyetracking import set_channel_types_eyetrack +from .calibration import Calibration, read_eyelink_calibration diff --git a/mne/preprocessing/eyetracking/calibration.py b/mne/preprocessing/eyetracking/calibration.py new file mode 100644 index 00000000000..d6002eaa1f8 --- /dev/null +++ b/mne/preprocessing/eyetracking/calibration.py @@ -0,0 +1,229 @@ +"""Eyetracking Calibration(s) class constructor.""" + +# Authors: Scott Huberty +# Eric Larson +# Adapted from: https://github.com/pyeparse/pyeparse +# License: BSD-3-Clause + +from copy import deepcopy + +import numpy as np + +from ...utils import _check_fname, _validate_type, fill_doc, logger +from ...viz.utils import plt_show + + +@fill_doc +class Calibration(dict): + """Eye-tracking calibration info. + + This data structure behaves like a dictionary. It contains information regarding a + calibration that was conducted during an eye-tracking recording. + + .. note:: + When possible, a Calibration instance should be created with a helper function, + such as :func:`~mne.preprocessing.eyetracking.read_eyelink_calibration`. + + Parameters + ---------- + onset : float + The onset of the calibration in seconds. If the calibration was + performed before the recording started, the onset can be + negative. + model : str + A string, which is the model of the eye-tracking calibration that was applied. + For example ``'H3'`` for a horizontal only 3-point calibration, or ``'HV3'`` + for a horizontal and vertical 3-point calibration. + eye : str + The eye that was calibrated. For example, ``'left'``, or ``'right'``. + avg_error : float + The average error in degrees between the calibration positions and the + actual gaze position. + max_error : float + The maximum error in degrees that occurred between the calibration + positions and the actual gaze position.
+ positions : array-like of float, shape ``(n_calibration_points, 2)`` + The x and y coordinates of the calibration points. + offsets : array-like of float, shape ``(n_calibration_points,)`` + The error in degrees between the calibration position and the actual + gaze position for each calibration point. + gaze : array-like of float, shape ``(n_calibration_points, 2)`` + The x and y coordinates of the actual gaze position for each calibration point. + screen_size : array-like of shape ``(2,)`` + The width and height (in meters) of the screen that the eyetracking + data was collected with. For example ``(.531, .298)`` for a monitor with + a display area of 531 x 298 mm. + screen_distance : float + The distance (in meters) from the participant's eyes to the screen. + screen_resolution : array-like of shape ``(2,)`` + The resolution (in pixels) of the screen that the eyetracking data + was collected with. For example, ``(1920, 1080)`` for a 1920x1080 + resolution display. + """ + + def __init__( + self, + *, + onset, + model, + eye, + avg_error, + max_error, + positions, + offsets, + gaze, + screen_size=None, + screen_distance=None, + screen_resolution=None, + ): + super().__init__( + onset=onset, + model=model, + eye=eye, + avg_error=avg_error, + max_error=max_error, + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + positions=positions, + offsets=offsets, + gaze=gaze, + ) + + def __repr__(self): + """Return a summary of the Calibration object.""" + return ( + f"Calibration |\n" + f" onset: {self['onset']} seconds\n" + f" model: {self['model']}\n" + f" eye: {self['eye']}\n" + f" average error: {self['avg_error']} degrees\n" + f" max error: {self['max_error']} degrees\n" + f" screen size: {self['screen_size']} meters\n" + f" screen distance: {self['screen_distance']} meters\n" + f" screen resolution: {self['screen_resolution']} pixels\n" + ) + + def copy(self): + """Copy the instance. 
+ + Returns + ------- + cal : instance of Calibration + The copied Calibration. + """ + return deepcopy(self) + + def plot(self, title=None, show_offsets=True, axes=None, show=True): + """Visualize calibration. + + Parameters + ---------- + title : str + The title to be displayed. Defaults to ``None``, which uses a generic title. + show_offsets : bool + Whether to display the offset (in visual degrees) of each calibration + point or not. Defaults to ``True``. + axes : instance of matplotlib.axes.Axes | None + Axes to draw the calibration positions to. If ``None`` (default), a new axes + will be created. + show : bool + Whether to show the figure or not. Defaults to ``True``. + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The resulting figure object for the calibration plot. + """ + import matplotlib.pyplot as plt + + msg = "positions and gaze keys must both be 2D numpy arrays." + assert isinstance(self["positions"], np.ndarray), msg + assert isinstance(self["gaze"], np.ndarray), msg + + if axes is not None: + from matplotlib.axes import Axes + + _validate_type(axes, Axes, "axes") + ax = axes + fig = ax.get_figure() + else: # create new figure and axes + fig, ax = plt.subplots(constrained_layout=True) + px, py = self["positions"].T + gaze_x, gaze_y = self["gaze"].T + + if title is None: + ax.set_title(f"Calibration ({self['eye']} eye)") + else: + ax.set_title(title) + ax.set_xlabel("x (pixels)") + ax.set_ylabel("y (pixels)") + + # Display avg_error and max_error in the top left corner + text = ( + f"avg_error: {self['avg_error']} deg.\nmax_error: {self['max_error']} deg." 
+ ) + ax.text( + 0, + 1.01, + text, + transform=ax.transAxes, + verticalalignment="baseline", + fontsize=8, + ) + + # Invert y-axis because the origin is in the top left corner + ax.invert_yaxis() + ax.scatter(px, py, color="gray") + ax.scatter(gaze_x, gaze_y, color="red", alpha=0.5) + + if show_offsets: + for i in range(len(px)): + x_offset = 0.01 * gaze_x[i] # 1% to the right of the gazepoint + text = ax.text( + x=gaze_x[i] + x_offset, + y=gaze_y[i], + s=self["offsets"][i], + fontsize=8, + ha="left", + va="center", + ) + + plt_show(show) + return fig + + +@fill_doc +def read_eyelink_calibration( + fname, screen_size=None, screen_distance=None, screen_resolution=None +): + """Return info on calibrations collected in an eyelink file. + + Parameters + ---------- + fname : path-like + Path to the eyelink file (.asc). + screen_size : array-like of shape ``(2,)`` + The width and height (in meters) of the screen that the eyetracking + data was collected with. For example ``(.531, .298)`` for a monitor with + a display area of 531 x 298 mm. Defaults to ``None``. + screen_distance : float + The distance (in meters) from the participant's eyes to the screen. + Defaults to ``None``. + screen_resolution : array-like of shape ``(2,)`` + The resolution (in pixels) of the screen that the eyetracking data + was collected with. For example, ``(1920, 1080)`` for a 1920x1080 + resolution display. Defaults to ``None``. + + Returns + ------- + calibrations : list + A list of :class:`~mne.preprocessing.eyetracking.Calibration` instances, one for + each eye of every calibration that was performed during the recording session. 
+ """ + from ...io.eyelink._utils import _parse_calibration + + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + logger.info("Reading calibration data from {}".format(fname)) + lines = fname.read_text(encoding="ASCII").splitlines() + return _parse_calibration(lines, screen_size, screen_distance, screen_resolution) diff --git a/mne/preprocessing/eyetracking/tests/__init__.py b/mne/preprocessing/eyetracking/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mne/preprocessing/eyetracking/tests/test_calibration.py b/mne/preprocessing/eyetracking/tests/test_calibration.py new file mode 100644 index 00000000000..21a0d8b35ea --- /dev/null +++ b/mne/preprocessing/eyetracking/tests/test_calibration.py @@ -0,0 +1,247 @@ +import pytest + +import numpy as np + +from mne.datasets.testing import data_path, requires_testing_data +from ..calibration import Calibration, read_eyelink_calibration + +# for test_read_eylink_calibration +testing_path = data_path(download=False) +fname = testing_path / "eyetrack" / "test_eyelink.asc" + +# for test_create_calibration +POSITIONS = np.array([[115.0, 540.0], [960.0, 540.0], [1804.0, 540.0]]) +OFFSETS = np.array([0.42, 0.23, 0.17]) +GAZES = np.array([[101.5, 554.8], [9.9, -4.1], [1795.9, 539.0]]) + +EXPECTED_REPR = ( + "Calibration |\n" + " onset: 0 seconds\n" + " model: H3\n" + " eye: right\n" + " average error: 0.5 degrees\n" + " max error: 1.0 degrees\n" + " screen size: (0.531, 0.298) meters\n" + " screen distance: 0.065 meters\n" + " screen resolution: (1920, 1080) pixels\n" +) + + +@pytest.mark.parametrize( + ( + "onset, model, eye, avg_error, max_error, positions, offsets, gaze," + " screen_size, screen_distance, screen_resolution" + ), + [ + ( + 0, + "H3", + "right", + 0.5, + 1.0, + POSITIONS, + OFFSETS, + GAZES, + (0.531, 0.298), + 0.065, + (1920, 1080), + ), + (None, None, None, None, None, None, None, None, None, None, None), + ], +) +def test_create_calibration( + 
onset, + model, + eye, + avg_error, + max_error, + positions, + offsets, + gaze, + screen_size, + screen_distance, + screen_resolution, +): + """Test creating a Calibration object.""" + kwargs = dict( + onset=onset, + model=model, + eye=eye, + avg_error=avg_error, + max_error=max_error, + positions=positions, + offsets=offsets, + gaze=gaze, + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + ) + cal = Calibration(**kwargs) + assert cal["onset"] == onset + assert cal["model"] == model + assert cal["eye"] == eye + assert cal["avg_error"] == avg_error + assert cal["max_error"] == max_error + if positions is not None: + assert isinstance(cal["positions"], np.ndarray) + assert np.array_equal(cal["positions"], np.array(POSITIONS)) + else: + assert cal["positions"] is None + if offsets is not None: + assert isinstance(cal["offsets"], np.ndarray) + assert np.array_equal(cal["offsets"], np.array(OFFSETS)) + if gaze is not None: + assert isinstance(cal["gaze"], np.ndarray) + assert np.array_equal(cal["gaze"], np.array(GAZES)) + assert cal["screen_size"] == screen_size + assert cal["screen_distance"] == screen_distance + assert cal["screen_resolution"] == screen_resolution + # test copy method + copied_obj = cal.copy() + # Check if the copied object is an instance of Calibration + assert isinstance(copied_obj, Calibration) + # Check if the an attribute of the copied object is equal to the original object + assert copied_obj["onset"] == cal["onset"] + # Modify the copied object and check if it is independent from the original object + copied_obj["onset"] = 20 + assert copied_obj["onset"] != cal["onset"] + # test __repr__ + if cal["onset"] is not None: + assert repr(cal) == EXPECTED_REPR # test __repr__ + + +@requires_testing_data +@pytest.mark.parametrize("fname", [(fname)]) +def test_read_calibration(fname): + """Test reading calibration data from an eyelink asc file.""" + calibrations = read_eyelink_calibration(fname) + # 
These numbers were pulled from the file and confirmed. + POSITIONS_L = ( + [960, 540], + [960, 92], + [960, 987], + [115, 540], + [1804, 540], + [216, 145], + [1703, 145], + [216, 934], + [1703, 934], + [537, 316], + [1382, 316], + [537, 763], + [1382, 763], + ) + + DIFF_L = ( + [9.9, -4.1], + [-7.8, 16.0], + [-1.9, -14.2], + [13.5, -14.8], + [8.1, 1.0], + [-7.0, -15.4], + [-10.1, -1.4], + [-0.3, 6.9], + [-32.3, -28.1], + [8.2, 7.6], + [9.6, 2.1], + [-10.6, -2.0], + [-11.8, 8.4], + ) + GAZE_L = np.array(POSITIONS_L) + np.array(DIFF_L) + + POSITIONS_R = ( + [960, 540], + [960, 92], + [960, 987], + [115, 540], + [1804, 540], + [216, 145], + [1703, 145], + [216, 934], + [1703, 934], + [537, 316], + [1382, 316], + [537, 763], + [1382, 763], + ) + DIFF_R = ( + [-5.2, -16.1], + [23.7, 1.3], + [2.0, -9.3], + [4.4, 1.5], + [-6.5, -12.7], + [16.6, -7.5], + [5.7, -1.8], + [15.4, -3.5], + [-2.0, -10.2], + [0.1, 8.3], + [1.9, -15.8], + [-24.8, -2.3], + [3.2, -9.2], + ) + GAZE_R = np.array(POSITIONS_R) + np.array(DIFF_R) + + OFFSETS_R = [ + 0.36, + 0.50, + 0.20, + 0.10, + 0.30, + 0.38, + 0.13, + 0.33, + 0.22, + 0.18, + 0.34, + 0.52, + 0.21, + ] + + assert len(calibrations) == 2 # calibration[0] is left, calibration[1] is right + assert calibrations[0]["onset"] == 0 + assert calibrations[1]["onset"] == 0 + assert calibrations[0]["model"] == "HV13" + assert calibrations[1]["model"] == "HV13" + assert calibrations[0]["eye"] == "left" + assert calibrations[1]["eye"] == "right" + assert calibrations[0]["avg_error"] == 0.30 + assert calibrations[0]["max_error"] == 0.90 + assert calibrations[1]["avg_error"] == 0.31 + assert calibrations[1]["max_error"] == 0.52 + assert np.array_equal(POSITIONS_L, calibrations[0]["positions"]) + assert np.array_equal(POSITIONS_R, calibrations[1]["positions"]) + assert np.array_equal(GAZE_L, calibrations[0]["gaze"]) + assert np.array_equal(GAZE_R, calibrations[1]["gaze"]) + assert np.array_equal(OFFSETS_R, calibrations[1]["offsets"]) + + 
+@requires_testing_data +@pytest.mark.parametrize( + "fname, axes", + [(fname, None), (fname, True)], +) +def test_plot_calibration(fname, axes): + """Test plotting calibration data.""" + import matplotlib.pyplot as plt + + # Set the non-interactive backend + plt.switch_backend("agg") + + if axes: + axes = plt.subplot() + calibrations = read_eyelink_calibration(fname) + cal_left = calibrations[0] + fig = cal_left.plot(show=True, show_offsets=True, axes=axes) + ax = fig.axes[0] + + scatter1 = ax.collections[0] + scatter2 = ax.collections[1] + px, py = cal_left["positions"].T + gaze_x, gaze_y = cal_left["gaze"].T + + assert ax.title.get_text() == f"Calibration ({cal_left['eye']} eye)" + assert len(ax.collections) == 2 # Two scatter plots + + assert np.allclose(scatter1.get_offsets(), np.column_stack((px, py))) + assert np.allclose(scatter2.get_offsets(), np.column_stack((gaze_x, gaze_y))) + plt.close(fig) diff --git a/tutorials/preprocessing/90_eyetracking_data.py b/tutorials/preprocessing/90_eyetracking_data.py index 3c3a9d84b09..07b6846f768 100644 --- a/tutorials/preprocessing/90_eyetracking_data.py +++ b/tutorials/preprocessing/90_eyetracking_data.py @@ -36,12 +36,58 @@ from mne import Epochs, find_events from mne.io import read_raw_eyelink from mne.datasets.eyelink import data_path +from mne.preprocessing.eyetracking import read_eyelink_calibration eyelink_fname = data_path() / "mono_multi-block_multi-DINS.asc" raw = read_raw_eyelink(eyelink_fname, create_annotations=["blinks", "messages"]) raw.crop(tmin=0, tmax=146) +# %% +# Checking the calibration +# ------------------------ +# +# We can also load the calibrations from the recording and visualize them. +# Checking the quality of the calibration is a useful first step in assessing +# the quality of the eye tracking data. Note that +# :func:`~mne.preprocessing.eyetracking.read_eyelink_calibration` +# will return a list of :class:`~mne.preprocessing.eyetracking.Calibration` instances, +# one for each calibration. 
We can index that list to access a specific calibration. + +cals = read_eyelink_calibration(eyelink_fname) +print(f"number of calibrations: {len(cals)}") +first_cal = cals[0] # let's access the first (and only in this case) calibration +print(first_cal) + +# %% +# Here we can see that a 5-point calibration was performed at the beginning of +# the recording. Note that you can access the calibration information using +# dictionary style indexing: + +print(f"Eye calibrated: {first_cal['eye']}") +print(f"Calibration model: {first_cal['model']}") +print(f"Calibration average error: {first_cal['avg_error']}") + +# %% +# The data for individual calibration points are stored as :class:`numpy.ndarray` +# arrays, in the ``'positions'``, ``'gaze'``, and ``'offsets'`` keys. ``'positions'`` +# contains the x and y coordinates of each calibration point. ``'gaze'`` contains the +# x and y coordinates of the actual gaze position for each calibration point. +# ``'offsets'`` contains the offset (in visual degrees) between the calibration position +# and the actual gaze position for each calibration point. Below is an example of +# how to access these data: +print(f"offset of the first calibration point: {first_cal['offsets'][0]}") +print(f"offset for each calibration point: {first_cal['offsets']}") +print(f"x-coordinate for each calibration point: {first_cal['positions'].T[0]}") + +# %% +# Let's plot the calibration to get a better look. Below we see the location that each +# calibration point was displayed (gray dots), the positions of the actual gaze (red), +# and the offsets (in visual degrees) between the calibration position and the actual +# gaze position of each calibration point. + +first_cal.plot(show_offsets=True) + # %% # Get stimulus events from DIN channel # ------------------------------------ @@ -70,7 +116,8 @@ # categorized as blinks). Also, notice that we have passed a custom `dict` into # the scalings argument of ``raw.plot``. 
This is necessary to make the eyegaze # channel traces legible when plotting, since the file contains pixel position -# data (as opposed to eye angles, which are reported in radians). +# data (as opposed to eye angles, which are reported in radians). We also could +# have simply passed ``scalings='auto'``. raw.plot( events=events, @@ -102,7 +149,7 @@ # It is important to note that pupil size data are reported by Eyelink (and # stored internally by MNE) as arbitrary units (AU). While it often can be # preferable to convert pupil size data to millimeters, this requires -# information that is not always present in the file. MNE does not currently +# information that is not present in the file. MNE does not currently # provide methods to convert pupil size data. # See :ref:`tut-importing-eyetracking-data` for more information on pupil size # data.