diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 616a346d9da..05b2ae8552d 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -83,7 +83,6 @@ jobs:
     MNE_FORCE_SERIAL: 'true'
     OPENBLAS_NUM_THREADS: 1
     AZURE_CI_WINDOWS: 'true'
-    PYTHON_VERSION: '3.7'
     CONDA_VERSION: '>=4.3.27'
   strategy:
     maxParallel: 4
@@ -96,32 +95,45 @@ jobs:
         TEST_MODE: 'conda'
         CONDA_ENV: 'environment.yml'
         PIP_NO_UPGRADE: 'True'
+        PYTHON_VERSION: '3.7'
       Python37-64bit-full-pip:
         PYTHON_ARCH: 'x64'
         TEST_MODE: 'pip'
+        PYTHON_VERSION: '3.7'
+      Python38-64bit-pre-pip:
+        PYTHON_ARCH: 'x64'
+        TEST_MODE: 'pre-pip'
+        PYTHON_VERSION: '3.8'
   steps:
   - task: UsePythonVersion@0
     inputs:
       versionSpec: $(PYTHON_VERSION)
       architecture: $(PYTHON_ARCH)
       addToPath: true
-    condition: eq(variables['TEST_MODE'], 'pip')
-  - powershell: |
-      Set-StrictMode -Version Latest
-      $ErrorActionPreference = "Stop"
-      $PSDefaultParameterValues['*:ErrorAction']='Stop'
+    condition: in(variables['TEST_MODE'], 'pip', 'pre-pip')
+  - bash: |
+      set -e
       git clone --depth 1 git://github.com/pyvista/gl-ci-helpers.git
       powershell gl-ci-helpers/appveyor/install_opengl.ps1
     displayName: Install OpenGL
-  - powershell: |
-      Set-StrictMode -Version Latest
-      $ErrorActionPreference = "Stop"
-      $PSDefaultParameterValues['*:ErrorAction']='Stop'
+  - bash: |
+      set -e
       pip install --upgrade numpy scipy vtk
       pip install --upgrade -r requirements.txt
       pip install codecov
     condition: eq(variables['TEST_MODE'], 'pip')
     displayName: 'Install dependencies with pip'
+  # VTK does not have wheels (scheduled Q1 2020):
+  # https://github.com/KitwareMedical/VTKPythonPackage/issues/39
+  # Missing: vtk, mayavi, PySurfer, pyvista, numexpr, python-picard, numba, dipy, statsmodels (doesn't work on pre as of 2019/12/04)
+  - bash: |
+      set -e
+      python -m pip install --upgrade pip setuptools
+      python -m pip install --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" numpy scipy scikit-learn matplotlib h5py pandas Pillow
+      python -m pip install --upgrade nilearn pyqt5 pyqt5-sip nibabel pytest pytest-cov pytest-faulthandler pytest-mock pytest-sugar pytest-timeout joblib psutil https://api.github.com/repos/numpy/numpydoc/zipball/master neo xlrd codecov
+      python -m pip install codecov
+    condition: eq(variables['TEST_MODE'], 'pre-pip')
+    displayName: 'Install dependencies with pip --pre'
   - powershell: |
       Set-StrictMode -Version Latest
       $ErrorActionPreference = "Stop"
diff --git a/mne/conftest.py b/mne/conftest.py
index 82073bdcaf1..5597f1d74ca 100644
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -75,6 +75,8 @@ def pytest_configure(config):
     ignore:.*imp.*:DeprecationWarning
     ignore:Exception creating Regex for oneOf.*:SyntaxWarning
     ignore:scipy\.gradient is deprecated.*:DeprecationWarning
+    ignore:sklearn\.externals\.joblib is deprecated.*:FutureWarning
+    ignore:The sklearn.*module.*deprecated.*:FutureWarning
     always:.*get_data.* is deprecated in favor of.*:DeprecationWarning
     """  # noqa: E501
     for warning_line in warning_lines.split('\n'):
diff --git a/mne/decoding/tests/test_base.py b/mne/decoding/tests/test_base.py
index cdafb47cb43..ecce93936fe 100644
--- a/mne/decoding/tests/test_base.py
+++ b/mne/decoding/tests/test_base.py
@@ -73,19 +73,11 @@ def test_get_coef():
     parameters = {'kernel': ['linear'], 'C': [1, 10]}
     lm_gs_classification = LinearModel(
-        GridSearchCV(
-            svm.SVC(), parameters,
-            cv=2, refit=True, iid=False, n_jobs=1
-        )
-    )
+        GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
     assert (is_classifier(lm_gs_classification))

     lm_gs_regression = LinearModel(
-        GridSearchCV(
-            svm.SVR(), parameters, cv=2,
-            refit=True, iid=False, n_jobs=1
-        )
-    )
+        GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
     assert (is_regressor(lm_gs_regression))

     # Define a classifier, an invertible transformer and an non-invertible one.
@@ -236,11 +228,7 @@ def test_linearmodel():
     from sklearn.model_selection import GridSearchCV
     parameters = {'kernel': ['linear'], 'C': [1, 10]}
     clf = LinearModel(
-        GridSearchCV(
-            svm.SVC(), parameters, cv=2,
-            refit=True, iid=False, n_jobs=1
-        )
-    )
+        GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
     clf.fit(X, y)
     assert_equal(clf.filters_.shape, (n_features,))
     assert_equal(clf.patterns_.shape, (n_features,))
@@ -252,11 +240,7 @@
     n_targets = 1
     Y = rng.rand(n, n_targets)
     clf = LinearModel(
-        GridSearchCV(
-            svm.SVR(), parameters, cv=2,
-            refit=True, iid=False, n_jobs=1
-        )
-    )
+        GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
     clf.fit(X, y)
     assert_equal(clf.filters_.shape, (n_features, ))
     assert_equal(clf.patterns_.shape, (n_features, ))
diff --git a/mne/epochs.py b/mne/epochs.py
index 9c08d822c23..ff22f45721e 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -16,6 +16,7 @@
 import json
 import operator
 import os.path as op
+import warnings
 from distutils.version import LooseVersion

 import numpy as np
@@ -382,7 +383,9 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
         if events is not None:  # RtEpochs can have events=None
             events_type = type(events)
-            events = np.asarray(events)
+            with warnings.catch_warnings(record=True):
+                warnings.simplefilter('ignore')  # deprecation for object array
+                events = np.asarray(events)
             if not np.issubdtype(events.dtype, np.integer):
                 raise TypeError('events should be a NumPy array of integers, '
                                 'got {}'.format(events_type))
diff --git a/mne/label.py b/mne/label.py
index f00cf756bc3..ee996b988ab 100644
--- a/mne/label.py
+++ b/mne/label.py
@@ -1505,7 +1505,8 @@ def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
     # make sure the inputs are arrays
     if np.isscalar(seeds):
         seeds = [seeds]
-    seeds = np.atleast_1d([np.atleast_1d(seed) for seed in seeds])
+    # these can have different sizes so need to use object array
+    seeds = np.asarray([np.atleast_1d(seed) for seed in seeds], dtype='O')
     extents = np.atleast_1d(extents)
     hemis = np.atleast_1d(hemis)
     n_seeds = len(seeds)
diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py
index 1bbb36d3032..efe3aaf914c 100644
--- a/mne/tests/test_epochs.py
+++ b/mne/tests/test_epochs.py
@@ -321,7 +321,7 @@ def test_reject():
         epochs.drop_bad()
         assert_equal(len(epochs), len(events))
         assert_array_equal(epochs.selection, np.arange(len(events)))
-        assert_array_equal(epochs.drop_log, [[]] * 7)
+        assert epochs.drop_log == [[]] * 7
         if proj not in data_7:
             data_7[proj] = epochs.get_data()
         assert_array_equal(epochs.get_data(), data_7[proj])
@@ -332,7 +332,7 @@
         epochs.drop_bad()
         assert_equal(len(epochs), len(events) - 4)
         assert_array_equal(epochs.selection, selection)
-        assert_array_equal(epochs.drop_log, drop_log)
+        assert epochs.drop_log == drop_log
         assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])

         # rejection post-hoc
@@ -345,7 +345,7 @@
         assert_equal(len(epochs), len(events) - 4)
         assert_equal(len(epochs), len(epochs.get_data()))
         assert_array_equal(epochs.selection, selection)
-        assert_array_equal(epochs.drop_log, drop_log)
+        assert epochs.drop_log == drop_log
         assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])

         # rejection twice
@@ -357,7 +357,7 @@
         epochs.drop_bad(reject)
         assert_equal(len(epochs), len(events) - 4)
         assert_array_equal(epochs.selection, selection)
-        assert_array_equal(epochs.drop_log, drop_log)
+        assert epochs.drop_log == drop_log
         assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])

         # ensure that thresholds must become more stringent, not less
@@ -957,7 +957,7 @@ def test_epochs_io_preload(tmpdir, preload):
     epochs_read5 = read_epochs(temp_fname, preload=preload)
     assert_array_equal(epochs_read5.selection, epochs.selection)
     assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
-    assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
+    assert epochs_read5.drop_log == epochs.drop_log

     if preload:
         # Test that one can drop channels on read file
@@ -986,7 +986,7 @@
     assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
     assert_array_equal(epochs.events, epochs_read.events)
     assert_array_equal(epochs.selection, epochs_read.selection)
-    assert_equal(epochs.drop_log, epochs_read.drop_log)
+    assert epochs.drop_log == epochs_read.drop_log

     # Test that having a single time point works
     epochs.load_data().crop(0, 0)
diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py
index 88fa51fc3f3..05031730ec7 100644
--- a/mne/viz/tests/test_3d.py
+++ b/mne/viz/tests/test_3d.py
@@ -32,7 +32,7 @@
                      plot_sensors_connectivity, plot_brain_colorbar)
 from mne.viz.utils import _fake_click
 from mne.utils import (requires_mayavi, requires_pysurfer, run_tests_if_main,
-                       requires_nibabel, check_version,
+                       requires_nibabel, check_version, requires_dipy,
                        traits_test, requires_version, catch_logging)
 from mne.datasets import testing
 from mne.source_space import read_source_spaces
@@ -480,6 +480,7 @@ def test_snapshot_brain_montage(renderer):

 @pytest.mark.slowtest  # can be slow on OSX
 @testing.requires_testing_data
+@requires_dipy()
 @requires_nibabel()
 @requires_version('nilearn', '0.4')
 @pytest.mark.parametrize('mode, stype, init_t, want_t, init_p, want_p', [
@@ -525,6 +526,7 @@ def test_plot_volume_source_estimates(mode, stype, init_t, want_t,

 @pytest.mark.slowtest  # can be slow on OSX
 @testing.requires_testing_data
+@requires_dipy()
 @requires_nibabel()
 @requires_version('nilearn', '0.4')
 def test_plot_volume_source_estimates_morph():
diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py
index 2158ff015bb..7d22e1c45a2 100644
--- a/mne/viz/topomap.py
+++ b/mne/viz/topomap.py
@@ -338,7 +338,7 @@ def plot_projs_topomap(projs, layout=None, cmap=None, sensors=True,
     if vlim == 'joint':
         for _ch_type in set(types):
             idx = np.where(np.in1d(types, _ch_type))[0]
-            these_data = np.concatenate(np.array(datas)[idx])
+            these_data = np.concatenate(np.array(datas, dtype=object)[idx])
             norm = all(these_data >= 0)
             _vl = _setup_vmin_vmax(these_data, vmin=None, vmax=None, norm=norm)
             for _idx in idx:
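
Note: the object-array changes above (mne/epochs.py, mne/label.py, mne/viz/topomap.py, and the switch from assert_array_equal to plain == for drop_log) all work around the same NumPy behaviour: building an array from sequences of unequal length no longer creates an object array implicitly and instead warns (and newer releases reject it). A minimal sketch of the pattern the patch adopts; the ragged seeds input below is made up for illustration, not taken from MNE:

    import numpy as np

    # hypothetical ragged input: one vertex list per seed, different lengths
    seeds = [[1, 2, 3], [4]]
    # requesting dtype=object explicitly sidesteps the ragged-array deprecation
    seeds = np.asarray([np.atleast_1d(s) for s in seeds], dtype='O')
    assert seeds.dtype == object and len(seeds) == 2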