From c97e6bd2352e59bd2b63913395fc8d8744cf3553 Mon Sep 17 00:00:00 2001
From: Clemens Brunner
Date: Fri, 29 Oct 2021 19:38:58 +0200
Subject: [PATCH 01/23] Replace tmpdir with tmp_path

---
 mne/beamformer/tests/test_dics.py | 4 +-
 mne/beamformer/tests/test_lcmv.py | 4 +-
 mne/channels/tests/test_channels.py | 4 +-
 mne/channels/tests/test_layout.py | 16 +-
 mne/channels/tests/test_montage.py | 42 +++---
 mne/commands/tests/test_commands.py | 40 +++---
 mne/conftest.py | 6 +-
 .../sleep_physionet/tests/test_physionet.py | 48 +++----
 mne/datasets/tests/test_datasets.py | 44 +++---
 mne/export/tests/test_export.py | 12 +-
 mne/forward/tests/test_forward.py | 18 +--
 mne/forward/tests/test_make_forward.py | 30 ++--
 mne/gui/tests/test_coreg_gui.py | 4 +-
 mne/gui/tests/test_file_traits.py | 8 +-
 mne/gui/tests/test_ieeg_locate_gui.py | 8 +-
 mne/gui/tests/test_kit2fiff_gui.py | 8 +-
 mne/gui/tests/test_marker_gui.py | 4 +-
 mne/io/artemis123/tests/test_artemis123.py | 4 +-
 mne/io/brainvision/tests/test_brainvision.py | 46 +++---
 mne/io/ctf/tests/test_ctf.py | 10 +-
 mne/io/curry/tests/test_curry.py | 16 +--
 mne/io/edf/tests/test_edf.py | 24 ++--
 mne/io/edf/tests/test_gdf.py | 4 +-
 mne/io/eeglab/tests/test_eeglab.py | 36 ++---
 mne/io/egi/tests/test_egi.py | 8 +-
 mne/io/fiff/tests/test_raw_fiff.py | 136 +++++++++---------
 mne/io/hitachi/tests/test_hitachi.py | 4 +-
 mne/io/kit/tests/test_coreg.py | 4 +-
 mne/io/kit/tests/test_kit.py | 8 +-
 mne/io/nirx/tests/test_nirx.py | 24 ++--
 mne/io/persyst/tests/test_persyst.py | 16 +--
 mne/io/snirf/tests/test_snirf.py | 6 +-
 mne/io/tests/test_compensator.py | 8 +-
 mne/io/tests/test_constants.py | 20 +--
 mne/io/tests/test_meas_info.py | 56 ++++----
 mne/io/tests/test_what.py | 4 +-
 mne/io/tests/test_write.py | 4 +-
 mne/minimum_norm/tests/test_inverse.py | 22 +--
 mne/minimum_norm/tests/test_snr.py | 4 +-
 .../ieeg/tests/test_projection.py | 4 +-
 .../nirs/tests/test_beer_lambert_law.py | 6 +-
 .../nirs/tests/test_scalp_coupling_index.py | 2 +-
 ...temporal_derivative_distribution_repair.py | 2 +-
 mne/preprocessing/tests/test_fine_cal.py | 4 +-
 mne/preprocessing/tests/test_ica.py | 34 ++---
 mne/preprocessing/tests/test_maxwell.py | 16 +--
 mne/report/tests/test_report.py | 80 +++++------
 mne/simulation/tests/test_raw.py | 4 +-
 mne/stats/tests/test_cluster_level.py | 4 +-
 mne/tests/test_annotations.py | 44 +++---
 mne/tests/test_bem.py | 30 ++--
 mne/tests/test_chpi.py | 8 +-
 mne/tests/test_coreg.py | 8 +-
 mne/tests/test_cov.py | 34 ++---
 mne/tests/test_dipole.py | 28 ++--
 mne/tests/test_epochs.py | 90 ++++++------
 mne/tests/test_event.py | 12 +-
 mne/tests/test_evoked.py | 38 ++---
 mne/tests/test_filter.py | 4 +-
 mne/tests/test_freesurfer.py | 4 +-
 mne/tests/test_label.py | 16 +--
 mne/tests/test_line_endings.py | 4 +-
 mne/tests/test_morph.py | 20 +--
 mne/tests/test_morph_map.py | 4 +-
 mne/tests/test_proj.py | 8 +-
 mne/tests/test_source_estimate.py | 50 +++----
 mne/tests/test_source_space.py | 56 ++++----
 mne/tests/test_surface.py | 4 +-
 mne/tests/test_transforms.py | 8 +-
 mne/time_frequency/tests/test_csd.py | 8 +-
 mne/time_frequency/tests/test_tfr.py | 4 +-
 mne/utils/tests/test_check.py | 8 +-
 mne/utils/tests/test_config.py | 4 +-
 mne/utils/tests/test_logging.py | 8 +-
 mne/utils/tests/test_numerics.py | 4 +-
 mne/utils/tests/test_testing.py | 8 +-
 mne/viz/_brain/tests/test_brain.py | 24 ++--
 mne/viz/backends/tests/test_renderer.py | 4 +-
 mne/viz/tests/test_3d.py | 10 +-
 79 files changed, 738 insertions(+), 734 deletions(-)

diff --git a/mne/beamformer/tests/test_dics.py
b/mne/beamformer/tests/test_dics.py index 5cf8f4653b3..62509846ad7 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -150,7 +150,7 @@ def _make_rand_csd(info, csd): pytest.param(False, marks=pytest.mark.slowtest), True, ]) -def test_make_dics(tmpdir, _load_forward, idx, whiten): +def test_make_dics(tmp_path, _load_forward, idx, whiten): """Test making DICS beamformer filters.""" # We only test proper handling of parameters here. Testing the results is # done in test_apply_dics_timeseries and test_apply_dics_csd. @@ -319,7 +319,7 @@ def test_make_dics(tmpdir, _load_forward, idx, whiten): # Test whether spatial filter contains src_type assert 'src_type' in filters - fname = op.join(str(tmpdir), 'filters-dics.h5') + fname = op.join(str(tmp_path), 'filters-dics.h5') filters.save(fname) filters_read = read_beamformer(fname) assert isinstance(filters, Beamformer) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 54fca235551..0dab6f5812f 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -214,7 +214,7 @@ def test_lcmv_vector(): (0.01, False, 'surface'), (0., True, 'surface'), ]) -def test_make_lcmv_bem(tmpdir, reg, proj, kind): +def test_make_lcmv_bem(tmp_path, reg, proj, kind): """Test LCMV with evoked data and single trials.""" raw, epochs, evoked, data_cov, noise_cov, label, forward,\ forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj) @@ -304,7 +304,7 @@ def test_make_lcmv_bem(tmpdir, reg, proj, kind): assert 'rank %s' % rank in repr(filters) # I/O - fname = op.join(str(tmpdir), 'filters.h5') + fname = op.join(str(tmp_path), 'filters.h5') with pytest.warns(RuntimeWarning, match='-lcmv.h5'): filters.save(fname) filters_read = read_beamformer(fname) diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 2543e2e1a80..614e5d175cd 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -178,9 +178,9 @@ def test_set_channel_types(): pytest.raises(ValueError, raw.set_channel_types, ch_types) -def test_read_ch_adjacency(tmpdir): +def test_read_ch_adjacency(tmp_path): """Test reading channel adjacency templates.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) a = partial(np.array, dtype=' 0: @@ -235,7 +235,7 @@ def _compare_combo(raw, new, times, n_times): @pytest.mark.slowtest @testing.requires_testing_data -def test_multiple_files(tmpdir): +def test_multiple_files(tmp_path): """Test loading multiple files simultaneously.""" # split file raw = read_raw_fif(fif_fname).crop(0, 10) @@ -253,7 +253,7 @@ def test_multiple_files(tmpdir): # going in reverse order so the last fname is the first file (need later) raws = [None] * len(tmins) for ri in range(len(tmins) - 1, -1, -1): - fname = tmpdir.join('test_raw_split-%d_raw.fif' % ri) + fname = tmp_path.join('test_raw_split-%d_raw.fif' % ri) raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri]) raws[ri] = read_raw_fif(fname) assert (len(raws[ri].times) == @@ -387,17 +387,17 @@ def test_concatenate_raws(on_mismatch): 'ignore:.*naming conventions.*:RuntimeWarning'), pytest.mark.slowtest]), )) -def test_split_files(tmpdir, mod, monkeypatch): +def test_split_files(tmp_path, mod, monkeypatch): """Test writing and reading of split raw files.""" raw_1 = read_raw_fif(fif_fname, preload=True) # Test a very close corner case assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2) # samp rate - split_fname = tmpdir.join(f'split_raw_{mod}.fif') + split_fname = 
tmp_path.join(f'split_raw_{mod}.fif') # intended filenames - split_fname_elekta_part2 = tmpdir.join(f'split_raw_{mod}-1.fif') - split_fname_bids_part1 = tmpdir.join(f'split_raw_split-01_{mod}.fif') - split_fname_bids_part2 = tmpdir.join(f'split_raw_split-02_{mod}.fif') + split_fname_elekta_part2 = tmp_path.join(f'split_raw_{mod}-1.fif') + split_fname_bids_part1 = tmp_path.join(f'split_raw_split-01_{mod}.fif') + split_fname_bids_part2 = tmp_path.join(f'split_raw_split-02_{mod}.fif') raw_1.set_annotations(Annotations([2.], [5.5], 'test')) # Check that if BIDS is used and no split is needed it defaults to @@ -432,7 +432,7 @@ def test_split_files(tmpdir, mod, monkeypatch): annot = Annotations(np.arange(20), np.ones((20,)), 'test') raw_1.set_annotations(annot) - split_fname = op.join(tmpdir, 'split_raw.fif') + split_fname = op.join(tmp_path, 'split_raw.fif') raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB') raw_2 = read_raw_fif(split_fname) assert_allclose(raw_2.buffer_size_sec, 1., atol=1e-2) # samp rate @@ -512,18 +512,18 @@ def test_split_files(tmpdir, mod, monkeypatch): assert_allclose(raw_crop[:][0], raw_read[:][0]) # proper ending - assert op.isdir(tmpdir) + assert op.isdir(tmp_path) with pytest.raises(ValueError, match='must end with an underscore'): raw_crop.save( - tmpdir.join('test.fif'), split_naming='bids', verbose='error') + tmp_path.join('test.fif'), split_naming='bids', verbose='error') # reserved file is deleted - fname = tmpdir.join('test_raw.fif') + fname = tmp_path.join('test_raw.fif') monkeypatch.setattr(base, '_write_raw_fid', _err) with pytest.raises(RuntimeError, match='Killed mid-write'): raw_1.save(fname, split_size='10MB', split_naming='bids') assert op.isfile(fname) - assert not op.isfile(tmpdir.join('test_split-01_raw.fif')) + assert not op.isfile(tmp_path.join('test_split-01_raw.fif')) def _err(*args, **kwargs): @@ -535,12 +535,12 @@ def _no_write_file_name(fid, kind, data): return -def test_split_numbers(tmpdir, monkeypatch): +def test_split_numbers(tmp_path, monkeypatch): """Test handling of split files using numbers instead of names.""" monkeypatch.setattr(base, 'write_string', _no_write_file_name) raw = read_raw_fif(test_fif_fname).pick('eeg') # gh-8339 - dashes_fname = tmpdir.join('sub-1_ses-2_task-3_raw.fif') + dashes_fname = tmp_path.join('sub-1_ses-2_task-3_raw.fif') raw.save(dashes_fname, split_size='5MB', buffer_size_sec=1.) 
assert op.isfile(dashes_fname) @@ -551,7 +551,7 @@ def test_split_numbers(tmpdir, monkeypatch): assert_allclose(raw.get_data(), raw_read.get_data(), atol=1e-16) -def test_load_bad_channels(tmpdir): +def test_load_bad_channels(tmp_path): """Test reading/writing of bad channels.""" # Load correctly marked file (manually done in mne_process_raw) raw_marked = read_raw_fif(fif_bad_marked_fname) @@ -563,8 +563,8 @@ def test_load_bad_channels(tmpdir): # Test normal case raw.load_bad_channels(bad_file_works) # Write it out, read it in, and check - raw.save(tmpdir.join('foo_raw.fif')) - raw_new = read_raw_fif(tmpdir.join('foo_raw.fif')) + raw.save(tmp_path.join('foo_raw.fif')) + raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) assert correct_bads == raw_new.info['bads'] # Reset it raw.info['bads'] = [] @@ -576,20 +576,20 @@ def test_load_bad_channels(tmpdir): with pytest.warns(RuntimeWarning, match='1 bad channel'): raw.load_bad_channels(bad_file_wrong, force=True) # write it out, read it in, and check - raw.save(tmpdir.join('foo_raw.fif'), overwrite=True) - raw_new = read_raw_fif(tmpdir.join('foo_raw.fif')) + raw.save(tmp_path.join('foo_raw.fif'), overwrite=True) + raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) assert correct_bads == raw_new.info['bads'] # Check that bad channels are cleared raw.load_bad_channels(None) - raw.save(tmpdir.join('foo_raw.fif'), overwrite=True) - raw_new = read_raw_fif(tmpdir.join('foo_raw.fif')) + raw.save(tmp_path.join('foo_raw.fif'), overwrite=True) + raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) assert raw_new.info['bads'] == [] @pytest.mark.slowtest @testing.requires_testing_data -def test_io_raw(tmpdir): +def test_io_raw(tmp_path): """Test IO for raw data (Neuromag).""" rng = np.random.RandomState(0) # test unicode io @@ -598,7 +598,7 @@ def test_io_raw(tmpdir): assert ('Raw' in repr(r)) assert (op.basename(fif_fname) in repr(r)) r.info['description'] = chars - temp_file = tmpdir.join('raw.fif') + temp_file = tmp_path.join('raw.fif') r.save(temp_file, overwrite=True) with read_raw_fif(temp_file) as r2: desc2 = r2.info['description'] @@ -611,7 +611,7 @@ def test_io_raw(tmpdir): data = rng.randn(raw._data.shape[0], raw._data.shape[1]) raw._data[:, :] = data # save it somewhere - fname = tmpdir.join('test_copy_raw.fif') + fname = tmp_path.join('test_copy_raw.fif') raw.save(fname, buffer_size_sec=1.0) # read it in, make sure the whole thing matches raw = read_raw_fif(fname) @@ -626,9 +626,9 @@ def test_io_raw(tmpdir): (test_fif_fname, 'raw.fif'), pytest.param(test_fif_gz_fname, 'raw.fif.gz', marks=pytest.mark.slowtest), (ctf_fname, 'raw.fif')]) -def test_io_raw_additional(fname_in, fname_out, tmpdir): +def test_io_raw_additional(fname_in, fname_out, tmp_path): """Test IO for raw data (Neuromag + CTF + gz).""" - fname_out = tmpdir.join(fname_out) + fname_out = tmp_path.join(fname_out) raw = read_raw_fif(fname_in).crop(0, 2) nchan = raw.info['nchan'] @@ -697,7 +697,7 @@ def test_io_raw_additional(fname_in, fname_out, tmpdir): assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r']) # test warnings on bad filenames - raw_badname = tmpdir.join('test-bad-name.fif.gz') + raw_badname = tmp_path.join('test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='raw.fif'): raw.save(raw_badname) with pytest.warns(RuntimeWarning, match='raw.fif'): @@ -706,7 +706,7 @@ def test_io_raw_additional(fname_in, fname_out, tmpdir): @testing.requires_testing_data @pytest.mark.parametrize('dtype', ('complex128', 'complex64')) -def test_io_complex(tmpdir, 
dtype): +def test_io_complex(tmp_path, dtype): """Test IO with complex data types.""" rng = np.random.RandomState(0) n_ch = 5 @@ -717,13 +717,13 @@ def test_io_complex(tmpdir, dtype): raw_cp._data = np.array(raw_cp._data, dtype) raw_cp._data += imag_rand with pytest.warns(RuntimeWarning, match='Saving .* complex data.'): - raw_cp.save(tmpdir.join('raw.fif'), overwrite=True) + raw_cp.save(tmp_path / 'raw.fif', overwrite=True) - raw2 = read_raw_fif(tmpdir.join('raw.fif')) + raw2 = read_raw_fif(tmp_path / 'raw.fif') raw2_data, _ = raw2[:] assert_allclose(raw2_data, raw_cp._data) # with preloading - raw2 = read_raw_fif(tmpdir.join('raw.fif'), preload=True) + raw2 = read_raw_fif(tmp_path / 'raw.fif', preload=True) raw2_data, _ = raw2[:] assert_allclose(raw2_data, raw_cp._data) assert_allclose(data_orig, raw_cp._data.real) @@ -756,7 +756,7 @@ def test_getitem(): @testing.requires_testing_data -def test_proj(tmpdir): +def test_proj(tmp_path): """Test SSP proj operations.""" for proj in [True, False]: raw = read_raw_fif(fif_fname, preload=False) @@ -796,8 +796,8 @@ def test_proj(tmpdir): raw = read_raw_fif(fif_fname, preload=preload) # write the file with proj. activated, make sure proj has been applied - raw.save(tmpdir.join('raw.fif'), proj=True, overwrite=True) - raw2 = read_raw_fif(tmpdir.join('raw.fif')) + raw.save(tmp_path.join('raw.fif'), proj=True, overwrite=True) + raw2 = read_raw_fif(tmp_path.join('raw.fif')) data_proj_2, _ = raw2[:, 0:2] assert_allclose(data_proj_1, data_proj_2) assert (all(p['active'] for p in raw2.info['projs'])) @@ -829,7 +829,7 @@ def test_proj(tmpdir): # I/O roundtrip of an MEG projector with a Raw that only contains EEG # data. - out_fname = tmpdir.join('test_raw.fif') + out_fname = tmp_path.join('test_raw.fif') raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002) proj = raw.info['projs'][-1] raw.pick_types(meg=False, eeg=True) @@ -844,7 +844,7 @@ def test_proj(tmpdir): @testing.requires_testing_data @pytest.mark.parametrize('preload', [False, True, 'memmap.dat']) -def test_preload_modify(preload, tmpdir): +def test_preload_modify(preload, tmp_path): """Test preloading and modifying data.""" rng = np.random.RandomState(0) raw = read_raw_fif(fif_fname, preload=preload) @@ -862,7 +862,7 @@ def test_preload_modify(preload, tmpdir): else: raise - tmp_fname = tmpdir.join('raw.fif') + tmp_fname = tmp_path.join('raw.fif') raw.save(tmp_fname, overwrite=True) raw_new = read_raw_fif(tmp_fname) @@ -1086,7 +1086,7 @@ def test_resample_equiv(): (True, 512, 'auto'), (False, 512, 0), ]) -def test_resample(tmpdir, preload, n, npad): +def test_resample(tmp_path, preload, n, npad): """Test resample (with I/O and multiple files).""" raw = read_raw_fif(fif_fname) raw.crop(0, raw.times[n - 1]) @@ -1098,8 +1098,8 @@ def test_resample(tmpdir, preload, n, npad): # test parallel on upsample raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad) assert raw_resamp.n_times == len(raw_resamp.times) - raw_resamp.save(tmpdir.join('raw_resamp-raw.fif')) - raw_resamp = read_raw_fif(tmpdir.join('raw_resamp-raw.fif'), + raw_resamp.save(tmp_path.join('raw_resamp-raw.fif')) + raw_resamp = read_raw_fif(tmp_path.join('raw_resamp-raw.fif'), preload=True) assert sfreq == raw_resamp.info['sfreq'] / 2 assert raw.n_times == raw_resamp.n_times // 2 @@ -1376,9 +1376,9 @@ def test_add_channels(): @testing.requires_testing_data -def test_save(tmpdir): +def test_save(tmp_path): """Test saving raw.""" - temp_fname = tmpdir.join('test_raw.fif') + temp_fname = tmp_path.join('test_raw.fif') 
shutil.copyfile(fif_fname, temp_fname) raw = read_raw_fif(temp_fname, preload=False) # can't write over file being read @@ -1394,7 +1394,7 @@ def test_save(tmpdir): annot = Annotations([10], [5], ['test'], orig_time=orig_time) raw.set_annotations(annot) annot = raw.annotations - new_fname = tmpdir.join('break_raw.fif') + new_fname = tmp_path.join('break_raw.fif') raw.save(new_fname, overwrite=True) new_raw = read_raw_fif(new_fname, preload=False) pytest.raises(ValueError, new_raw.save, new_fname) @@ -1411,7 +1411,7 @@ def test_save(tmpdir): @testing.requires_testing_data -def test_annotation_crop(tmpdir): +def test_annotation_crop(tmp_path): """Test annotation sync after cropping and concatenating.""" annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test']) raw = read_raw_fif(fif_fname, preload=False) @@ -1437,7 +1437,7 @@ def test_annotation_crop(tmpdir): [1., 1. + 1. / raw.info['sfreq']], atol=1e-3) # make sure we can overwrite the file we loaded when preload=True - new_fname = tmpdir.join('break_raw.fif') + new_fname = tmp_path.join('break_raw.fif') raw.save(new_fname) new_raw = read_raw_fif(new_fname, preload=True) new_raw.save(new_fname, overwrite=True) @@ -1451,7 +1451,7 @@ def test_with_statement(): print(raw_) -def test_compensation_raw(tmpdir): +def test_compensation_raw(tmp_path): """Test Raw compensation.""" raw_3 = read_raw_fif(ctf_comp_fname) assert raw_3.compensation_grade == 3 @@ -1514,7 +1514,7 @@ def test_compensation_raw(tmpdir): assert_allclose(data_3, data_3_new, **tols) # Try IO with compensation - temp_file = tmpdir.join('raw.fif') + temp_file = tmp_path.join('raw.fif') raw_3.save(temp_file, overwrite=True) for preload in (True, False): raw_read = read_raw_fif(temp_file, preload=preload) @@ -1550,10 +1550,10 @@ def test_compensation_raw(tmpdir): @requires_mne -def test_compensation_raw_mne(tmpdir): +def test_compensation_raw_mne(tmp_path): """Test Raw compensation by comparing with MNE-C.""" def compensate_mne(fname, grad): - tmp_fname = tmpdir.join('mne_ctf_test_raw.fif') + tmp_fname = tmp_path.join('mne_ctf_test_raw.fif') cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname, '--grad', str(grad), '--projoff', '--filteroff'] run_subprocess(cmd) @@ -1641,10 +1641,10 @@ def test_equalize_channels(): assert ch_names == e.ch_names -def test_memmap(tmpdir): +def test_memmap(tmp_path): """Test some interesting memmapping cases.""" # concatenate_raw - memmaps = [tmpdir.join(str(ii)) for ii in range(3)] + memmaps = [tmp_path.join(str(ii)) for ii in range(3)] raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) assert raw_0._data.filename == memmaps[0] raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1]) @@ -1696,17 +1696,17 @@ def test_memmap(tmpdir): False, pytest.param(True, marks=pytest.mark.slowtest), ]) -def test_file_like(kind, preload, split, tmpdir): +def test_file_like(kind, preload, split, tmp_path): """Test handling with file-like objects.""" if split: - fname = tmpdir.join('test_raw.fif') + fname = tmp_path / 'test_raw.fif' read_raw_fif(test_fif_fname).save(fname, split_size='5MB') assert op.isfile(fname) assert op.isfile(str(fname)[:-4] + '-1.fif') else: fname = test_fif_fname if preload is str: - preload = str(tmpdir.join('memmap')) + preload = str(tmp_path / 'memmap') with open(str(fname), 'rb') as file_fid: fid = BytesIO(file_fid.read()) if kind == 'bytes' else file_fid assert not fid.closed @@ -1754,17 +1754,17 @@ def test_bad_acq(fname): @testing.requires_testing_data @pytest.mark.skipif(sys.platform not in ('darwin', 
'linux'), reason='Needs proper symlinking') -def test_split_symlink(tmpdir): +def test_split_symlink(tmp_path): """Test split files with symlinks.""" # regression test for gh-9221 - first = str(tmpdir.mkdir('first').join('test_raw.fif')) + first = str(tmp_path.mkdir('first').join('test_raw.fif')) raw = read_raw_fif(fif_fname).pick('meg').load_data() raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True) second = first[:-4] + '-1.fif' assert op.isfile(second) assert not op.isfile(first[:-4] + '-2.fif') - new_first = tmpdir.mkdir('a').join('test_raw.fif') - new_second = tmpdir.mkdir('b').join('test_raw-1.fif') + new_first = tmp_path.mkdir('a').join('test_raw.fif') + new_second = tmp_path.mkdir('b').join('test_raw-1.fif') shutil.move(first, new_first) shutil.move(second, new_second) os.symlink(new_first, first) @@ -1774,7 +1774,7 @@ def test_split_symlink(tmpdir): @testing.requires_testing_data -def test_corrupted(tmpdir): +def test_corrupted(tmp_path): """Test that a corrupted file can still be read.""" # Must be a file written by Neuromag, not us, since we don't write the dir # at the end, so use the skip one (straight from acq). @@ -1786,7 +1786,7 @@ def test_corrupted(tmpdir): assert dirpos == 12641532 fid.seek(0) data = fid.read(dirpos) - bad_fname = tmpdir.join('test_raw.fif') + bad_fname = tmp_path.join('test_raw.fif') with open(bad_fname, 'wb') as fid: fid.write(data) with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'): diff --git a/mne/io/hitachi/tests/test_hitachi.py b/mne/io/hitachi/tests/test_hitachi.py index 0bd47756be9..fa838457269 100644 --- a/mne/io/hitachi/tests/test_hitachi.py +++ b/mne/io/hitachi/tests/test_hitachi.py @@ -187,9 +187,9 @@ ('1.25', 108, 10, 5., 1, (2020, 2, 2, 11, 20, 0, 0), b'\r\n'), ]) def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, - end, tmpdir): + end, tmp_path): """Test NIRSport1 file with no saturation.""" - fname = str(tmpdir.join('test.csv')) + fname = str(tmp_path.join('test.csv')) contents = CONTENTS[version] if end is not None: contents = contents.replace(b'\r', b'\n').replace(b'\n\n', b'\n') diff --git a/mne/io/kit/tests/test_coreg.py b/mne/io/kit/tests/test_coreg.py index 9af9461ed62..e654096ff7f 100644 --- a/mne/io/kit/tests/test_coreg.py +++ b/mne/io/kit/tests/test_coreg.py @@ -19,9 +19,9 @@ mrk_fname = os.path.join(data_dir, 'test_mrk.sqd') -def test_io_mrk(tmpdir): +def test_io_mrk(tmp_path): """Test IO for mrk files.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) pts = read_mrk(mrk_fname) # txt diff --git a/mne/io/kit/tests/test_kit.py b/mne/io/kit/tests/test_kit.py index 1eac5eb3883..41215dbd07c 100644 --- a/mne/io/kit/tests/test_kit.py +++ b/mne/io/kit/tests/test_kit.py @@ -55,7 +55,7 @@ @requires_testing_data -def test_data(tmpdir): +def test_data(tmp_path): """Test reading raw kit files.""" pytest.raises(TypeError, read_raw_kit, epochs_path) pytest.raises(TypeError, read_epochs_kit, sqd_path) @@ -162,7 +162,7 @@ def _assert_sinusoid(data, t, freq, amp, msg): (yokogawa_path, 'Meg160/Analysis (1001) V3R000 PQA160C'), (ricoh_path, 'Meg160/Analysis (1001) V3R000 PQA160C'), ]) -def test_ricoh_data(tmpdir, fname, desc): +def test_ricoh_data(tmp_path, fname, desc): """Test reading channel names and dig information from Ricoh systems.""" raw = read_raw_kit(fname, standardize_names=True) assert raw.ch_names[0] == 'MEG 001' @@ -295,14 +295,14 @@ def test_hsp_elp(): assert_array_almost_equal(pts_elp_in_dev, pts_txt_in_dev, decimal=5) -def test_decimate(tmpdir): +def 
test_decimate(tmp_path): """Test decimation of digitizer headshapes with too many points.""" # load headshape and convert to meters hsp_mm = _get_ico_surface(5)['rr'] * 100 hsp_m = hsp_mm / 1000. # save headshape to a file in mm in temporary directory - tempdir = str(tmpdir) + tempdir = str(tmp_path) sphere_hsp_path = op.join(tempdir, 'test_sphere.txt') np.savetxt(sphere_hsp_path, hsp_mm) diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index bc7a99d4f49..6340e8659d8 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -247,23 +247,23 @@ def test_nirx_missing_warn(): @requires_testing_data -def test_nirx_missing_evt(tmpdir): +def test_nirx_missing_evt(tmp_path): """Test reading NIRX files when missing data.""" - shutil.copytree(fname_nirx_15_2_short, str(tmpdir) + "/data/") - os.rename(str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.evt", - str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.xxx") - fname = str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.hdr" + shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") + os.rename(str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.evt", + str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.xxx") + fname = str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.hdr" raw = read_raw_nirx(fname, preload=True) assert raw.annotations.onset.shape == (0, ) @requires_testing_data -def test_nirx_dat_warn(tmpdir): +def test_nirx_dat_warn(tmp_path): """Test reading NIRX files when missing data.""" - shutil.copytree(fname_nirx_15_2_short, str(tmpdir) + "/data/") - os.rename(str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.dat", - str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.tmp") - fname = str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.hdr" + shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + "/data/") + os.rename(str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.dat", + str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.tmp") + fname = str(tmp_path) + "/data" + "/NIRS-2019-08-23_001.hdr" with pytest.raises(RuntimeWarning, match='A single dat'): read_raw_nirx(fname, preload=True) @@ -449,9 +449,9 @@ def test_nirx_15_3_short(): @requires_testing_data -def test_encoding(tmpdir): +def test_encoding(tmp_path): """Test NIRx encoding.""" - fname = str(tmpdir.join('latin')) + fname = str(tmp_path.join('latin')) shutil.copytree(fname_nirx_15_2, fname) hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') hdr = list() diff --git a/mne/io/persyst/tests/test_persyst.py b/mne/io/persyst/tests/test_persyst.py index c0177560b01..0dca436dd8b 100644 --- a/mne/io/persyst/tests/test_persyst.py +++ b/mne/io/persyst/tests/test_persyst.py @@ -77,10 +77,10 @@ def test_persyst_raw(): @requires_testing_data -def test_persyst_dates(tmpdir): +def test_persyst_dates(tmp_path): """Test different Persyst date formats for meas date.""" # now test what if you change contents of the lay file - out_dir = str(tmpdir) + out_dir = str(tmp_path) new_fname_lay = op.join(out_dir, op.basename(fname_lay)) new_fname_dat = op.join(out_dir, op.basename(fname_dat)) shutil.copy(fname_dat, new_fname_dat) @@ -118,12 +118,12 @@ def test_persyst_dates(tmpdir): @requires_testing_data -def test_persyst_wrong_file(tmpdir): +def test_persyst_wrong_file(tmp_path): """Test reading Persyst files when passed in wrong file path.""" with pytest.raises(FileNotFoundError, match='The path you'): read_raw_persyst(fname_dat, preload=True) - out_dir = str(tmpdir) + out_dir = str(tmp_path) new_fname_lay = op.join(out_dir, op.basename(fname_lay)) new_fname_dat = op.join(out_dir, 
op.basename(fname_dat)) shutil.copy(fname_lay, new_fname_lay) @@ -142,9 +142,9 @@ def test_persyst_wrong_file(tmpdir): @requires_testing_data -def test_persyst_moved_file(tmpdir): +def test_persyst_moved_file(tmp_path): """Test reader - Persyst files need to be in same directory.""" - out_dir = str(tmpdir) + out_dir = str(tmp_path) new_fname_lay = op.join(out_dir, op.basename(fname_lay)) new_fname_dat = op.join(out_dir, op.basename(fname_dat)) shutil.copy(fname_lay, new_fname_lay) @@ -212,9 +212,9 @@ def test_persyst_annotations(tmp_path): @requires_testing_data -def test_persyst_errors(tmpdir): +def test_persyst_errors(tmp_path): """Test reading Persyst files when passed in wrong file path.""" - out_dir = str(tmpdir) + out_dir = str(tmp_path) new_fname_lay = op.join(out_dir, op.basename(fname_lay)) new_fname_dat = op.join(out_dir, op.basename(fname_dat)) shutil.copy(fname_dat, new_fname_dat) diff --git a/mne/io/snirf/tests/test_snirf.py b/mne/io/snirf/tests/test_snirf.py index d2f115e414c..7493dd13fbd 100644 --- a/mne/io/snirf/tests/test_snirf.py +++ b/mne/io/snirf/tests/test_snirf.py @@ -136,12 +136,12 @@ def test_snirf_against_nirx(): @requires_h5py @requires_testing_data -def test_snirf_nonstandard(tmpdir): +def test_snirf_nonstandard(tmp_path): """Test custom tags.""" from mne.externals.pymatreader.utils import _import_h5py h5py = _import_h5py() - shutil.copy(sfnirs_homer_103_wShort, str(tmpdir) + "/mod.snirf") - fname = str(tmpdir) + "/mod.snirf" + shutil.copy(sfnirs_homer_103_wShort, str(tmp_path) + "/mod.snirf") + fname = str(tmp_path) + "/mod.snirf" # Manually mark up the file to match MNE-NIRS custom tags with h5py.File(fname, "r+") as f: f.create_dataset("nirs/metaDataTags/middleName", diff --git a/mne/io/tests/test_compensator.py b/mne/io/tests/test_compensator.py index 8cca908cb44..e5f4c02d6ec 100644 --- a/mne/io/tests/test_compensator.py +++ b/mne/io/tests/test_compensator.py @@ -41,7 +41,7 @@ def test_compensation_identity(): @pytest.mark.parametrize('preload', (True, False)) @pytest.mark.parametrize('pick', (False, True)) -def test_compensation_apply(tmpdir, preload, pick): +def test_compensation_apply(tmp_path, preload, pick): """Test applying compensation.""" # make sure that changing the comp doesn't modify the original data raw = read_raw_fif(ctf_comp_fname, preload=preload) @@ -56,7 +56,7 @@ def test_compensation_apply(tmpdir, preload, pick): assert raw2._comp is None else: assert raw2._comp.shape == (len(raw2.ch_names),) * 2 - fname = op.join(tmpdir, 'ctf-raw.fif') + fname = op.join(tmp_path, 'ctf-raw.fif') raw2.save(fname) raw2 = read_raw_fif(fname) assert raw2.compensation_grade == 2 @@ -71,7 +71,7 @@ def test_compensation_apply(tmpdir, preload, pick): @requires_mne -def test_compensation_mne(tmpdir): +def test_compensation_mne(tmp_path): """Test comensation by comparing with MNE.""" def make_evoked(fname, comp): """Make evoked data.""" @@ -93,7 +93,7 @@ def compensate_mne(fname, comp): return read_evokeds(tmp_fname)[0] # save evoked response with default compensation - fname_default = op.join(tmpdir, 'ctf_default-ave.fif') + fname_default = op.join(tmp_path, 'ctf_default-ave.fif') make_evoked(ctf_comp_fname, None).save(fname_default) for comp in [0, 1, 2, 3]: diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index fcf0a12f593..0a3832ed837 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -82,15 +82,15 @@ @requires_good_network -def test_constants(tmpdir): +def test_constants(tmp_path): """Test 
compensation.""" - tmpdir = str(tmpdir) # old pytest... + tmp_path = str(tmp_path) # old pytest... fname = 'fiff.zip' - dest = op.join(tmpdir, fname) + dest = op.join(tmp_path, fname) pooch.retrieve( url='https://codeload.github.com/' f'{REPO}/fiff-constants/zip/{COMMIT}', - path=tmpdir, + path=tmp_path, fname=fname, known_hash=None ) @@ -98,9 +98,9 @@ def test_constants(tmpdir): with zipfile.ZipFile(dest, 'r') as ff: for name in ff.namelist(): if 'Dictionary' in name: - ff.extract(name, tmpdir) + ff.extract(name, tmp_path) names.append(op.basename(name)) - shutil.move(op.join(tmpdir, name), op.join(tmpdir, names[-1])) + shutil.move(op.join(tmp_path, name), op.join(tmp_path, names[-1])) names = sorted(names) assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt', 'DictionaryStructures.txt', @@ -111,7 +111,7 @@ def test_constants(tmpdir): con = dict(iod=dict(), tags=dict(), types=dict(), defines=dict()) fiff_version = None for name in ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt']: - with open(op.join(tmpdir, name), 'rb') as fid: + with open(op.join(tmp_path, name), 'rb') as fid: for line in fid: line = line.decode('latin1').strip() if line.startswith('# Packing revision'): @@ -135,7 +135,7 @@ def test_constants(tmpdir): assert id_ not in fif['iod'] fif['iod'][id_] = [kind, desc] # Tags (MEGIN) - with open(op.join(tmpdir, 'DictionaryTags.txt'), 'rb') as fid: + with open(op.join(tmp_path, 'DictionaryTags.txt'), 'rb') as fid: for line in fid: line = line.decode('ISO-8859-1').strip() if (line.startswith('#') or line.startswith('alias') or @@ -152,7 +152,7 @@ def test_constants(tmpdir): assert id_ not in fif['tags'], (fif['tags'].get(id_), val) fif['tags'][id_] = val # Tags (MNE) - with open(op.join(tmpdir, 'DictionaryTags_MNE.txt'), 'rb') as fid: + with open(op.join(tmp_path, 'DictionaryTags_MNE.txt'), 'rb') as fid: for li, line in enumerate(fid): line = line.decode('ISO-8859-1').strip() # ignore continuation lines (*) @@ -187,7 +187,7 @@ def test_constants(tmpdir): re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$') used_enums = list() for extra in ('', '_MNE'): - with open(op.join(tmpdir, 'DictionaryTypes%s.txt' + with open(op.join(tmp_path, 'DictionaryTypes%s.txt' % (extra,)), 'rb') as fid: for li, line in enumerate(fid): line = line.decode('ISO-8859-1').strip() diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py index b81ff0f74ba..2d8ba85c8f3 100644 --- a/mne/io/tests/test_meas_info.py +++ b/mne/io/tests/test_meas_info.py @@ -145,13 +145,13 @@ def test_duplicate_name_correction(): create_info(['A', 'A', 'A-0'], 1000., verbose='error') -def test_fiducials_io(tmpdir): +def test_fiducials_io(tmp_path): """Test fiducials i/o.""" pts, coord_frame = read_fiducials(fiducials_fname) assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL - temp_fname = tmpdir.join('test.fif') + temp_fname = tmp_path.join('test.fif') write_fiducials(temp_fname, pts, coord_frame) pts_1, coord_frame_1 = read_fiducials(temp_fname) assert coord_frame == coord_frame_1 @@ -225,10 +225,10 @@ def test_info(): assert info == info2 -def test_read_write_info(tmpdir): +def test_read_write_info(tmp_path): """Test IO of info.""" info = read_info(raw_fname) - temp_file = str(tmpdir.join('info.fif')) + temp_file = str(tmp_path.join('info.fif')) # check for bug `#1198` info['dev_head_t']['trans'] = np.eye(4) t1 = info['dev_head_t']['trans'] @@ -269,7 +269,7 @@ def test_read_write_info(tmpdir): with open(temp_file, 'rb') as fid: 
m1.update(fid.read()) m1 = m1.hexdigest() - temp_file_2 = tmpdir.join('info2.fif') + temp_file_2 = tmp_path.join('info2.fif') assert temp_file_2 != temp_file write_info(temp_file_2, info) m2 = hashlib.md5() @@ -283,7 +283,7 @@ def test_read_write_info(tmpdir): info['meas_date'] = None anonymize_info(info, verbose='error') assert info['meas_date'] is None - tmp_fname_3 = tmpdir.join('info3.fif') + tmp_fname_3 = tmp_path.join('info3.fif') write_info(tmp_fname_3, info) assert info['meas_date'] is None info2 = read_info(tmp_fname_3) @@ -292,17 +292,17 @@ def test_read_write_info(tmpdir): # Check that having a very old date in fine until you try to save it to fif with info._unlock(check_after=True): info['meas_date'] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - fname = tmpdir.join('test.fif') + fname = tmp_path.join('test.fif') with pytest.raises(RuntimeError, match='must be between '): write_info(fname, info) -def test_io_dig_points(tmpdir): +def test_io_dig_points(tmp_path): """Test Writing for dig files.""" points = read_polhemus_fastscan(hsp_fname, on_header_missing='ignore') - dest = str(tmpdir.join('test.txt')) - dest_bad = str(tmpdir.join('test.mne')) + dest = str(tmp_path.join('test.txt')) + dest_bad = str(tmp_path.join('test.mne')) with pytest.raises(ValueError, match='must be of shape'): _write_dig_points(dest, points[:, :2]) with pytest.raises(ValueError, match='extension'): @@ -320,9 +320,9 @@ def test_io_dig_points(tmpdir): read_polhemus_fastscan(dest, on_header_missing='warn') -def test_io_coord_frame(tmpdir): +def test_io_coord_frame(tmp_path): """Test round trip for coordinate frame.""" - fname = tmpdir.join('test.fif') + fname = tmp_path.join('test.fif') for ch_type in ('eeg', 'seeg', 'ecog', 'dbs', 'hbo', 'hbr'): info = create_info( ch_names=['Test Ch'], sfreq=1000., ch_types=[ch_type]) @@ -676,7 +676,7 @@ def test_meas_date_convert(stamp, dt): assert str(dt[0]) in repr(info) -def test_anonymize(tmpdir): +def test_anonymize(tmp_path): """Test that sensitive information can be anonymized.""" pytest.raises(TypeError, anonymize_info, 'foo') @@ -716,7 +716,7 @@ def test_anonymize(tmpdir): # write to disk & read back inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo' fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif' - out_path = tmpdir.join(fname) + out_path = tmp_path.join(fname) inst.save(out_path, overwrite=True) if inst_type == 'raw': read_raw_fif(out_path) @@ -741,11 +741,11 @@ def test_anonymize(tmpdir): assert_allclose(raw.annotations.onset, expected_onset) -def test_anonymize_with_io(tmpdir): +def test_anonymize_with_io(tmp_path): """Test that IO does not break anonymization.""" raw = read_raw_fif(raw_fname) - temp_path = tmpdir.join('tmp_raw.fif') + temp_path = tmp_path.join('tmp_raw.fif') raw.save(temp_path) raw2 = read_raw_fif(temp_path) @@ -755,7 +755,7 @@ def test_anonymize_with_io(tmpdir): @testing.requires_testing_data -def test_csr_csc(tmpdir): +def test_csr_csc(tmp_path): """Test CSR and CSC.""" info = read_info(sss_ctc_fname) info = pick_info(info, pick_types(info, meg=True, exclude=[])) @@ -763,7 +763,7 @@ def test_csr_csc(tmpdir): ct = sss_ctc['decoupler'].copy() # CSC assert isinstance(ct, sparse.csc_matrix) - fname = tmpdir.join('test.fif') + fname = tmp_path.join('test.fif') write_info(fname, info) info_read = read_info(fname) ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] @@ -774,7 +774,7 @@ def test_csr_csc(tmpdir): assert isinstance(csr, sparse.csr_matrix) assert_array_equal(csr.toarray(), 
ct.toarray()) info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr - fname = tmpdir.join('test1.fif') + fname = tmp_path.join('test1.fif') write_info(fname, info) info_read = read_info(fname) ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] @@ -810,7 +810,7 @@ def test_check_compensation_consistency(): assert'Removing 5 compensators' in log.getvalue() -def test_field_round_trip(tmpdir): +def test_field_round_trip(tmp_path): """Test round-trip for new fields.""" info = create_info(1, 1000., 'eeg') with info._unlock(): @@ -821,7 +821,7 @@ def test_field_round_trip(tmpdir): info['helium_info'] = dict( he_level_raw=1., helium_level=2., orig_file_guid='e', meas_date=(1, 2)) - fname = tmpdir.join('temp-info.fif') + fname = tmp_path.join('temp-info.fif') write_info(fname, info) info_read = read_info(fname) assert_object_equal(info, info_read) @@ -878,7 +878,7 @@ def test_invalid_subject_birthday(): pytest.param(ctf_fname, marks=testing._pytest_mark()), raw_fname, ]) -def test_channel_name_limit(tmpdir, monkeypatch, fname): +def test_channel_name_limit(tmp_path, monkeypatch, fname): """Test that our remapping works properly.""" # # raw @@ -902,7 +902,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): raw.info.normalize_proj() raw.pick_channels(data_names + ref_names).crop(0, 2) long_names = ['123456789abcdefg' + name for name in raw.ch_names] - fname = tmpdir.join('test-raw.fif') + fname = tmp_path.join('test-raw.fif') with catch_logging() as log: raw.save(fname) log = log.getvalue() @@ -960,7 +960,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): # epochs # epochs = Epochs(raw, make_fixed_length_events(raw)) - fname = tmpdir.join('test-epo.fif') + fname = tmp_path.join('test-epo.fif') epochs.save(fname) epochs_read = read_epochs(fname) for ep in (epochs, epochs_read): @@ -970,7 +970,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): # cov epochs.info['bads'] = [] cov = compute_covariance(epochs, verbose='error') - fname = tmpdir.join('test-cov.fif') + fname = tmp_path.join('test-cov.fif') write_cov(fname, cov) cov_read = read_cov(fname) for co in (cov, cov_read): @@ -984,7 +984,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): evoked = epochs.average() evoked.info['bads'] = bads assert evoked.nave == 1 - fname = tmpdir.join('test-ave.fif') + fname = tmp_path.join('test-ave.fif') evoked.save(fname) evoked_read = read_evokeds(fname)[0] for ev in (evoked, evoked_read): @@ -1000,7 +1000,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): src = setup_volume_source_space( pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]])) fwd = make_forward_solution(evoked.info, None, src, sphere) - fname = tmpdir.join('temp-fwd.fif') + fname = tmp_path.join('temp-fwd.fif') write_forward_solution(fname, fwd) fwd_read = read_forward_solution(fname) for fw in (fwd, fwd_read): @@ -1013,7 +1013,7 @@ def test_channel_name_limit(tmpdir, monkeypatch, fname): # inv # inv = make_inverse_operator(evoked.info, fwd, cov) - fname = tmpdir.join('test-inv.fif') + fname = tmp_path.join('test-inv.fif') write_inverse_operator(fname, inv) inv_read = read_inverse_operator(fname) for iv in (inv, inv_read): diff --git a/mne/io/tests/test_what.py b/mne/io/tests/test_what.py index 17121568116..4a8cc144029 100644 --- a/mne/io/tests/test_what.py +++ b/mne/io/tests/test_what.py @@ -19,7 +19,7 @@ @pytest.mark.slowtest @requires_sklearn @testing.requires_testing_data -def test_what(tmpdir, verbose_debug): +def test_what(tmp_path, verbose_debug): """Test 
mne.what.""" # ICA ica = ICA(max_iter=1) @@ -27,7 +27,7 @@ def test_what(tmpdir, verbose_debug): create_info(3, 1000., 'eeg')) with pytest.warns(None): # convergence sometimes ica.fit(raw) - fname = op.join(str(tmpdir), 'x-ica.fif') + fname = op.join(str(tmp_path), 'x-ica.fif') ica.save(fname) assert what(fname) == 'ica' # test files diff --git a/mne/io/tests/test_write.py b/mne/io/tests/test_write.py index ebfbd686a3f..fb4ac07741c 100644 --- a/mne/io/tests/test_write.py +++ b/mne/io/tests/test_write.py @@ -10,9 +10,9 @@ from mne.io.write import start_file, write_int -def test_write_int(tmpdir): +def test_write_int(tmp_path): """Test that write_int raises an error on bad values.""" - with start_file(tmpdir.join('temp.fif')) as fid: + with start_file(tmp_path.join('temp.fif')) as fid: write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483647]) # 2 ** 31 - 1 write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, []) # 2 ** 31 - 1 with pytest.raises(TypeError, match=r'.*exceeds max.*EVENT_LIST\)'): diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index b16c2965666..62c1f270bae 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -216,7 +216,7 @@ def test_warn_inverse_operator(evoked, noise_cov): @pytest.mark.slowtest -def test_make_inverse_operator_loose(evoked, tmpdir): +def test_make_inverse_operator_loose(evoked, tmp_path): """Test MNE inverse computation (precomputed and non-precomputed).""" # Test old version of inverse computation starting from forward operator noise_cov = read_cov(fname_cov) @@ -230,7 +230,7 @@ def test_make_inverse_operator_loose(evoked, tmpdir): log = log.getvalue() assert 'MEG: rank 302 computed' in log assert 'limit = 1/%d' % fwd_op['nsource'] in log - _compare_io(my_inv_op, tempdir=str(tmpdir)) + _compare_io(my_inv_op, tempdir=str(tmp_path)) assert_equal(inverse_operator['units'], 'Am') _compare_inverses_approx(my_inv_op, inverse_operator, evoked, rtol=1e-2, atol=1e-5, depth_atol=1e-3) @@ -241,7 +241,7 @@ def test_make_inverse_operator_loose(evoked, tmpdir): fixed=False, verbose=True) log = log.getvalue() assert 'MEG: rank 302 computed from 305' in log - _compare_io(my_inv_op, tempdir=str(tmpdir)) + _compare_io(my_inv_op, tempdir=str(tmp_path)) _compare_inverses_approx(my_inv_op, inverse_operator, evoked, rtol=1e-3, atol=1e-5) assert ('dev_head_t' in my_inv_op['info']) @@ -405,7 +405,7 @@ def test_localization_bias_free(bias_params_free, method, lower, upper, @pytest.mark.slowtest -def test_apply_inverse_sphere(evoked, tmpdir): +def test_apply_inverse_sphere(evoked, tmp_path): """Test applying an inverse with a sphere model (rank-deficient).""" evoked.pick_channels(evoked.ch_names[:306:8]) with evoked.info._unlock(): @@ -424,7 +424,7 @@ def test_apply_inverse_sphere(evoked, tmpdir): assert fwd['sol']['nrow'] == 39 assert fwd['nsource'] == 101 assert fwd['sol']['ncol'] == 303 - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'temp-inv.fif') inv = make_inverse_operator(evoked.info, fwd, cov, loose=1.) 
# This forces everything to be float32 @@ -774,14 +774,14 @@ def test_make_inverse_operator_vector(evoked, noise_cov): atol=1e-20) -def test_make_inverse_operator_diag(evoked, noise_cov, tmpdir, azure_windows): +def test_make_inverse_operator_diag(evoked, noise_cov, tmp_path, azure_windows): """Test MNE inverse computation with diagonal noise cov.""" noise_cov = noise_cov.as_diag() fwd_op = convert_forward_solution(read_forward_solution(fname_fwd), surf_ori=True) inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, loose=0.2, depth=0.8) - _compare_io(inv_op, tempdir=str(tmpdir)) + _compare_io(inv_op, tempdir=str(tmp_path)) inverse_operator_diag = read_inverse_operator(fname_inv_meeg_diag) # This one is pretty bad, and for some reason it's worse on Azure Windows ctol = 0.75 if azure_windows else 0.99 @@ -807,9 +807,9 @@ def test_inverse_operator_noise_cov_rank(evoked, noise_cov): assert (compute_rank_inverse(inv) == 20) -def test_inverse_operator_volume(evoked, tmpdir): +def test_inverse_operator_volume(evoked, tmp_path): """Test MNE inverse computation on volume source space.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) inv_vol = read_inverse_operator(fname_vol_inv) assert (repr(inv_vol)) stc = apply_inverse(evoked, inv_vol, lambda2, 'dSPM') @@ -832,9 +832,9 @@ def test_inverse_operator_volume(evoked, tmpdir): @pytest.mark.slowtest @testing.requires_testing_data -def test_io_inverse_operator(tmpdir): +def test_io_inverse_operator(tmp_path): """Test IO of inverse_operator.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) inverse_operator = read_inverse_operator(fname_inv) x = repr(inverse_operator) assert (x) diff --git a/mne/minimum_norm/tests/test_snr.py b/mne/minimum_norm/tests/test_snr.py index ca451ba9ec5..060d608eeea 100644 --- a/mne/minimum_norm/tests/test_snr.py +++ b/mne/minimum_norm/tests/test_snr.py @@ -22,9 +22,9 @@ @testing.requires_testing_data @requires_mne -def test_snr(tmpdir): +def test_snr(tmp_path): """Test SNR calculation.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) inv = read_inverse_operator(fname_inv) evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0] snr = estimate_snr(evoked, inv)[0] diff --git a/mne/preprocessing/ieeg/tests/test_projection.py b/mne/preprocessing/ieeg/tests/test_projection.py index 59fa0ecb2fa..6ad0731fe31 100644 --- a/mne/preprocessing/ieeg/tests/test_projection.py +++ b/mne/preprocessing/ieeg/tests/test_projection.py @@ -27,9 +27,9 @@ @testing.requires_testing_data -def test_project_sensors_onto_brain(tmpdir): +def test_project_sensors_onto_brain(tmp_path): """Test projecting sensors onto the brain surface.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw = mne.io.read_raw_fif(fname_raw) trans = _get_trans(fname_trans)[0] # test informative error for no surface first diff --git a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py index c355c160c1c..95a8b66c969 100644 --- a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py +++ b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py @@ -33,13 +33,13 @@ @pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2, fname_nirx_15_0])) @pytest.mark.parametrize('fmt', ('nirx', 'fif')) -def test_beer_lambert(fname, fmt, tmpdir): +def test_beer_lambert(fname, fmt, tmp_path): """Test converting NIRX files.""" assert fmt in ('nirx', 'fif') raw = read_raw_nirx(fname) if fmt == 'fif': - raw.save(tmpdir.join('test_raw.fif')) - raw = read_raw_fif(tmpdir.join('test_raw.fif')) + 
raw.save(tmp_path.join('test_raw.fif')) + raw = read_raw_fif(tmp_path.join('test_raw.fif')) assert 'fnirs_cw_amplitude' in raw assert 'fnirs_od' not in raw raw = optical_density(raw) diff --git a/mne/preprocessing/nirs/tests/test_scalp_coupling_index.py b/mne/preprocessing/nirs/tests/test_scalp_coupling_index.py index 6f748effd08..86b9bad0f6b 100644 --- a/mne/preprocessing/nirs/tests/test_scalp_coupling_index.py +++ b/mne/preprocessing/nirs/tests/test_scalp_coupling_index.py @@ -29,7 +29,7 @@ @pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2, fname_nirx_15_0])) @pytest.mark.parametrize('fmt', ('nirx', 'fif')) -def test_scalp_coupling_index(fname, fmt, tmpdir): +def test_scalp_coupling_index(fname, fmt, tmp_path): """Test converting NIRX files.""" assert fmt in ('nirx', 'fif') raw = read_raw_nirx(fname) diff --git a/mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py b/mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py index 9c84eb2c9f4..922a99dcb09 100644 --- a/mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py +++ b/mne/preprocessing/nirs/tests/test_temporal_derivative_distribution_repair.py @@ -20,7 +20,7 @@ @testing.requires_testing_data @pytest.mark.parametrize('fname', ([fname_nirx_15_2])) -def test_temporal_derivative_distribution_repair(fname, tmpdir): +def test_temporal_derivative_distribution_repair(fname, tmp_path): """Test running artifact rejection.""" raw = read_raw_nirx(fname) raw = optical_density(raw) diff --git a/mne/preprocessing/tests/test_fine_cal.py b/mne/preprocessing/tests/test_fine_cal.py index f5e43d7c8ec..33c5d170916 100644 --- a/mne/preprocessing/tests/test_fine_cal.py +++ b/mne/preprocessing/tests/test_fine_cal.py @@ -31,9 +31,9 @@ @pytest.mark.parametrize('fname', (cal_mf_fname, fine_cal_fname, fine_cal_fname_3d)) @testing.requires_testing_data -def test_fine_cal_io(tmpdir, fname): +def test_fine_cal_io(tmp_path, fname): """Test round trip reading/writing of fine calibration .dat file.""" - temp_fname = op.join(str(tmpdir), 'fine_cal_temp.dat') + temp_fname = op.join(str(tmp_path), 'fine_cal_temp.dat') # Load fine calibration file fine_cal_dict = read_fine_calibration(fname) diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index bdda8adf4ba..618d944a4f7 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -198,7 +198,7 @@ def test_warnings(): @pytest.mark.parametrize('n_components', (None, 0.9999, 8, 9, 10)) @pytest.mark.parametrize('n_pca_components', [8, 9, 0.9999, 10]) @pytest.mark.filterwarnings('ignore:FastICA did not converge.*:UserWarning') -def test_ica_noop(n_components, n_pca_components, tmpdir): +def test_ica_noop(n_components, n_pca_components, tmp_path): """Test that our ICA is stable even with a bad max_pca_components.""" data = np.random.RandomState(0).randn(10, 1000) info = create_info(10, 1000., 'eeg') @@ -241,7 +241,7 @@ def test_ica_noop(n_components, n_pca_components, tmpdir): assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='Id failure') _assert_ica_attributes(ica, data) # and with I/O - fname = tmpdir.join('temp-ica.fif') + fname = tmp_path.join('temp-ica.fif') ica.save(fname) ica = read_ica(fname) raw_new = ica.apply(raw.copy()) @@ -269,7 +269,7 @@ def test_ica_max_iter_(method, max_iter_default): @requires_sklearn @pytest.mark.parametrize("method", ["infomax", "fastica", "picard"]) -def test_ica_n_iter_(method, tmpdir): +def test_ica_n_iter_(method, 
tmp_path): """Test that ICA.n_iter_ is set after fitting.""" _skip_check_picard(method) @@ -289,7 +289,7 @@ def test_ica_n_iter_(method, tmpdir): assert_equal(ica.n_iter_, max_iter) # Test I/O roundtrip. - output_fname = tmpdir.join('test_ica-ica.fif') + output_fname = tmp_path.join('test_ica-ica.fif') _assert_ica_attributes(ica, raw.get_data('data'), limits=(5, 110)) ica.save(output_fname) ica = read_ica(output_fname) @@ -594,7 +594,7 @@ def short_raw_epochs(): @requires_sklearn @pytest.mark.slowtest @pytest.mark.parametrize("method", ["picard", "fastica"]) -def test_ica_additional(method, tmpdir, short_raw_epochs): +def test_ica_additional(method, tmp_path, short_raw_epochs): """Test additional ICA functionality.""" _skip_check_picard(method) raw, epochs, epochs_eog = short_raw_epochs @@ -683,7 +683,7 @@ def test_ica_additional(method, tmpdir, short_raw_epochs): corrmap([ica_different_channels, ica], (0, 0)) # test warnings on bad filenames - ica_badname = tmpdir.join('test-bad-name.fif.gz') + ica_badname = tmp_path.join('test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='-ica.fif'): ica.save(ica_badname) with pytest.warns(RuntimeWarning, match='-ica.fif'): @@ -736,7 +736,7 @@ def test_ica_additional(method, tmpdir, short_raw_epochs): ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') assert ((d1 != ica_raw._data[0]).any()) - test_ica_fname = tmpdir.join('test-ica.fif') + test_ica_fname = tmp_path.join('test-ica.fif') ica.n_pca_components = 2 ica.method = 'fake' ica.save(test_ica_fname) @@ -972,7 +972,7 @@ def f(x, y): ('picard', test_cov_name), ('fastica', None), ]) -def test_ica_cov(method, cov, tmpdir, short_raw_epochs): +def test_ica_cov(method, cov, tmp_path, short_raw_epochs): """Test ICA with cov.""" _skip_check_picard(method) raw, epochs, epochs_eog = short_raw_epochs @@ -980,7 +980,7 @@ def test_ica_cov(method, cov, tmpdir, short_raw_epochs): cov = read_cov(cov) # test reading and writing - test_ica_fname = tmpdir.join('test-ica.fif') + test_ica_fname = tmp_path.join('test-ica.fif') kwargs = dict(n_pca_components=4) ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1) @@ -1080,7 +1080,7 @@ def test_ica_twice(method): @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard", "infomax"]) -def test_fit_methods(method, tmpdir): +def test_fit_methods(method, tmp_path): """Test fit_params for ICA.""" _skip_check_picard(method) fit_params = {} @@ -1092,8 +1092,8 @@ def test_fit_methods(method, tmpdir): # Only picard and infomax support the "extended" keyword, so limit the # tests to those. 
if method in ['picard', 'infomax']: - tmpdir = str(tmpdir) - output_fname = op.join(tmpdir, 'test_ica-ica.fif') + tmp_path = str(tmp_path) + output_fname = op.join(tmp_path, 'test_ica-ica.fif') raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() n_components = 3 @@ -1228,7 +1228,7 @@ def test_eog_channel(method): @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) -def test_n_components_none(method, tmpdir): +def test_n_components_none(method, tmp_path): """Test n_components=None.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() @@ -1240,7 +1240,7 @@ def test_n_components_none(method, tmpdir): n_components = None random_state = 12345 - output_fname = tmpdir.join('test_ica-ica.fif') + output_fname = tmp_path.join('test_ica-ica.fif') ica = ICA(method=method, n_components=n_components, random_state=random_state) with pytest.warns(None): @@ -1448,12 +1448,12 @@ def test_read_ica_eeglab(): @testing.requires_testing_data -def test_read_ica_eeglab_mismatch(tmpdir): +def test_read_ica_eeglab_mismatch(tmp_path): """Test read_ica_eeglab function when there is a mismatch.""" fname_orig = op.join(test_base_dir, "EEGLAB", "test_raw.set") base = op.basename(fname_orig)[:-3] - shutil.copyfile(fname_orig[:-3] + 'fdt', tmpdir.join(base + 'fdt')) - fname = tmpdir.join(base) + shutil.copyfile(fname_orig[:-3] + 'fdt', tmp_path.join(base + 'fdt')) + fname = tmp_path.join(base) data = loadmat(fname_orig) w = data['EEG']['icaweights'][0][0] w[:] = np.random.RandomState(0).randn(*w.shape) diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py index bf9a590368d..b678501f743 100644 --- a/mne/preprocessing/tests/test_maxwell.py +++ b/mne/preprocessing/tests/test_maxwell.py @@ -139,9 +139,9 @@ def read_crop(fname, lims=(0, None)): @pytest.mark.slowtest @testing.requires_testing_data -def test_movement_compensation(tmpdir): +def test_movement_compensation(tmp_path): """Test movement compensation.""" - temp_dir = str(tmpdir) + temp_dir = str(tmp_path) lims = (0, 4) raw = read_crop(raw_fname, lims).load_data() head_pos = read_head_pos(pos_fname) @@ -482,7 +482,7 @@ def test_basic(): @testing.requires_testing_data -def test_maxwell_filter_additional(tmpdir): +def test_maxwell_filter_additional(tmp_path): """Test processing of Maxwell filtered data.""" # TODO: Future tests integrate with mne/io/tests/test_proc_history @@ -504,7 +504,7 @@ def test_maxwell_filter_additional(tmpdir): bad_condition='ignore') # Test io on processed data - tempdir = str(tmpdir) + tempdir = str(tmp_path) test_outname = op.join(tempdir, 'test_raw_sss.fif') raw_sss.save(test_outname) raw_sss_loaded = read_crop(test_outname).load_data() @@ -734,7 +734,7 @@ def _check_reg_match(sss_py, sss_mf, comp_tol): @testing.requires_testing_data -def test_cross_talk(tmpdir): +def test_cross_talk(tmp_path): """Test Maxwell filter cross-talk cancellation.""" raw = read_crop(raw_fname, (0., 1.)) raw.info['bads'] = bads @@ -756,7 +756,7 @@ def test_cross_talk(tmpdir): assert_array_equal(py_ctc['decoupler'].toarray(), mf_ctc['decoupler'].toarray()) # I/O roundtrip - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'test_sss_raw.fif') sss_ctc.save(fname) sss_ctc_read = read_raw_fif(fname) @@ -902,7 +902,7 @@ def get_n_projected(): @buggy_mkl_svd @pytest.mark.slowtest @testing.requires_testing_data -def test_shielding_factor(tmpdir): +def test_shielding_factor(tmp_path): """Test Maxwell filter shielding factor using empty room.""" 
raw_erm = read_crop(erm_fname).load_data().pick_types(meg=True) erm_power = raw_erm[pick_types(raw_erm.info, meg='mag')][0] @@ -1084,7 +1084,7 @@ def test_shielding_factor(tmpdir): _assert_shielding(raw_sss, erm_power, 57, 58) assert counts[0] == 3 # Show it by rewriting the 3D as 1D and testing it - temp_dir = str(tmpdir) + temp_dir = str(tmp_path) temp_fname = op.join(temp_dir, 'test_cal.dat') with open(fine_cal_fname_3d, 'r') as fid: with open(temp_fname, 'w') as fid_out: diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 5627290bf79..cf863a7c316 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -94,9 +94,9 @@ def _make_invisible(fig, **kwargs): @pytest.mark.slowtest @testing.requires_testing_data -def test_render_report(renderer_pyvistaqt, tmpdir, invisible_fig): +def test_render_report(renderer_pyvistaqt, tmp_path, invisible_fig): """Test rendering *.fif files for mne report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw_fname_new = op.join(tempdir, 'temp_raw.fif') raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif') ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif') @@ -208,10 +208,10 @@ def test_render_report(renderer_pyvistaqt, tmpdir, invisible_fig): @testing.requires_testing_data -def test_render_report_extra(renderer_pyvistaqt, tmpdir, invisible_fig): +def test_render_report_extra(renderer_pyvistaqt, tmp_path, invisible_fig): """Test SVG and projector rendering separately.""" # ... otherwise things are very slow - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw_fname_new = op.join(tempdir, 'temp_raw.fif') shutil.copyfile(raw_fname, raw_fname_new) report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir, @@ -231,9 +231,9 @@ def test_render_report_extra(renderer_pyvistaqt, tmpdir, invisible_fig): assert 'SSP Projectors' in html -def test_add_custom_css(tmpdir): +def test_add_custom_css(tmp_path): """Test adding custom CSS rules to the report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'report.html') fig = plt.figure() # Empty figure @@ -248,9 +248,9 @@ def test_add_custom_css(tmpdir): assert custom_css in html -def test_add_custom_js(tmpdir): +def test_add_custom_js(tmp_path): """Test adding custom JavaScript to the report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'report.html') fig = plt.figure() # Empty figure @@ -268,9 +268,9 @@ def test_add_custom_js(tmpdir): @testing.requires_testing_data -def test_render_non_fiff(tmpdir): +def test_render_non_fiff(tmp_path): """Test rendering non-FIFF files for mne report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) fnames_in = [bdf_fname, edf_fname] fnames_out = [] for fname in fnames_in: @@ -303,12 +303,12 @@ def test_render_non_fiff(tmpdir): @testing.requires_testing_data -def test_report_raw_psd_and_date(tmpdir): +def test_report_raw_psd_and_date(tmp_path): """Test report raw PSD and DATE_NONE functionality.""" with pytest.raises(TypeError, match='dict'): Report(raw_psd='foo') - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw = read_raw_fif(raw_fname).crop(0, 1.).load_data() raw.info['experimenter'] = 'mne test' raw.info['subject_info'] = dict(id=123, his_id='sample') @@ -352,9 +352,9 @@ def test_report_raw_psd_and_date(tmpdir): @pytest.mark.slowtest # slow for Mayavi on Azure @testing.requires_testing_data -def test_render_add_sections(renderer, tmpdir): +def test_render_add_sections(renderer, tmp_path): """Test adding figures/images to section.""" - tempdir 
= str(tmpdir) + tempdir = str(tmp_path) report = Report(subjects_dir=subjects_dir) # Check add_figure functionality fig = plt.plot([1, 2], [1, 2])[0].figure @@ -380,7 +380,7 @@ def test_render_add_sections(renderer, tmpdir): report.add_figure(fig=fig, title='random image') assert (repr(report)) - fname = op.join(str(tmpdir), 'test.html') + fname = op.join(str(tmp_path), 'test.html') report.save(fname, open_browser=False) assert len(report) == 4 @@ -389,9 +389,9 @@ def test_render_add_sections(renderer, tmpdir): @pytest.mark.slowtest @testing.requires_testing_data @requires_nibabel() -def test_render_mri(renderer, tmpdir): +def test_render_mri(renderer, tmp_path): """Test rendering MRI for mne report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) trans_fname_new = op.join(tempdir, 'temp-trans.fif') for a, b in [[trans_fname, trans_fname_new]]: shutil.copyfile(a, b) @@ -448,9 +448,9 @@ def test_add_bem_n_jobs(n_jobs, monkeypatch): @testing.requires_testing_data @requires_nibabel() -def test_render_mri_without_bem(tmpdir): +def test_render_mri_without_bem(tmp_path): """Test rendering MRI without BEM for mne report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) os.mkdir(op.join(tempdir, 'sample')) os.mkdir(op.join(tempdir, 'sample', 'mri')) shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz')) @@ -477,9 +477,9 @@ def test_add_html(): @testing.requires_testing_data -def test_multiple_figs(tmpdir): +def test_multiple_figs(tmp_path): """Test adding a slider with a series of figures to a Report.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir) figs = _get_example_figures() @@ -519,9 +519,9 @@ def test_validate_input(): @requires_h5py -def test_open_report(tmpdir): +def test_open_report(tmp_path): """Test the open_report function.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) hdf5 = op.join(tempdir, 'report.h5') # Test creating a new report through the open_report function @@ -605,15 +605,15 @@ def test_add_or_replace(): assert r.html[3] == old_r.html[3] -def test_scraper(tmpdir): +def test_scraper(tmp_path): """Test report scraping.""" r = Report() fig1, fig2 = _get_example_figures() r.add_figure(fig=fig1, title='a') r.add_figure(fig=fig2, title='b') # Mock a Sphinx + sphinx_gallery config - app = Bunch(builder=Bunch(srcdir=str(tmpdir), - outdir=op.join(str(tmpdir), '_build', 'html'))) + app = Bunch(builder=Bunch(srcdir=str(tmp_path), + outdir=op.join(str(tmp_path), '_build', 'html'))) scraper = _ReportScraper() scraper.app = app gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html') @@ -633,7 +633,7 @@ def test_scraper(tmpdir): rst = scraper(block, block_vars, gallery_conf) # Once it's saved, add it assert rst == '' - fname = op.join(str(tmpdir), 'my_html.html') + fname = op.join(str(tmp_path), 'my_html.html') r.save(fname, open_browser=False) rst = scraper(block, block_vars, gallery_conf) out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html') @@ -647,24 +647,24 @@ def test_scraper(tmpdir): @testing.requires_testing_data @pytest.mark.parametrize('split_naming', ('neuromag', 'bids',)) -def test_split_files(tmpdir, split_naming): +def test_split_files(tmp_path, split_naming): """Test that in the case of split files, we only parse the first.""" raw = read_raw_fif(raw_fname) split_size = '7MB' # Should produce 3 files buffer_size_sec = 1 # Tiny buffer so it's smaller than the split size - raw.save(op.join(tmpdir, 'raw_meg.fif'), 
split_size=split_size, + raw.save(op.join(tmp_path, 'raw_meg.fif'), split_size=split_size, split_naming=split_naming, buffer_size_sec=buffer_size_sec) report = Report() - report.parse_folder(tmpdir, render_bem=False, raw_butterfly=False) + report.parse_folder(tmp_path, render_bem=False, raw_butterfly=False) assert len(report._content) == 1 @pytest.mark.slowtest # ~40 sec on Azure Windows @testing.requires_testing_data -def test_survive_pickle(tmpdir): +def test_survive_pickle(tmp_path): """Testing functionality of Report-Object after pickling.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw_fname_new = op.join(tempdir, 'temp_raw.fif') shutil.copyfile(raw_fname, raw_fname_new) @@ -682,7 +682,7 @@ def test_survive_pickle(tmpdir): @pytest.mark.slowtest # ~30 sec on Azure Windows @requires_sklearn @testing.requires_testing_data -def test_manual_report_2d(tmpdir, invisible_fig): +def test_manual_report_2d(tmp_path, invisible_fig): """Simulate user manually creating report by adding one file at a time.""" from sklearn.exceptions import ConvergenceWarning @@ -774,13 +774,13 @@ def test_manual_report_2d(tmpdir, invisible_fig): assert 'ICA component topographies' not in r._content[-1].html assert 'Original and cleaned signal' in r._content[-1].html - fname = op.join(tmpdir, 'report.html') + fname = op.join(tmp_path, 'report.html') r.save(fname=fname, open_browser=False) @pytest.mark.slowtest # 30 sec on Azure for Mayavi @testing.requires_testing_data -def test_manual_report_3d(tmpdir, renderer): +def test_manual_report_3d(tmp_path, renderer): """Simulate adding 3D sections.""" r = Report(title='My Report') r.add_trans(trans=trans_fname, info=raw_fname, title='my coreg', @@ -796,11 +796,11 @@ def test_manual_report_3d(tmpdir, renderer): subjects_dir=subjects_dir, n_time_points=2, stc_plot_kwargs=stc_plot_kwargs, ) - fname = op.join(tmpdir, 'report.html') + fname = op.join(tmp_path, 'report.html') r.save(fname=fname, open_browser=False) -def test_sorting(tmpdir): +def test_sorting(tmp_path): """Test that automated ordering based on tags works.""" r = Report() @@ -821,17 +821,17 @@ def test_sorting(tmpdir): assert content_sorted != r._content assert [c.tags[0] for c in content_sorted] == expected_order - r.save(fname=op.join(tmpdir, 'report.html'), sort_content=True, + r.save(fname=op.join(tmp_path, 'report.html'), sort_content=True, open_browser=False) @testing.requires_testing_data -def test_deprecated_methods(tmpdir): +def test_deprecated_methods(tmp_path): """Test methods that are scheduled for removal after 0.24.""" r = Report() r.add_projs(info=raw_fname, title='SSP Projectors', tags=('mytag',)) fig = plt.figure() # Empty figure - img_fname = op.join(tmpdir, 'testimage.png') + img_fname = op.join(tmp_path, 'testimage.png') fig.savefig(img_fname) with pytest.warns(DeprecationWarning, match='Report.fnames'): diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py index 8c42ee3ee36..c584291a9a5 100644 --- a/mne/simulation/tests/test_raw.py +++ b/mne/simulation/tests/test_raw.py @@ -209,12 +209,12 @@ def _get_head_pos_sim(raw): return head_pos_sim -def test_simulate_raw_sphere(raw_data, tmpdir): +def test_simulate_raw_sphere(raw_data, tmp_path): """Test simulation of raw data with sphere model.""" seed = 42 raw, src, stc, trans, sphere = raw_data assert len(pick_types(raw.info, meg=False, ecg=True)) == 1 - tempdir = str(tmpdir) + tempdir = str(tmp_path) # head pos head_pos_sim = _get_head_pos_sim(raw) diff --git a/mne/stats/tests/test_cluster_level.py 
b/mne/stats/tests/test_cluster_level.py index 05685b321cd..f1116d5a50c 100644 --- a/mne/stats/tests/test_cluster_level.py +++ b/mne/stats/tests/test_cluster_level.py @@ -102,9 +102,9 @@ def test_thresholds(numba_conditional): buffer_size=None, out_type='mask') -def test_cache_dir(tmpdir, numba_conditional): +def test_cache_dir(tmp_path, numba_conditional): """Test use of cache dir.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) orig_dir = os.getenv('MNE_CACHE_DIR', None) orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None) rng = np.random.RandomState(0) diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index cceeba54b8e..0f97167ed8b 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -110,10 +110,10 @@ def test_basics(): assert_array_equal(raw.annotations.description, np.repeat('test', 10)) -def test_annot_sanitizing(tmpdir): +def test_annot_sanitizing(tmp_path): """Test description sanitizing.""" annot = Annotations([0], [1], ['a;:b']) - fname = str(tmpdir.join('custom-annot.fif')) + fname = str(tmp_path.join('custom-annot.fif')) annot.save(fname) annot_read = read_annotations(fname) _assert_annotations_equal(annot, annot_read) @@ -174,7 +174,7 @@ def test_raw_array_orig_times(): assert raw.annotations.orig_time == orig_time -def test_crop(tmpdir): +def test_crop(tmp_path): """Test cropping with annotations.""" raw = read_raw_fif(fif_fname) events = mne.find_events(raw) @@ -226,7 +226,7 @@ def test_crop(tmpdir): assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2) # Test IO - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'test-annot.fif') raw.annotations.save(fname) annot_read = read_annotations(fname) @@ -824,7 +824,7 @@ def _assert_annotations_equal(a, b, tol=0): @pytest.fixture(scope='function', params=('ch_names', 'fmt')) -def dummy_annotation_file(tmpdir_factory, ch_names, fmt): +def dummy_annotation_file(tmp_path_factory, ch_names, fmt): """Create csv file for testing.""" if fmt == 'csv': content = ("onset,duration,description\n" @@ -852,7 +852,7 @@ def dummy_annotation_file(tmpdir_factory, ch_names, fmt): content[-1] += ',MEG0111:MEG2563' content = '\n'.join(content) - fname = tmpdir_factory.mktemp('data').join(f'annotations-annot.{fmt}') + fname = tmp_path_factory.mktemp('data').join(f'annotations-annot.{fmt}') if isinstance(content, str): fname.write(content) else: @@ -866,7 +866,7 @@ def dummy_annotation_file(tmpdir_factory, ch_names, fmt): 'txt', 'fif' ]) -def test_io_annotation(dummy_annotation_file, tmpdir, fmt, ch_names): +def test_io_annotation(dummy_annotation_file, tmp_path, fmt, ch_names): """Test CSV, TXT, and FIF input/output (which support ch_names).""" annot = read_annotations(dummy_annotation_file) assert annot.orig_time == _ORIG_TIME @@ -878,7 +878,7 @@ def test_io_annotation(dummy_annotation_file, tmpdir, fmt, ch_names): tol=1e-6) # Now test writing - fname = tmpdir.join(f'annotations-annot.{fmt}') + fname = tmp_path.join(f'annotations-annot.{fmt}') annot.save(fname) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) @@ -891,13 +891,13 @@ def test_io_annotation(dummy_annotation_file, tmpdir, fmt, ch_names): @requires_version('pandas') -def test_broken_csv(tmpdir): +def test_broken_csv(tmp_path): """Test broken .csv that does not use timestamps.""" content = ("onset,duration,description\n" "1.,1.0,AA\n" "3.,2.425,BB") - fname = tmpdir.join('annotations_broken.csv') + fname = tmp_path.join('annotations_broken.csv') fname.write(content) 
with pytest.warns(RuntimeWarning, match='save your CSV as a TXT'): read_annotations(fname) @@ -906,7 +906,7 @@ def test_broken_csv(tmpdir): # Test for IO with .txt files @pytest.fixture(scope='function', params=('ch_names',)) -def dummy_annotation_txt_file(tmpdir_factory, ch_names): +def dummy_annotation_txt_file(tmp_path_factory, ch_names): """Create txt file for testing.""" content = ("3.14, 42, AA \n" "6.28, 48, BB") @@ -916,13 +916,13 @@ def dummy_annotation_txt_file(tmpdir_factory, ch_names): content[1] = content[1].strip() + ', MEG0111:MEG2563' content = '\n'.join(content) - fname = tmpdir_factory.mktemp('data').join('annotations.txt') + fname = tmp_path_factory.mktemp('data').join('annotations.txt') fname.write(content) return fname @pytest.mark.parametrize('ch_names', (False, True)) -def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory, +def test_io_annotation_txt(dummy_annotation_txt_file, tmp_path_factory, ch_names): """Test TXT input/output without meas_date.""" annot = read_annotations(str(dummy_annotation_txt_file)) @@ -934,7 +934,7 @@ def test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory, annot, Annotations([3.14, 6.28], [42., 48], ['AA', 'BB'], **kwargs)) # Now test writing - fname = str(tmpdir_factory.mktemp('data').join('annotations.txt')) + fname = str(tmp_path_factory.mktemp('data').join('annotations.txt')) annot.save(fname) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) @@ -967,7 +967,7 @@ def test_handle_meas_date(meas_date, out): assert _handle_meas_date(meas_date) == out -def test_read_annotation_txt_header(tmpdir): +def test_read_annotation_txt_header(tmp_path): """Test TXT orig_time recovery.""" content = ("# A something \n" "# orig_time : 42\n" @@ -975,29 +975,29 @@ def test_read_annotation_txt_header(tmpdir): "# orig_time : 42\n" "# C\n" "Done") - fname = tmpdir.join('header.txt') + fname = tmp_path.join('header.txt') fname.write(content) orig_time = _read_annotations_txt_parse_header(fname) want = datetime.fromtimestamp(1038942071.7201, timezone.utc) assert orig_time == want -def test_read_annotation_txt_one_segment(tmpdir): +def test_read_annotation_txt_one_segment(tmp_path): """Test empty TXT input/output.""" content = ("# MNE-Annotations\n" "# onset, duration, description\n" "3.14, 42, AA") - fname = tmpdir.join('one-annotations.txt') + fname = tmp_path.join('one-annotations.txt') fname.write(content) annot = read_annotations(fname) _assert_annotations_equal(annot, Annotations(3.14, 42, ['AA'])) -def test_read_annotation_txt_empty(tmpdir): +def test_read_annotation_txt_empty(tmp_path): """Test empty TXT input/output.""" content = ("# MNE-Annotations\n" "# onset, duration, description\n") - fname = tmpdir.join('empty-annotations.txt') + fname = tmp_path.join('empty-annotations.txt') fname.write(content) annot = read_annotations(fname) _assert_annotations_equal(annot, Annotations([], [], [])) @@ -1089,7 +1089,7 @@ def test_sorting(): assert_array_equal(annot.duration, duration) -def test_date_none(tmpdir): +def test_date_none(tmp_path): """Test that DATE_NONE is used properly.""" # Regression test for gh-5908 n_chans = 139 @@ -1100,7 +1100,7 @@ def test_date_none(tmpdir): info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=2048) assert info['meas_date'] is None raw = RawArray(data=data, info=info) - fname = op.join(str(tmpdir), 'test-raw.fif') + fname = op.join(str(tmp_path), 'test-raw.fif') raw.save(fname) raw_read = read_raw_fif(fname, preload=True) assert raw_read.info['meas_date'] is None 
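This patch only renames the fixture: tmpdir is a py.path.local while tmp_path is a pathlib.Path, so call sites that go through str(tmp_path) or op.join(str(tmp_path), ...) keep working unchanged, whereas the tmp_path.join(...) calls kept above rely on a py.path method that pathlib.Path does not provide and are presumably converted to the / operator later in the series. A minimal sketch of the two spellings, assuming pytest >= 3.9 (where both fixtures exist); the test names below are illustrative only and not part of the patch:

import os.path as op
from pathlib import Path


def test_with_tmpdir(tmpdir):  # illustrative only; legacy fixture: py.path.local
    fname = tmpdir.join('test-ave.fif')            # py.path API
    legacy = op.join(str(tmpdir), 'test-ave.fif')  # pattern used throughout these tests


def test_with_tmp_path(tmp_path):  # illustrative only; new fixture: pathlib.Path
    assert isinstance(tmp_path, Path)
    same = op.join(str(tmp_path), 'test-ave.fif')  # still valid after the rename
    idiomatic = tmp_path / 'test-ave.fif'          # pathlib replacement for .join()

The annotation fixtures above use tmpdir_factory / tmp_path_factory the same way; tmp_path_factory.mktemp() also returns a pathlib.Path, so the .join() and .write() calls on its result map to the / operator and Path.write_text(). A hypothetical fixture (name and contents invented for illustration) showing that spelling:

import pytest


@pytest.fixture(scope='function')
def dummy_csv_file(tmp_path_factory):  # hypothetical fixture, not part of the patch
    fname = tmp_path_factory.mktemp('data') / 'annotations-annot.csv'
    fname.write_text('onset,duration,description\n1.0,1.0,AA\n')  # pathlib spelling of .write()
    return fname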
diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py index 2ea1f8a565b..d15ac886726 100644 --- a/mne/tests/test_bem.py +++ b/mne/tests/test_bem.py @@ -71,10 +71,10 @@ def _compare_bem_solutions(sol_a, sol_b): @testing.requires_testing_data @requires_h5py @pytest.mark.parametrize('ext', ('fif', 'h5')) -def test_io_bem(tmpdir, ext): +def test_io_bem(tmp_path, ext): """Test reading and writing of bem surfaces and solutions.""" import h5py - temp_bem = op.join(str(tmpdir), f'temp-bem.{ext}') + temp_bem = op.join(str(tmp_path), f'temp-bem.{ext}') # model with pytest.raises(ValueError, match='BEM data not found'): read_bem_surfaces(fname_raw) @@ -95,7 +95,7 @@ def test_io_bem(tmpdir, ext): # solution with pytest.raises(RuntimeError, match='No BEM solution found'): read_bem_solution(fname_bem_3) - temp_sol = op.join(str(tmpdir), f'temp-sol.{ext}') + temp_sol = op.join(str(tmp_path), f'temp-sol.{ext}') sol = read_bem_solution(fname_bem_sol_3) assert 'BEM' in repr(sol) write_bem_solution(temp_sol, sol) @@ -141,9 +141,9 @@ def test_make_sphere_model(): pytest.param(dict(), fname_bem_3, marks=pytest.mark.slowtest), # Azure [dict(conductivity=[0.3]), fname_bem_1], ]) -def test_make_bem_model(tmpdir, kwargs, fname): +def test_make_bem_model(tmp_path, kwargs, fname): """Test BEM model creation from Python with I/O.""" - fname_temp = tmpdir.join('temp-bem.fif') + fname_temp = tmp_path.join('temp-bem.fif') with catch_logging() as log: model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir, verbose=True, **kwargs) @@ -166,25 +166,25 @@ def test_make_bem_model(tmpdir, kwargs, fname): @testing.requires_testing_data -def test_bem_model_topology(tmpdir): +def test_bem_model_topology(tmp_path): """Test BEM model topological checks.""" # bad topology (not enough neighboring tris) - makedirs(tmpdir.join('foo', 'bem')) + makedirs(tmp_path.join('foo', 'bem')) for fname in ('inner_skull', 'outer_skull', 'outer_skin'): fname += '.surf' copy(op.join(subjects_dir, 'sample', 'bem', fname), - str(tmpdir.join('foo', 'bem', fname))) - outer_fname = tmpdir.join('foo', 'bem', 'outer_skull.surf') + str(tmp_path.join('foo', 'bem', fname))) + outer_fname = tmp_path.join('foo', 'bem', 'outer_skull.surf') rr, tris = read_surface(outer_fname) tris = tris[:-1] write_surface(outer_fname, rr, tris[:-1], overwrite=True) with pytest.raises(RuntimeError, match='Surface outer skull is not compl'): - make_bem_model('foo', None, subjects_dir=tmpdir) + make_bem_model('foo', None, subjects_dir=tmp_path) # Now get past this error to reach gh-6127 (not enough neighbor tris) rr_bad = np.concatenate([rr, np.mean(rr, axis=0, keepdims=True)], axis=0) write_surface(outer_fname, rr_bad, tris, overwrite=True) with pytest.raises(ValueError, match='Surface outer skull.*triangles'): - make_bem_model('foo', None, subjects_dir=tmpdir) + make_bem_model('foo', None, subjects_dir=tmp_path) @pytest.mark.slowtest @@ -193,7 +193,7 @@ def test_bem_model_topology(tmpdir): [(0.3,), fname_bem_sol_1], [(0.3, 0.006, 0.3), fname_bem_sol_3], ]) -def test_bem_solution(tmpdir, cond, fname): +def test_bem_solution(tmp_path, cond, fname): """Test making a BEM solution from Python with I/O.""" # test degenerate conditions surf = read_bem_surfaces(fname_bem_1)[0] @@ -218,7 +218,7 @@ def test_bem_solution(tmpdir, cond, fname): pytest.raises(RuntimeError, _check_surface_size, surfs[1]) # actually test functionality - fname_temp = op.join(str(tmpdir), 'temp-bem-sol.fif') + fname_temp = op.join(str(tmp_path), 'temp-bem-sol.fif') # use a model and solution made 
in Python for model_type in ('python', 'c'): if model_type == 'python': @@ -392,10 +392,10 @@ def test_fit_sphere_to_headshape(): @pytest.mark.slowtest # ~2 min on Azure Windows @testing.requires_testing_data -def test_io_head_bem(tmpdir): +def test_io_head_bem(tmp_path): """Test reading and writing of defective head surfaces.""" head = read_bem_surfaces(fname_dense_head)[0] - fname_defect = op.join(str(tmpdir), 'temp-head-defect.fif') + fname_defect = op.join(str(tmp_path), 'temp-head-defect.fif') # create defects head['rr'][0] = np.array([-0.01487014, -0.04563854, -0.12660208]) head['tris'][0] = np.array([21919, 21918, 21907]) diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index 42031030dd7..c2d194c53ad 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -99,9 +99,9 @@ def test_chpi_adjust(): @testing.requires_testing_data -def test_read_write_head_pos(tmpdir): +def test_read_write_head_pos(tmp_path): """Test reading and writing head position quaternion parameters.""" - temp_name = op.join(str(tmpdir), 'temp.pos') + temp_name = op.join(str(tmp_path), 'temp.pos') # This isn't a 100% valid quat matrix but it should be okay for tests head_pos_rand = np.random.RandomState(0).randn(20, 10) # This one is valid @@ -119,9 +119,9 @@ def test_read_write_head_pos(tmpdir): @testing.requires_testing_data -def test_hpi_info(tmpdir): +def test_hpi_info(tmp_path): """Test getting HPI info.""" - temp_name = op.join(str(tmpdir), 'temp_raw.fif') + temp_name = op.join(str(tmp_path), 'temp_raw.fif') for fname in (chpi_fif_fname, sss_fif_fname): raw = read_raw_fif(fname, allow_maxshield='yes').crop(0, 0.1) assert len(raw.info['hpi_subsystem']) > 0 diff --git a/mne/tests/test_coreg.py b/mne/tests/test_coreg.py index 74cd09602e9..4d6452d1eab 100644 --- a/mne/tests/test_coreg.py +++ b/mne/tests/test_coreg.py @@ -68,11 +68,11 @@ def make_dig(coords, cf): @pytest.mark.slowtest # can take forever on OSX Travis @testing.requires_testing_data @pytest.mark.parametrize('scale', (.9, [1, .2, .8])) -def test_scale_mri(tmpdir, few_surfaces, scale): +def test_scale_mri(tmp_path, few_surfaces, scale): """Test creating fsaverage and scaling it.""" # create fsaverage using the testing "fsaverage" instead of the FreeSurfer # one - tempdir = str(tmpdir) + tempdir = str(tmp_path) fake_home = testing.data_path() create_default_subject(subjects_dir=tempdir, fs_home=fake_home, verbose=True) @@ -167,10 +167,10 @@ def test_scale_mri(tmpdir, few_surfaces, scale): @pytest.mark.slowtest # can take forever on OSX Travis @testing.requires_testing_data @requires_nibabel() -def test_scale_mri_xfm(tmpdir, few_surfaces): +def test_scale_mri_xfm(tmp_path, few_surfaces): """Test scale_mri transforms and MRI scaling.""" # scale fsaverage - tempdir = str(tmpdir) + tempdir = str(tmp_path) fake_home = testing.data_path() # add fsaverage create_default_subject(subjects_dir=tempdir, fs_home=fake_home, diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index 43bafbb2a63..3e05db1212a 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -193,9 +193,9 @@ def _assert_reorder(cov_new, cov_orig, order): cov_orig['data'], atol=1e-20) -def test_ad_hoc_cov(tmpdir): +def test_ad_hoc_cov(tmp_path): """Test ad hoc cov creation and I/O.""" - out_fname = tmpdir.join('test-cov.fif') + out_fname = tmp_path.join('test-cov.fif') evoked = read_evokeds(ave_fname)[0] cov = make_ad_hoc_cov(evoked.info) cov.save(out_fname) @@ -218,13 +218,13 @@ def test_ad_hoc_cov(tmpdir): cov._get_square() -def test_io_cov(tmpdir): +def 
test_io_cov(tmp_path): """Test IO for noise covariance matrices.""" cov = read_cov(cov_fname) cov['method'] = 'empirical' cov['loglik'] = -np.inf - cov.save(tmpdir.join('test-cov.fif')) - cov2 = read_cov(tmpdir.join('test-cov.fif')) + cov.save(tmp_path.join('test-cov.fif')) + cov2 = read_cov(tmp_path.join('test-cov.fif')) assert_array_almost_equal(cov.data, cov2.data) assert_equal(cov['method'], cov2['method']) assert_equal(cov['loglik'], cov2['loglik']) @@ -232,24 +232,24 @@ def test_io_cov(tmpdir): cov2 = read_cov(cov_gz_fname) assert_array_almost_equal(cov.data, cov2.data) - cov2.save(tmpdir.join('test-cov.fif.gz')) - cov2 = read_cov(tmpdir.join('test-cov.fif.gz')) + cov2.save(tmp_path.join('test-cov.fif.gz')) + cov2 = read_cov(tmp_path.join('test-cov.fif.gz')) assert_array_almost_equal(cov.data, cov2.data) cov['bads'] = ['EEG 039'] cov_sel = pick_channels_cov(cov, exclude=cov['bads']) assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])) assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']) - cov_sel.save(tmpdir.join('test-cov.fif')) + cov_sel.save(tmp_path.join('test-cov.fif')) cov2 = read_cov(cov_gz_fname) assert_array_almost_equal(cov.data, cov2.data) - cov2.save(tmpdir.join('test-cov.fif.gz')) - cov2 = read_cov(tmpdir.join('test-cov.fif.gz')) + cov2.save(tmp_path.join('test-cov.fif.gz')) + cov2 = read_cov(tmp_path.join('test-cov.fif.gz')) assert_array_almost_equal(cov.data, cov2.data) # test warnings on bad filenames - cov_badname = tmpdir.join('test-bad-name.fif.gz') + cov_badname = tmp_path.join('test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='-cov.fif'): write_cov(cov_badname, cov) with pytest.warns(RuntimeWarning, match='-cov.fif'): @@ -257,7 +257,7 @@ def test_io_cov(tmpdir): @pytest.mark.parametrize('method', (None, 'empirical', 'shrunk')) -def test_cov_estimation_on_raw(method, tmpdir): +def test_cov_estimation_on_raw(method, tmp_path): """Test estimation from raw (typically empty room).""" if method == 'shrunk': try: @@ -301,8 +301,8 @@ def test_cov_estimation_on_raw(method, tmpdir): assert_snr(cov.data, cov_mne.data, 170) # test IO when computation done in Python - cov.save(tmpdir.join('test-cov.fif')) # test saving - cov_read = read_cov(tmpdir.join('test-cov.fif')) + cov.save(tmp_path.join('test-cov.fif')) # test saving + cov_read = read_cov(tmp_path.join('test-cov.fif')) assert cov_read.ch_names == cov.ch_names assert cov_read.nfree == cov.nfree assert_array_almost_equal(cov.data, cov_read.data) @@ -359,7 +359,7 @@ def _assert_cov(cov, cov_desired, tol=0.005, nfree=True): @pytest.mark.slowtest @pytest.mark.parametrize('rank', ('full', None)) -def test_cov_estimation_with_triggers(rank, tmpdir): +def test_cov_estimation_with_triggers(rank, tmp_path): """Test estimation from raw with triggers.""" raw = read_raw_fif(raw_fname) raw.set_eeg_reference(projection=True).load_data() @@ -406,8 +406,8 @@ def test_cov_estimation_with_triggers(rank, tmpdir): keep_sample_mean=False, method='shrunk', rank=rank) # test IO when computation done in Python - cov.save(tmpdir.join('test-cov.fif')) # test saving - cov_read = read_cov(tmpdir.join('test-cov.fif')) + cov.save(tmp_path.join('test-cov.fif')) # test saving + cov_read = read_cov(tmp_path.join('test-cov.fif')) _assert_cov(cov, cov_read, 1e-5) # cov with list of epochs with different projectors diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py index 73c45be0069..2cf22de1e0c 100644 --- a/mne/tests/test_dipole.py +++ b/mne/tests/test_dipole.py @@ -75,11 +75,11 @@ def _check_dipole(dip, 
n_dipoles): @testing.requires_testing_data -def test_io_dipoles(tmpdir): +def test_io_dipoles(tmp_path): """Test IO for .dip files.""" dipole = read_dipole(fname_dip) assert 'Dipole ' in repr(dipole) # test repr - out_fname = op.join(str(tmpdir), 'temp.dip') + out_fname = op.join(str(tmp_path), 'temp.dip') dipole.save(out_fname) dipole_new = read_dipole(out_fname) _compare_dipoles(dipole, dipole_new) @@ -105,10 +105,10 @@ def test_dipole_fitting_ctf(): @testing.requires_testing_data @requires_nibabel() @requires_mne -def test_dipole_fitting(tmpdir): +def test_dipole_fitting(tmp_path): """Test dipole fitting.""" amp = 100e-9 - tempdir = str(tmpdir) + tempdir = str(tmp_path) rng = np.random.RandomState(0) fname_dtemp = op.join(tempdir, 'test.dip') fname_sim = op.join(tempdir, 'test-ave.fif') @@ -208,7 +208,7 @@ def test_dipole_fitting(tmpdir): @testing.requires_testing_data -def test_dipole_fitting_fixed(tmpdir): +def test_dipole_fitting_fixed(tmp_path): """Test dipole fitting with a fixed position.""" tpeak = 0.073 sphere = make_sphere_model(head_radius=0.1) @@ -244,7 +244,7 @@ def test_dipole_fitting_fixed(tmpdir): assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori) assert_allclose(dip_fixed.data[1, t_idx], gof) assert_allclose(resid.data, resid_fixed.data[:, [t_idx]]) - _check_roundtrip_fixed(dip_fixed, tmpdir) + _check_roundtrip_fixed(dip_fixed, tmp_path) # bad resetting evoked.info['bads'] = [evoked.ch_names[3]] dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori) @@ -380,13 +380,13 @@ def test_accuracy(): @testing.requires_testing_data -def test_dipole_fixed(tmpdir): +def test_dipole_fixed(tmp_path): """Test reading a fixed-position dipole (from Xfit).""" dip = read_dipole(fname_xfit_dip) # print the representation of the object DipoleFixed assert 'DipoleFixed ' in repr(dip) - _check_roundtrip_fixed(dip, tmpdir) + _check_roundtrip_fixed(dip, tmp_path) with pytest.warns(RuntimeWarning, match='extra fields'): dip_txt = read_dipole(fname_xfit_dip_txt) assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0]) @@ -396,9 +396,9 @@ def test_dipole_fixed(tmpdir): assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5]) -def _check_roundtrip_fixed(dip, tmpdir): +def _check_roundtrip_fixed(dip, tmp_path): """Check roundtrip IO for fixed dipoles.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) dip.save(op.join(tempdir, 'test-dip.fif.gz')) dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz')) assert_allclose(dip_read.data, dip_read.data) @@ -423,14 +423,14 @@ def test_get_phantom_dipoles(): @testing.requires_testing_data -def test_confidence(tmpdir): +def test_confidence(tmp_path): """Test confidence limits.""" evoked = read_evokeds(fname_evo_full, 'Left Auditory', baseline=(None, 0)) evoked.crop(0.08, 0.08).pick_types(meg=True) # MEG-only cov = make_ad_hoc_cov(evoked.info) sphere = make_sphere_model((0., 0., 0.04), 0.08) dip_py = fit_dipole(evoked, cov, sphere)[0] - fname_test = op.join(str(tmpdir), 'temp-dip.txt') + fname_test = op.join(str(tmp_path), 'temp-dip.txt') dip_py.save(fname_test) dip_read = read_dipole(fname_test) with pytest.warns(RuntimeWarning, match="'noise/ft/cm', 'prob'"): @@ -457,7 +457,7 @@ def test_confidence(tmpdir): (fname_dip, fname_bdip), (fname_dip_xfit, fname_bdip_xfit), ]) -def test_bdip(fname_dip_, fname_bdip_, tmpdir): +def test_bdip(fname_dip_, fname_bdip_, tmp_path): """Test bdip I/O.""" # use text as veridical with pytest.warns(None): # ignored fields @@ -466,7 +466,7 @@ def test_bdip(fname_dip_, 
fname_bdip_, tmpdir): orig_size = os.stat(fname_bdip_).st_size bdip = read_dipole(fname_bdip_) # test round-trip by writing and reading, too - fname = tmpdir.join('test.bdip') + fname = tmp_path.join('test.bdip') bdip.save(fname) bdip_read = read_dipole(fname) write_size = os.stat(str(fname)).st_size diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 044e644dff5..702fa8543e6 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -695,7 +695,7 @@ def test_savgol_filter(): np.mean(data_filt[:, :, stop_mask]) * 5) -def test_filter(tmpdir): +def test_filter(tmp_path): """Test filtering.""" h_freq = 40. raw, events = _get_data()[:2] @@ -722,7 +722,7 @@ def test_filter(tmpdir): np.mean(data_filt_fft[:, :, stop_mask]) * 10) # smoke test for filtering I/O data (gh-5614) - temp_fname = op.join(str(tmpdir), 'test-epo.fif') + temp_fname = op.join(str(tmp_path), 'test-epo.fif') epochs_orig.save(temp_fname, overwrite=True) epochs = mne.read_epochs(temp_fname) epochs.filter(None, h_freq) @@ -914,7 +914,7 @@ def test_read_epochs_bad_events(): assert evoked -def test_io_epochs_basic(tmpdir): +def test_io_epochs_basic(tmp_path): """Test epochs from raw files with IO as fif file.""" raw, events, picks = _get_data(preload=True) baseline = (None, 0) @@ -972,7 +972,7 @@ def test_io_epochs_basic(tmpdir): pytest.param('delayed', marks=pytest.mark.slowtest), False, ]) -def test_epochs_io_proj(tmpdir, proj): +def test_epochs_io_proj(tmp_path, proj): """Test epochs I/O with projection.""" # Test event access on non-preloaded data (#2345) @@ -984,7 +984,7 @@ def test_epochs_io_proj(tmpdir, proj): events[::2, 1] = 1 events[1::2, 2] = 2 event_ids = dict(a=1, b=2) - temp_fname = op.join(str(tmpdir), 'test-epo.fif') + temp_fname = tmp_path / 'test-epo.fif' epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks, proj=proj, reject=reject, flat=dict(), @@ -1018,14 +1018,14 @@ def test_epochs_io_proj(tmpdir, proj): @pytest.mark.slowtest @pytest.mark.parametrize('preload', (False, True)) -def test_epochs_io_preload(tmpdir, preload): +def test_epochs_io_preload(tmp_path, preload): """Test epochs I/O with preloading.""" # due to reapplication of the proj matrix, this is our quality limit # for some tests tols = dict(atol=1e-3, rtol=1e-20) raw, events, picks = _get_data(preload=True) - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'test-epo.fif') temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif') baseline = (None, 0) @@ -1180,7 +1180,7 @@ def test_epochs_io_preload(tmpdir, preload): not check_version('pandas'), reason='Requires Pandas')) ]) @pytest.mark.parametrize('concat', (False, True)) -def test_split_saving(tmpdir, split_size, n_epochs, n_files, size, metadata, +def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, concat): """Test saving split epochs.""" # See gh-5102 @@ -1205,7 +1205,7 @@ def test_split_saving(tmpdir, split_size, n_epochs, n_files, size, metadata, epochs = concatenate_epochs([epochs[ii] for ii in range(len(epochs))]) epochs_data = epochs.get_data() assert len(epochs) == n_epochs - fname = str(tmpdir.join('test-epo.fif')) + fname = str(tmp_path.join('test-epo.fif')) epochs.save(fname, split_size=split_size, overwrite=True) got_size = _get_split_size(split_size) assert got_size == size @@ -1218,7 +1218,7 @@ def test_split_saving(tmpdir, split_size, n_epochs, n_files, size, metadata, # Check that if BIDS is used and no split is needed it defaults to # simple writing without _split- entity. 
- split_fname = str(tmpdir.join('test_epo.fif')) + split_fname = str(tmp_path.join('test_epo.fif')) split_fname_neuromag_part1 = split_fname.replace( 'epo.fif', f'epo-{n_files + 1}.fif') split_fname_bids_part1 = split_fname.replace( @@ -1243,8 +1243,8 @@ def test_split_saving(tmpdir, split_size, n_epochs, n_files, size, metadata, assert op.isfile(split_fname_bids_part1) @pytest.mark.slowtest -def test_split_many_reset(tmpdir): +def test_split_many_reset(tmp_path): """Test splitting with many events and using reset.""" data = np.zeros((1000, 1, 1024)) # 1 ch, 1024 samples assert data[0, 0].nbytes == 8192 # 8 kB per epoch @@ -1253,7 +1257,7 @@ def test_split_many_reset(tmpdir): epochs = EpochsArray(data, info, tmin=0., selection=selection) assert len(epochs.drop_log) == 101000 assert len(epochs) == len(data) == len(epochs.events) - fname = str(tmpdir.join('temp-epo.fif')) + fname = str(tmp_path.join('temp-epo.fif')) for split_size in ('0.5MB', '1MB', '2MB'): # tons of overhead from sel with pytest.raises(ValueError, match='too small to safely'): epochs.save(fname, split_size=split_size, verbose='debug') @@ -1267,7 +1271,7 @@ def test_split_many_reset(tmpdir): mb = 3 * 1024 * 1024 _assert_splits(fname, 6, mb) # reset, then it should work - fname = str(tmpdir.join('temp-reset-epo.fif')) + fname = str(tmp_path.join('temp-reset-epo.fif')) epochs.reset_drop_log_selection() epochs.save(fname, split_size=split_size, verbose='debug') _assert_splits(fname, 4, mb) @@ -1291,9 +1295,9 @@ def _assert_splits(fname, n, size): assert not op.isfile(bad_fname), f'Errantly wrote {bad_fname}' -def test_epochs_proj(tmpdir): +def test_epochs_proj(tmp_path): """Test handling projection (apply proj in Raw or in Epochs).""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw, events, picks = _get_data() exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True, @@ -1391,9 +1395,9 @@ def test_evoked_arithmetic(): assert_equal(evoked_avg.nave, evoked1.nave + evoked2.nave) -def test_evoked_io_from_epochs(tmpdir): +def test_evoked_io_from_epochs(tmp_path): """Test IO of evoked data made from epochs.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw, events, picks = _get_data() with raw.info._unlock(): raw.info['lowpass'] = 40 # avoid aliasing warnings @@ -1454,10 +1458,10 @@ def test_evoked_io_from_epochs(tmpdir): write_evokeds(fname_temp, evokeds) -def test_evoked_standard_error(tmpdir): +def test_evoked_standard_error(tmp_path): """Test calculation and read/write of standard error.""" raw, events, picks = _get_data() - tempdir = str(tmpdir) + tempdir = str(tmp_path) epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks) evoked = [epochs.average(), epochs.standard_error()] write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked) @@ -1482,9 +1486,9 @@ def test_evoked_standard_error(tmpdir): assert ave.first == ave2.first -def test_reject_epochs(tmpdir): +def test_reject_epochs(tmp_path): """Test of epochs rejection.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'test-epo.fif') raw, events, picks = _get_data() @@ -1664,9 +1668,9 @@ def test_comparision_with_c(): assert_array_almost_equal(evoked.times, c_evoked.times, 12) -def test_crop(tmpdir): +def test_crop(tmp_path): """Test of crop of epochs.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'test-epo.fif') raw, events, picks =
_get_data() @@ -2085,9 +2089,9 @@ def test_epoch_eq(): epochs.equalize_event_counts(1.5) -def test_access_by_name(tmpdir): +def test_access_by_name(tmp_path): """Test accessing epochs by event name and on_missing for rare events.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw, events, picks = _get_data() # Test various invalid inputs @@ -2672,9 +2676,9 @@ def make_epochs(picks, proj): assert_allclose(new_proj[n_meg:, n_meg:], np.eye(n_eeg), atol=1e-12) -def test_array_epochs(tmpdir): +def test_array_epochs(tmp_path): """Test creating epochs from array.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) # creating data = rng.random_sample((10, 20, 300)) @@ -2911,7 +2915,7 @@ def test_default_values(): @requires_pandas -def test_metadata(tmpdir): +def test_metadata(tmp_path): """Test metadata support with pandas.""" from pandas import DataFrame @@ -2978,7 +2982,7 @@ def test_metadata(tmpdir): # I/O # Make sure values don't change with I/O - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'tmp-epo.fif') temp_one_fname = op.join(tempdir, 'tmp-one-epo.fif') with catch_logging() as log: @@ -3198,9 +3202,9 @@ def test_events_list(): assert (epochs._repr_html_()) # test _repr_html_ -def test_save_overwrite(tmpdir): +def test_save_overwrite(tmp_path): """Test saving with overwrite functionality.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw = mne.io.RawArray(np.random.RandomState(0).randn(100, 10000), mne.create_info(100, 1000.)) @@ -3239,14 +3243,14 @@ def test_save_overwrite(tmpdir): @pytest.mark.parametrize('preload', (True, False)) @pytest.mark.parametrize('is_complex', (True, False)) @pytest.mark.parametrize('fmt, rtol', [('single', 2e-6), ('double', 1e-10)]) -def test_save_complex_data(tmpdir, preload, is_complex, fmt, rtol): +def test_save_complex_data(tmp_path, preload, is_complex, fmt, rtol): """Test whether epochs of hilbert-transformed data can be saved.""" raw, events = _get_data()[:2] raw.load_data() if is_complex: raw.apply_hilbert(envelope=False, n_fft=None) epochs = Epochs(raw, events[:1], preload=True)[0] - temp_fname = op.join(str(tmpdir), 'test-epo.fif') + temp_fname = op.join(str(tmp_path), 'test-epo.fif') epochs.save(temp_fname, fmt=fmt) data = epochs.get_data().copy() epochs_read = read_epochs(temp_fname, proj=False, preload=preload) @@ -3261,21 +3265,21 @@ def test_save_complex_data(tmpdir, preload, is_complex, fmt, rtol): assert_allclose(data_read, data, rtol=rtol) -def test_no_epochs(tmpdir): +def test_no_epochs(tmp_path): """Test that having the first epoch bad does not break writing.""" # a regression noticed in #5564 raw, events = _get_data()[:2] reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) raw.info['bads'] = ['MEG 2443', 'EEG 053'] epochs = mne.Epochs(raw, events, reject=reject) - epochs.save(op.join(str(tmpdir), 'sample-epo.fif'), overwrite=True) + epochs.save(op.join(str(tmp_path), 'sample-epo.fif'), overwrite=True) assert 0 not in epochs.selection assert len(epochs) > 0 # and with no epochs remaining raw.info['bads'] = [] epochs = mne.Epochs(raw, events, reject=reject) with pytest.warns(RuntimeWarning, match='no data'): - epochs.save(op.join(str(tmpdir), 'sample-epo.fif'), overwrite=True) + epochs.save(op.join(str(tmp_path), 'sample-epo.fif'), overwrite=True) assert len(epochs) == 0 # all dropped @@ -3427,9 +3431,9 @@ def _get_selection(epochs): @pytest.mark.parametrize('kind', ('file', 'bytes')) @pytest.mark.parametrize('preload', (True, False)) -def test_file_like(kind, preload, tmpdir): +def 
test_file_like(kind, preload, tmp_path): """Test handling with file-like objects.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) raw = mne.io.RawArray(np.random.RandomState(0).randn(100, 10000), mne.create_info(100, 1000.)) events = mne.make_fixed_length_events(raw, 1) @@ -3509,7 +3513,7 @@ def test_make_fixed_length_epochs(): assert '2' in epochs.event_id and len(epochs.event_id) == 1 -def test_epochs_huge_events(tmpdir): +def test_epochs_huge_events(tmp_path): """Test epochs with event numbers that are too large.""" data = np.zeros((1, 1, 1000)) info = create_info(1, 1000., 'eeg') @@ -3522,7 +3526,7 @@ def test_epochs_huge_events(tmpdir): epochs = EpochsArray(data, info) epochs.events = events with pytest.raises(TypeError, match='exceeds maximum'): - epochs.save(tmpdir.join('temp-epo.fif')) + epochs.save(tmp_path.join('temp-epo.fif')) def _old_bad_write(fid, kind, arr): @@ -3532,7 +3536,7 @@ def _old_bad_write(fid, kind, arr): return write_int(fid, kind, arr) -def test_concat_overflow(tmpdir, monkeypatch): +def test_concat_overflow(tmp_path, monkeypatch): """Test overflow events during concat.""" data = np.zeros((2, 10, 1000)) events = np.array([[0, 0, 1], [INT32_MAX, 0, 2]]) @@ -3542,7 +3546,7 @@ def test_concat_overflow(tmpdir, monkeypatch): with pytest.warns(RuntimeWarning, match='consecutive increasing'): epochs = mne.concatenate_epochs((epochs_1, epochs_2)) assert_array_less(0, epochs.events[:, 0]) - fname = tmpdir.join('temp-epo.fif') + fname = tmp_path.join('temp-epo.fif') epochs.save(fname) epochs = read_epochs(fname) assert_array_less(0, epochs.events[:, 0]) @@ -3556,7 +3560,7 @@ def test_concat_overflow(tmpdir, monkeypatch): assert_array_less(epochs.events[:, 0], INT32_MAX + 1) -def test_epochs_baseline_after_cropping(tmpdir): +def test_epochs_baseline_after_cropping(tmp_path): """Epochs.baseline should be retained if baseline period was cropped.""" sfreq = 1000 tstep = 1. / sfreq @@ -3596,7 +3600,7 @@ def test_epochs_baseline_after_cropping(tmpdir): epochs_orig.get_data().squeeze()[200:]) # Test I/O roundtrip. 
- epochs_fname = tmpdir.join('temp-cropped-epo.fif') + epochs_fname = tmp_path.join('temp-cropped-epo.fif') epochs_cropped.save(epochs_fname) epochs_cropped_read = mne.read_epochs(epochs_fname) diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index f58aaf83de4..3d96d531c9f 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -110,11 +110,11 @@ def test_merge_events(): assert_array_equal(events, events_good) -def test_io_events(tmpdir): +def test_io_events(tmp_path): """Test IO for events.""" # Test binary fif IO events = read_events(fname) # Use as the gold standard - fname_temp = tmpdir.join('events-eve.fif') + fname_temp = tmp_path.join('events-eve.fif') write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) @@ -128,7 +128,7 @@ def test_io_events(tmpdir): assert_array_almost_equal(events, events2) # Test new format text file IO - fname_temp = str(tmpdir.join('events.eve')) + fname_temp = str(tmp_path.join('events.eve')) write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) @@ -144,7 +144,7 @@ def test_io_events(tmpdir): assert_array_almost_equal(events, events2) # Test event selection - fname_temp = tmpdir.join('events-eve.fif') + fname_temp = tmp_path.join('events-eve.fif') a = read_events(fname_temp, include=1) b = read_events(fname_temp, include=[1]) c = read_events(fname_temp, exclude=[2, 3, 4, 5, 32]) @@ -167,13 +167,13 @@ def test_io_events(tmpdir): assert_array_almost_equal(events, events2) # Test text file IO for 1 event - fname_temp = str(tmpdir.join('events.eve')) + fname_temp = str(tmp_path.join('events.eve')) write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) # test warnings on bad filenames - fname2 = tmpdir.join('test-bad-name.fif') + fname2 = tmp_path.join('test-bad-name.fif') with pytest.warns(RuntimeWarning, match='-eve.fif'): write_events(fname2, events) with pytest.warns(RuntimeWarning, match='-eve.fif'): diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py index bc56136e628..402401953d4 100644 --- a/mne/tests/test_evoked.py +++ b/mne/tests/test_evoked.py @@ -176,14 +176,14 @@ def _aspect_kinds(): @pytest.mark.parametrize('aspect_kind', _aspect_kinds()) -def test_evoked_aspects(aspect_kind, tmpdir): +def test_evoked_aspects(aspect_kind, tmp_path): """Test handling of evoked aspects.""" # gh-6359 ave = read_evokeds(fname, 0) ave._aspect_kind = aspect_kind assert 'Evoked' in repr(ave) # for completeness let's try a round-trip - temp_fname = op.join(str(tmpdir), 'test-ave.fif') + temp_fname = op.join(str(tmp_path), 'test-ave.fif') ave.save(temp_fname) ave_2 = read_evokeds(temp_fname, condition=0) assert_allclose(ave.data, ave_2.data) @@ -191,15 +191,15 @@ def test_evoked_aspects(aspect_kind, tmpdir): @pytest.mark.slowtest -def test_io_evoked(tmpdir): +def test_io_evoked(tmp_path): """Test IO for evoked data (fif + gz) with integer and str args.""" ave = read_evokeds(fname, 0) ave_double = ave.copy() ave_double.comment = ave.comment + ' doubled nave' ave_double.nave = ave.nave * 2 - write_evokeds(tmpdir.join('evoked-ave.fif'), [ave, ave_double]) - ave2, ave_double = read_evokeds(op.join(tmpdir, 'evoked-ave.fif')) + write_evokeds(tmp_path.join('evoked-ave.fif'), [ave, ave_double]) + ave2, ave_double = read_evokeds(op.join(tmp_path, 'evoked-ave.fif')) assert ave2.nave * 2 == ave_double.nave # This not being assert_array_equal due to windows rounding @@ -228,8 +228,8 
@@ def test_io_evoked(tmpdir): aves1 = read_evokeds(fname)[1::2] aves2 = read_evokeds(fname, [1, 3]) aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual']) - write_evokeds(tmpdir.join('evoked-ave.fif'), aves1) - aves4 = read_evokeds(tmpdir.join('evoked-ave.fif')) + write_evokeds(tmp_path.join('evoked-ave.fif'), aves1) + aves4 = read_evokeds(tmp_path.join('evoked-ave.fif')) for aves in [aves2, aves3, aves4]: for [av1, av2] in zip(aves1, aves): assert_array_almost_equal(av1.data, av2.data) @@ -244,20 +244,20 @@ def test_io_evoked(tmpdir): # test saving and reading complex numbers in evokeds ave_complex = ave.copy() ave_complex._data = 1j * ave_complex.data - fname_temp = str(tmpdir.join('complex-ave.fif')) + fname_temp = str(tmp_path.join('complex-ave.fif')) ave_complex.save(fname_temp) ave_complex = read_evokeds(fname_temp)[0] assert_allclose(ave.data, ave_complex.data.imag) # test warnings on bad filenames - fname2 = tmpdir.join('test-bad-name.fif') + fname2 = tmp_path.join('test-bad-name.fif') with pytest.warns(RuntimeWarning, match='-ave.fif'): write_evokeds(fname2, ave) with pytest.warns(RuntimeWarning, match='-ave.fif'): read_evokeds(fname2) # test writing when order of bads doesn't match - fname3 = tmpdir.join('test-bad-order-ave.fif') + fname3 = tmp_path.join('test-bad-order-ave.fif') condition = 'Left Auditory' ave4 = read_evokeds(fname, condition) ave4.info['bads'] = ave4.ch_names[:3] @@ -269,7 +269,7 @@ def test_io_evoked(tmpdir): pytest.raises(TypeError, Evoked, fname) # MaxShield - fname_ms = tmpdir.join('test-ave.fif') + fname_ms = tmp_path.join('test-ave.fif') assert (ave.info['maxshield'] is False) with ave.info._unlock(): ave.info['maxshield'] = True @@ -282,9 +282,9 @@ def test_io_evoked(tmpdir): assert (all(ave.info['maxshield'] is True for ave in aves)) -def test_shift_time_evoked(tmpdir): +def test_shift_time_evoked(tmp_path): """Test for shifting of time scale.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) # Shift backward ave = read_evokeds(fname, 0).shift_time(-0.1, relative=True) write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave) @@ -349,9 +349,9 @@ def test_tmin_tmax(): assert evoked.times[-1] == evoked.tmax -def test_evoked_resample(tmpdir): +def test_evoked_resample(tmp_path): """Test resampling evoked data.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) # upsample, write it out, read it in ave = read_evokeds(fname, 0) orig_lp = ave.info['lowpass'] @@ -663,9 +663,9 @@ def test_arithmetic(): assert evoked1.ch_names == evoked3.ch_names -def test_array_epochs(tmpdir): +def test_array_epochs(tmp_path): """Test creating evoked from array.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) # creating rng = np.random.RandomState(42) @@ -763,7 +763,7 @@ def test_add_channels(): pytest.raises(TypeError, evoked_meg.add_channels, evoked_badsf) -def test_evoked_baseline(tmpdir): +def test_evoked_baseline(tmp_path): """Test evoked baseline.""" evoked = read_evokeds(fname, condition=0, baseline=None) @@ -806,7 +806,7 @@ def test_evoked_baseline(tmpdir): evoked.apply_baseline(baseline) assert_allclose(evoked.baseline, baseline) - tmp_fname = tmpdir / 'test-ave.fif' + tmp_fname = tmp_path / 'test-ave.fif' evoked.save(tmp_fname) evoked_read = read_evokeds(tmp_fname, condition=0) assert_allclose(evoked_read.baseline, evoked.baseline) diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index 20d073e8a75..86785b1e3dd 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -28,9 +28,9 @@ def test_filter_array(): @requires_mne 
-def test_mne_c_design(tmpdir): +def test_mne_c_design(tmp_path): """Test MNE-C filter design.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) temp_fname = op.join(tempdir, 'test_raw.fif') out_fname = op.join(tempdir, 'test_c_raw.fif') x = np.zeros((1, 10001)) diff --git a/mne/tests/test_freesurfer.py b/mne/tests/test_freesurfer.py index e580485bfc1..e28f478e62b 100644 --- a/mne/tests/test_freesurfer.py +++ b/mne/tests/test_freesurfer.py @@ -102,7 +102,7 @@ def test_vertex_to_mni_fs_nibabel(monkeypatch): None, op.join(op.dirname(mne.__file__), 'data', 'FreeSurferColorLUT.txt'), ]) -def test_read_freesurfer_lut(fname, tmpdir): +def test_read_freesurfer_lut(fname, tmp_path): """Test reading volume label names.""" atlas_ids, colors = read_freesurfer_lut(fname) assert list(atlas_ids).count('Brain-Stem') == 1 @@ -125,7 +125,7 @@ def test_read_freesurfer_lut(fname, tmpdir): # long name (only test on one run) if fname is not None: return - fname = str(tmpdir.join('long.txt')) + fname = str(tmp_path.join('long.txt')) names = ['Anterior_Cingulate_and_Medial_Prefrontal_Cortex-' + hemi for hemi in ('lh', 'rh')] ids = np.arange(1, len(names) + 1) diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py index 5005461e918..1b60e5b514c 100644 --- a/mne/tests/test_label.py +++ b/mne/tests/test_label.py @@ -310,9 +310,9 @@ def test_label_io_and_time_course_estimates(): @testing.requires_testing_data -def test_label_io(tmpdir): +def test_label_io(tmp_path): """Test IO of label files.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) label = read_label(label_fname) # label attributes @@ -345,10 +345,10 @@ def _assert_labels_equal(labels_a, labels_b, ignore_pos=False): @testing.requires_testing_data -def test_annot_io(tmpdir): +def test_annot_io(tmp_path): """Test I/O from and to *.annot files.""" # copy necessary files from fsaverage to tempdir - tempdir = str(tmpdir) + tempdir = str(tmp_path) subject = 'fsaverage' label_src = os.path.join(subjects_dir, 'fsaverage', 'label') surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf') @@ -448,7 +448,7 @@ def test_labels_to_stc(): @testing.requires_testing_data -def test_read_labels_from_annot(tmpdir): +def test_read_labels_from_annot(tmp_path): """Test reading labels from FreeSurfer parcellation.""" # test some invalid inputs pytest.raises(ValueError, read_labels_from_annot, 'sample', hemi='bla', @@ -458,7 +458,7 @@ def test_read_labels_from_annot(tmpdir): with pytest.raises(IOError, match='does not exist'): _read_annot_cands('foo') with pytest.raises(IOError, match='no candidate'): - _read_annot(str(tmpdir)) + _read_annot(str(tmp_path)) # read labels using hemi specification labels_lh = read_labels_from_annot('sample', hemi='lh', @@ -522,9 +522,9 @@ def test_read_labels_from_annot_annot2labels(): @testing.requires_testing_data -def test_write_labels_to_annot(tmpdir): +def test_write_labels_to_annot(tmp_path): """Test writing FreeSurfer parcellation from labels.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) labels = read_labels_from_annot('sample', subjects_dir=subjects_dir) diff --git a/mne/tests/test_line_endings.py b/mne/tests/test_line_endings.py index 29420c64bc3..17952dbb304 100644 --- a/mne/tests/test_line_endings.py +++ b/mne/tests/test_line_endings.py @@ -57,9 +57,9 @@ def _assert_line_endings(dir_): % (len(report), '\n'.join(report))) -def test_line_endings(tmpdir): +def test_line_endings(tmp_path): """Test line endings of mne-python.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) with open(op.join(tempdir, 'foo'), 'wb') as 
fid: fid.write('bad\r\ngood\n'.encode('ascii')) _assert_line_endings(tempdir) diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index b2792d7182b..a523147a693 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -224,7 +224,7 @@ def assert_power_preserved(orig, new, limits=(1., 1.05)): @requires_h5py @testing.requires_testing_data -def test_surface_vector_source_morph(tmpdir): +def test_surface_vector_source_morph(tmp_path): """Test surface and vector source estimate morph.""" inverse_operator_surf = read_inverse_operator(fname_inv_surf) @@ -256,9 +256,9 @@ def test_surface_vector_source_morph(tmpdir): assert 'surface' in repr(source_morph_surf) # check loading and saving for surf - source_morph_surf.save(tmpdir.join('42.h5')) + source_morph_surf.save(tmp_path.join('42.h5')) - source_morph_surf_r = read_source_morph(tmpdir.join('42.h5')) + source_morph_surf_r = read_source_morph(tmp_path.join('42.h5')) assert (all([read == saved for read, saved in zip(sorted(source_morph_surf_r.__dict__), @@ -279,7 +279,7 @@ def test_surface_vector_source_morph(tmpdir): @requires_dipy() @pytest.mark.slowtest @testing.requires_testing_data -def test_volume_source_morph_basic(tmpdir): +def test_volume_source_morph_basic(tmp_path): """Test volume source estimate morph, special cases and exceptions.""" import nibabel as nib inverse_operator_vol = read_inverse_operator(fname_inv_vol) @@ -342,14 +342,14 @@ def test_volume_source_morph_basic(tmpdir): subjects_dir=subjects_dir) # two different ways of saving - source_morph_vol.save(tmpdir.join('vol')) + source_morph_vol.save(tmp_path.join('vol')) # check loading - source_morph_vol_r = read_source_morph(tmpdir.join('vol-morph.h5')) + source_morph_vol_r = read_source_morph(tmp_path.join('vol-morph.h5')) # check for invalid file name handling () with pytest.raises(IOError, match='not found'): - read_source_morph(tmpdir.join('42')) + read_source_morph(tmp_path.join('42')) # check morph stc_vol_morphed = source_morph_vol.apply(stc_vol) @@ -473,7 +473,7 @@ def test_volume_source_morph_basic(tmpdir): ('sample', 'fsaverage', 7.0, 7.4, float, True), # morph_mat ]) def test_volume_source_morph_round_trip( - tmpdir, subject_from, subject_to, lower, upper, dtype, morph_mat, + tmp_path, subject_from, subject_to, lower, upper, dtype, morph_mat, monkeypatch): """Test volume source estimate morph round-trips well.""" import nibabel as nib @@ -563,7 +563,7 @@ def test_volume_source_morph_round_trip( stc_from_unit_rt = morph_to_from.apply(morph_from_to.apply(stc_from_unit)) assert_power_preserved(stc_from_unit, stc_from_unit_rt, limits=limits) if morph_mat: - fname = tmpdir.join('temp-morph.h5') + fname = tmp_path.join('temp-morph.h5') morph_to_from.save(fname) morph_to_from = read_source_morph(fname) assert morph_to_from.vol_morph_mat is None @@ -745,7 +745,7 @@ def test_morph_stc_sparse(): (slice(1, 2), 51, 204, 12), (slice(0, 2), 88, 324, 20), ]) -def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig): +def test_volume_labels_morph(tmp_path, sl, n_real, n_mri, n_orig): """Test generating a source space from volume label.""" import nibabel as nib n_use = (sl.stop - sl.start) // (sl.step or 1) diff --git a/mne/tests/test_morph_map.py b/mne/tests/test_morph_map.py index d15249d7be8..fbaabcbbde5 100644 --- a/mne/tests/test_morph_map.py +++ b/mne/tests/test_morph_map.py @@ -21,10 +21,10 @@ @pytest.mark.slowtest @testing.requires_testing_data -def test_make_morph_maps(tmpdir): +def test_make_morph_maps(tmp_path): """Test reading and creating 
morph maps.""" # make a new fake subjects_dir - tempdir = str(tmpdir) + tempdir = str(tmp_path) for subject in ('sample', 'sample_ds', 'fsaverage_ds'): os.mkdir(op.join(tempdir, subject)) os.mkdir(op.join(tempdir, subject, 'surf')) diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py index c3046f9c4f0..e137c20475f 100644 --- a/mne/tests/test_proj.py +++ b/mne/tests/test_proj.py @@ -143,9 +143,9 @@ def test_sensitivity_maps(): sensitivity_map(fwd) -def test_compute_proj_epochs(tmpdir): +def test_compute_proj_epochs(tmp_path): """Test SSP computation on epochs.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) event_id, tmin, tmax = 1, -0.2, 0.3 raw = read_raw_fif(raw_fname, preload=True) @@ -227,9 +227,9 @@ def test_compute_proj_epochs(tmpdir): @pytest.mark.slowtest -def test_compute_proj_raw(tmpdir): +def test_compute_proj_raw(tmp_path): """Test SSP computation on raw.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) # Test that the raw projectors work raw_time = 2.5 # Do shorter amount for speed raw = read_raw_fif(raw_fname).crop(0, raw_time) diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index a4f40f8a300..89fe9f1c2ca 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -149,7 +149,7 @@ def test_spatial_inter_hemi_adjacency(): @pytest.mark.slowtest @testing.requires_testing_data @requires_h5py -def test_volume_stc(tmpdir): +def test_volume_stc(tmp_path): """Test volume STCs.""" N = 100 data = np.arange(N)[:, np.newaxis] @@ -173,7 +173,7 @@ def test_volume_stc(tmpdir): stc = VolVectorSourceEstimate(data, [vertno], 0, 1) ext = 'h5' klass = VolVectorSourceEstimate - fname_temp = tmpdir.join('temp-vl.' + ext) + fname_temp = tmp_path.join('temp-vl.' + ext) stc_new = stc n = 3 if ext == 'h5' else 2 for ii in range(n): @@ -204,7 +204,7 @@ def test_volume_stc(tmpdir): pytest.raises(ValueError, stc.save, fname_vol, ftype='whatever') for ftype in ['w', 'h5']: for _ in range(2): - fname_temp = tmpdir.join('temp-vol.%s' % ftype) + fname_temp = tmp_path.join('temp-vol.%s' % ftype) stc_new.save(fname_temp, ftype=ftype) stc_new = read_source_estimate(fname_temp) assert (isinstance(stc_new, VolSourceEstimate)) @@ -240,11 +240,11 @@ def test_stc_as_volume(): @testing.requires_testing_data @requires_nibabel() -def test_save_vol_stc_as_nifti(tmpdir): +def test_save_vol_stc_as_nifti(tmp_path): """Save the stc as a nifti file and export.""" import nibabel as nib src = read_source_spaces(fname_vsrc) - vol_fname = tmpdir.join('stc.nii.gz') + vol_fname = tmp_path.join('stc.nii.gz') # now let's actually read a MNE-C processed file stc = read_source_estimate(fname_vol, 'sample') @@ -258,7 +258,7 @@ def test_save_vol_stc_as_nifti(tmpdir): with pytest.warns(None): # nib<->numpy t1_img = nib.load(fname_t1) - stc.save_as_volume(tmpdir.join('stc.nii.gz'), src, + stc.save_as_volume(tmp_path.join('stc.nii.gz'), src, dest='mri', mri_resolution=True) with pytest.warns(None): # nib<->numpy img = nib.load(str(vol_fname)) @@ -410,11 +410,11 @@ def attempt_assignment(stc, attr, val): assert stc.data.shape == (len(data), 1) -def test_io_stc(tmpdir): +def test_io_stc(tmp_path): """Test IO for STC files.""" stc = _fake_stc() - stc.save(tmpdir.join("tmp.stc")) - stc2 = read_source_estimate(tmpdir.join("tmp.stc")) + stc.save(tmp_path.join("tmp.stc")) + stc2 = read_source_estimate(tmp_path.join("tmp.stc")) assert_array_almost_equal(stc.data, stc2.data) assert_array_almost_equal(stc.tmin, stc2.tmin) @@ -425,21 +425,21 @@ def test_io_stc(tmpdir): 
# test warning for complex data stc2.data = stc2.data.astype(np.complex128) with pytest.raises(ValueError, match='Cannot save complex-valued STC'): - stc2.save(tmpdir.join('complex.stc')) + stc2.save(tmp_path.join('complex.stc')) @requires_h5py @pytest.mark.parametrize('is_complex', (True, False)) @pytest.mark.parametrize('vector', (True, False)) -def test_io_stc_h5(tmpdir, is_complex, vector): +def test_io_stc_h5(tmp_path, is_complex, vector): """Test IO for STC files using HDF5.""" if vector: stc = _fake_vec_stc(is_complex=is_complex) else: stc = _fake_stc(is_complex=is_complex) - pytest.raises(ValueError, stc.save, tmpdir.join('tmp'), + pytest.raises(ValueError, stc.save, tmp_path.join('tmp'), ftype='foo') - out_name = tmpdir.join('tmp') + out_name = tmp_path.join('tmp') stc.save(out_name, ftype='h5') stc.save(out_name, ftype='h5') # test overwrite stc3 = read_source_estimate(out_name) @@ -457,14 +457,14 @@ def test_io_stc_h5(tmpdir, is_complex, vector): assert_array_equal(v1, v2) -def test_io_w(tmpdir): +def test_io_w(tmp_path): """Test IO for w files.""" stc = _fake_stc(n_time=1) - w_fname = tmpdir.join('fake') + w_fname = tmp_path.join('fake') stc.save(w_fname, ftype='w') src = read_source_estimate(w_fname) - src.save(tmpdir.join('tmp'), ftype='w') - src2 = read_source_estimate(tmpdir.join('tmp-lh.w')) + src.save(tmp_path.join('tmp'), ftype='w') + src2 = read_source_estimate(tmp_path.join('tmp-lh.w')) assert_array_almost_equal(src.data, src2.data) assert_array_almost_equal(src.lh_vertno, src2.lh_vertno) assert_array_almost_equal(src.rh_vertno, src2.rh_vertno) @@ -1170,7 +1170,7 @@ def test_get_peak(kind, vector, n_times): @requires_h5py @testing.requires_testing_data -def test_mixed_stc(tmpdir): +def test_mixed_stc(tmp_path): """Test source estimate from mixed source space.""" N = 90 # number of sources T = 2 # number of time points @@ -1186,7 +1186,7 @@ def test_mixed_stc(tmpdir): stc = MixedSourceEstimate(data, vertno, 0, 1) # make sure error is raised for plotting surface with volume source - fname = tmpdir.join('mixed-stc.h5') + fname = tmp_path.join('mixed-stc.h5') stc.save(fname) stc_out = read_source_estimate(fname) assert_array_equal(stc_out.vertices, vertno) @@ -1205,7 +1205,7 @@ def test_mixed_stc(tmpdir): ]) @pytest.mark.parametrize('dtype', [ np.float32, np.float64, np.complex64, np.complex128]) -def test_vec_stc_basic(tmpdir, klass, kind, dtype): +def test_vec_stc_basic(tmp_path, klass, kind, dtype): """Test (vol)vector source estimate.""" nn = np.array([ [1, 0, 0], @@ -1278,7 +1278,7 @@ def test_vec_stc_basic(tmpdir, klass, kind, dtype): assert_allclose(got_directions, directions * flips) assert_allclose(projected.data, amplitudes * flips) - out_name = tmpdir.join('temp.h5') + out_name = tmp_path.join('temp.h5') stc.save(out_name) stc_read = read_source_estimate(out_name) assert_allclose(stc.data, stc_read.data) @@ -1530,7 +1530,7 @@ def test_vol_mask(): @testing.requires_testing_data -def test_stc_near_sensors(tmpdir): +def test_stc_near_sensors(tmp_path): """Test stc_near_sensors.""" info = read_info(fname_evoked) # pick the left EEG sensors @@ -1544,7 +1544,7 @@ def test_stc_near_sensors(tmpdir): evoked = EvokedArray(np.eye(info['nchan']), info) trans = read_trans(fname_fwd) assert trans['to'] == FIFF.FIFFV_COORD_HEAD - this_dir = str(tmpdir) + this_dir = str(tmp_path) # testing does not have pial, so fake it os.makedirs(op.join(this_dir, 'sample', 'surf')) for hemi in ('lh', 'rh'): @@ -1660,9 +1660,9 @@ def _make_morph_map_hemi_same(subject_from, subject_to, 
subjects_dir, 'surface', )) @pytest.mark.parametrize('scale', ((1.0, 0.8, 1.2), 1., 0.9)) -def test_scale_morph_labels(kind, scale, monkeypatch, tmpdir): +def test_scale_morph_labels(kind, scale, monkeypatch, tmp_path): """Test label extraction, morphing, and MRI scaling relationships.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) subject_from = 'sample' subject_to = 'small' testing_dir = op.join(subjects_dir, subject_from) diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index 3869cd59899..329c79a540b 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -163,7 +163,7 @@ def test_add_patch_info(monkeypatch): @testing.requires_testing_data -def test_add_source_space_distances_limited(tmpdir): +def test_add_source_space_distances_limited(tmp_path): """Test adding distances to source space with a dist_limit.""" src = read_source_spaces(fname) src_new = read_source_spaces(fname) @@ -172,7 +172,7 @@ def test_add_source_space_distances_limited(tmpdir): n_do = 200 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() - out_name = tmpdir.join('temp-src.fif') + out_name = tmp_path.join('temp-src.fif') add_source_space_distances(src_new, dist_limit=0.007) write_source_spaces(out_name, src_new) src_new = read_source_spaces(out_name) @@ -197,7 +197,7 @@ def test_add_source_space_distances_limited(tmpdir): @pytest.mark.slowtest @testing.requires_testing_data -def test_add_source_space_distances(tmpdir): +def test_add_source_space_distances(tmp_path): """Test adding distances to source space.""" src = read_source_spaces(fname) src_new = read_source_spaces(fname) @@ -206,7 +206,7 @@ def test_add_source_space_distances(tmpdir): n_do = 19 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() - out_name = tmpdir.join('temp-src.fif') + out_name = tmp_path.join('temp-src.fif') n_jobs = 2 assert n_do % n_jobs != 0 with pytest.raises(ValueError, match='non-negative'): @@ -241,15 +241,15 @@ def test_add_source_space_distances(tmpdir): @testing.requires_testing_data @requires_mne -def test_discrete_source_space(tmpdir): +def test_discrete_source_space(tmp_path): """Test setting up (and reading/writing) discrete source spaces.""" src = read_source_spaces(fname) v = src[0]['vertno'] # let's make a discrete version with the C code, and with ours - temp_name = tmpdir.join('temp-src.fif') + temp_name = tmp_path.join('temp-src.fif') # save - temp_pos = tmpdir.join('temp-pos.txt') + temp_pos = tmp_path.join('temp-pos.txt') np.savetxt(str(temp_pos), np.c_[src[0]['rr'][v], src[0]['nn'][v]]) # let's try the spherical one (no bem or surf supplied) run_subprocess(['mne_volume_source_space', '--meters', @@ -285,10 +285,10 @@ def test_discrete_source_space(tmpdir): @requires_nibabel() @pytest.mark.slowtest @testing.requires_testing_data -def test_volume_source_space(tmpdir): +def test_volume_source_space(tmp_path): """Test setting up volume source spaces.""" src = read_source_spaces(fname_vol) - temp_name = tmpdir.join('temp-src.fif') + temp_name = tmp_path.join('temp-src.fif') surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN) surf['rr'] *= 1e3 # convert to mm bem_sol = read_bem_solution(fname_bem_3_sol) @@ -337,13 +337,13 @@ def test_volume_source_space(tmpdir): @testing.requires_testing_data @requires_mne -def test_other_volume_source_spaces(tmpdir): +def 
test_other_volume_source_spaces(tmp_path): """Test setting up other volume source spaces.""" # these are split off because they require the MNE tools, and # Travis doesn't seem to like them # let's try the spherical one (no bem or surf supplied) - temp_name = tmpdir.join('temp-src.fif') + temp_name = tmp_path.join('temp-src.fif') run_subprocess(['mne_volume_source_space', '--grid', '7.0', '--src', temp_name, @@ -427,7 +427,7 @@ def test_accumulate_normals(): @pytest.mark.slowtest @testing.requires_testing_data -def test_setup_source_space(tmpdir): +def test_setup_source_space(tmp_path): """Test setting up ico, oct, and all source spaces.""" fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif') @@ -463,7 +463,7 @@ def test_setup_source_space(tmpdir): # oct-6 (sample) - auto filename + IO src = read_source_spaces(fname) - temp_name = tmpdir.join('temp-src.fif') + temp_name = tmp_path.join('temp-src.fif') with pytest.warns(None): # sklearn equiv neighbors src_new = setup_source_space('sample', spacing='oct6', subjects_dir=subjects_dir, add_dist=False) @@ -489,13 +489,13 @@ def test_setup_source_space(tmpdir): @pytest.mark.slowtest @pytest.mark.timeout(60) @pytest.mark.parametrize('spacing', [2, 7]) -def test_setup_source_space_spacing(tmpdir, spacing): +def test_setup_source_space_spacing(tmp_path, spacing): """Test setting up surface source spaces using a given spacing.""" - copytree(op.join(subjects_dir, 'sample'), str(tmpdir.join('sample'))) + copytree(op.join(subjects_dir, 'sample'), str(tmp_path.join('sample'))) args = [] if spacing == 7 else ['--spacing', str(spacing)] - with modified_env(SUBJECTS_DIR=str(tmpdir), SUBJECT='sample'): + with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'): run_subprocess(['mne_setup_source_space'] + args) - src = read_source_spaces(tmpdir.join('sample', 'bem', + src = read_source_spaces(tmp_path.join('sample', 'bem', 'sample-%d-src.fif' % spacing)) src_new = setup_source_space('sample', spacing=spacing, add_dist=False, subjects_dir=subjects_dir) @@ -531,16 +531,16 @@ def test_read_source_spaces(): @pytest.mark.slowtest @testing.requires_testing_data -def test_write_source_space(tmpdir): +def test_write_source_space(tmp_path): """Test reading and writing of source spaces.""" src0 = read_source_spaces(fname, patch_stats=False) - temp_fname = tmpdir.join('tmp-src.fif') + temp_fname = tmp_path.join('tmp-src.fif') write_source_spaces(temp_fname, src0) src1 = read_source_spaces(temp_fname, patch_stats=False) _compare_source_spaces(src0, src1) # test warnings on bad filenames - src_badname = tmpdir.join('test-bad-name.fif.gz') + src_badname = tmp_path.join('test-bad-name.fif.gz') with pytest.warns(RuntimeWarning, match='-src.fif'): write_source_spaces(src_badname, src0) with pytest.warns(RuntimeWarning, match='-src.fif'): @@ -550,7 +550,7 @@ def test_write_source_space(tmpdir): @testing.requires_testing_data @requires_nibabel() @pytest.mark.parametrize('pass_ids', (True, False)) -def test_source_space_from_label(tmpdir, pass_ids): +def test_source_space_from_label(tmp_path, pass_ids): """Test generating a source space from volume label.""" aseg_short = 'aseg.mgz' atlas_ids, _ = read_freesurfer_lut() @@ -590,7 +590,7 @@ def test_source_space_from_label(tmpdir, pass_ids): assert src[0]['nuse'] == 404 # for our given pos and label # test reading and writing - out_name = tmpdir.join('temp-src.fif') + out_name = tmp_path.join('temp-src.fif') write_source_spaces(out_name, src) src_from_file = read_source_spaces(out_name) 
_compare_source_spaces(src, src_from_file, mode='approx') @@ -667,7 +667,7 @@ def test_read_volume_from_src(): @testing.requires_testing_data @requires_nibabel() -def test_combine_source_spaces(tmpdir): +def test_combine_source_spaces(tmp_path): """Test combining source spaces.""" import nibabel as nib rng = np.random.RandomState(2) @@ -705,7 +705,7 @@ def test_combine_source_spaces(tmpdir): assert len(src) == 4 # test reading and writing - src_out_name = tmpdir.join('temp-src.fif') + src_out_name = tmp_path.join('temp-src.fif') src.save(src_out_name) src_from_file = read_source_spaces(src_out_name) _compare_source_spaces(src, src_from_file, mode='approx') @@ -717,7 +717,7 @@ def test_combine_source_spaces(tmpdir): assert (coord_frames == FIFF.FIFFV_COORD_MRI).all() # test errors for export_volume - image_fname = tmpdir.join('temp-image.mgz') + image_fname = tmp_path.join('temp-image.mgz') # source spaces with no volume with pytest.raises(ValueError, match='at least one volume'): @@ -731,7 +731,7 @@ def test_combine_source_spaces(tmpdir): del disc2 # unrecognized file type - bad_image_fname = tmpdir.join('temp-image.png') + bad_image_fname = tmp_path.join('temp-image.png') # vertices outside vol space warning pytest.raises(ValueError, src.export_volume, bad_image_fname, verbose='error') @@ -744,7 +744,7 @@ def test_combine_source_spaces(tmpdir): src_mixed_coord.export_volume(image_fname, verbose='error') # now actually write it - fname_img = tmpdir.join('img.nii') + fname_img = tmp_path.join('img.nii') for mri_resolution in (False, 'sparse', True): for src, up in ((vol, 705), (srf + vol, 27272), @@ -760,7 +760,7 @@ def test_combine_source_spaces(tmpdir): assert n_src == n_want, src # gh-8004 - temp_aseg = tmpdir.join('aseg.mgz') + temp_aseg = tmp_path.join('aseg.mgz') aseg_img = nib.load(aseg_fname) aseg_affine = aseg_img.affine aseg_affine[:3, :3] *= 0.7 diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py index 094e82740a1..61c0f0fe141 100644 --- a/mne/tests/test_surface.py +++ b/mne/tests/test_surface.py @@ -117,9 +117,9 @@ def test_compute_nearest(): @testing.requires_testing_data -def test_io_surface(tmpdir): +def test_io_surface(tmp_path): """Test reading and writing of Freesurfer surface mesh files.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname_quad = op.join(data_path, 'subjects', 'bert', 'surf', 'lh.inflated.nofix') fname_tri = op.join(data_path, 'subjects', 'sample', 'bem', diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index b8235b315e3..60704d14598 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -70,9 +70,9 @@ def test_get_trans(): @testing.requires_testing_data -def test_io_trans(tmpdir): +def test_io_trans(tmp_path): """Test reading and writing of trans files.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) os.mkdir(op.join(tempdir, 'sample')) pytest.raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir) trans0 = read_trans(fname) @@ -371,7 +371,7 @@ def test_average_quats(): @testing.requires_testing_data @pytest.mark.parametrize('subject', ('fsaverage', 'sample')) -def test_fs_xfm(subject, tmpdir): +def test_fs_xfm(subject, tmp_path): """Test reading and writing of Freesurfer transforms.""" fname = op.join(data_path, 'subjects', subject, 'mri', 'transforms', 'talairach.xfm') @@ -379,7 +379,7 @@ def test_fs_xfm(subject, tmpdir): if subject == 'fsaverage': assert_allclose(xfm, np.eye(4), atol=1e-5) # fsaverage is in MNI assert kind == 'MNI Transform File' - tempdir = 
str(tmpdir) + tempdir = str(tmp_path) fname_out = op.join(tempdir, 'out.xfm') _write_fs_xfm(fname_out, xfm, kind) xfm_read, kind_read = _read_fs_xfm(fname_out) diff --git a/mne/time_frequency/tests/test_csd.py b/mne/time_frequency/tests/test_csd.py index 94c52226b28..221297e05c6 100644 --- a/mne/time_frequency/tests/test_csd.py +++ b/mne/time_frequency/tests/test_csd.py @@ -221,10 +221,10 @@ def test_csd_get_data(): @requires_h5py -def test_csd_save(tmpdir): +def test_csd_save(tmp_path): """Test saving and loading a CrossSpectralDensity.""" csd = _make_csd() - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'csd.h5') csd.save(fname) csd2 = read_csd(fname) @@ -236,10 +236,10 @@ def test_csd_save(tmpdir): assert csd._is_sum == csd2._is_sum -def test_csd_pickle(tmpdir): +def test_csd_pickle(tmp_path): """Test pickling and unpickling a CrossSpectralDensity.""" csd = _make_csd() - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'csd.dat') with open(fname, 'wb') as f: pickle.dump(csd, f) diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py index b9a39bb3700..58b0413d772 100644 --- a/mne/time_frequency/tests/test_tfr.py +++ b/mne/time_frequency/tests/test_tfr.py @@ -406,10 +406,10 @@ def test_crop(): @requires_h5py @requires_pandas -def test_io(tmpdir): +def test_io(tmp_path): """Test TFR IO capacities.""" from pandas import DataFrame - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname = op.join(tempdir, 'test-tfr.h5') data = np.zeros((3, 2, 3)) times = np.array([.1, .2, .3]) diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index 9df1e485114..fdf978ccf2a 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -32,12 +32,12 @@ @testing.requires_testing_data -def test_check(tmpdir): +def test_check(tmp_path): """Test checking functions.""" pytest.raises(ValueError, check_random_state, 'foo') pytest.raises(TypeError, _check_fname, 1) _check_fname(Path('./foo')) - fname = str(tmpdir.join('foo')) + fname = str(tmp_path.join('foo')) with open(fname, 'wb'): pass assert op.isfile(fname) @@ -66,9 +66,9 @@ def test_check(tmpdir): @pytest.mark.parametrize('suffix', ('_meg.fif', '_eeg.fif', '_ieeg.fif', '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz')) -def test_check_fname_suffixes(suffix, tmpdir): +def test_check_fname_suffixes(suffix, tmp_path): """Test checking for valid filename suffixes.""" - new_fname = str(tmpdir.join(op.basename(fname_raw) + new_fname = str(tmp_path.join(op.basename(fname_raw) .replace('_raw.fif', suffix))) raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1) raw.save(new_fname) diff --git a/mne/utils/tests/test_config.py b/mne/utils/tests/test_config.py index 17db9fc5ee7..1c33468fcb9 100644 --- a/mne/utils/tests/test_config.py +++ b/mne/utils/tests/test_config.py @@ -8,9 +8,9 @@ ClosingStringIO, get_subjects_dir) -def test_config(tmpdir): +def test_config(tmp_path): """Test mne-python config file support.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) key = '_MNE_PYTHON_CONFIG_TESTING' value = '123456' value2 = '123' diff --git a/mne/utils/tests/test_logging.py b/mne/utils/tests/test_logging.py index d68f5f9f6ba..021555cb588 100644 --- a/mne/utils/tests/test_logging.py +++ b/mne/utils/tests/test_logging.py @@ -63,12 +63,12 @@ def clean_lines(lines=[]): for line in lines] -def test_logging_options(tmpdir): +def test_logging_options(tmp_path): """Test logging (to file).""" with use_log_level(None): # just ensure it's set back with 
pytest.raises(ValueError, match="Invalid value for the 'verbose"): set_log_level('foo') - tempdir = str(tmpdir) + tempdir = str(tmp_path) test_name = op.join(tempdir, 'test.log') with open(fname_log, 'r') as old_log_file: # [:-1] used to strip an extra "No baseline correction applied" @@ -169,7 +169,7 @@ def test_verbose_method(verbose): assert log == '' -def test_warn(capsys, tmpdir, monkeypatch): +def test_warn(capsys, tmp_path, monkeypatch): """Test the smart warn() function.""" with pytest.warns(RuntimeWarning, match='foo'): warn('foo') @@ -177,7 +177,7 @@ def test_warn(capsys, tmpdir, monkeypatch): assert captured.out == '' # gh-5592 assert captured.err == '' # this is because pytest.warns took it already # test ignore_namespaces - bad_name = tmpdir.join('bad.fif') + bad_name = tmp_path.join('bad.fif') raw = RawArray(np.zeros((1, 1)), create_info(1, 1000., 'eeg')) with pytest.warns(RuntimeWarning, match='filename') as ws: raw.save(bad_name) diff --git a/mne/utils/tests/test_numerics.py b/mne/utils/tests/test_numerics.py index 106191c8a09..693d402d272 100644 --- a/mne/utils/tests/test_numerics.py +++ b/mne/utils/tests/test_numerics.py @@ -53,9 +53,9 @@ def test_get_inst_data(): pytest.raises(TypeError, _get_inst_data, 'foo') -def test_hashfunc(tmpdir): +def test_hashfunc(tmp_path): """Test md5/sha1 hash calculations.""" - tempdir = str(tmpdir) + tempdir = str(tmp_path) fname1 = op.join(tempdir, 'foo') fname2 = op.join(tempdir, 'bar') with open(fname1, 'wb') as fid: diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index 54b1f769221..656dad2c390 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -32,14 +32,14 @@ def test_tempdir(): assert (not op.isdir(x)) -def test_datasets(monkeypatch, tmpdir): +def test_datasets(monkeypatch, tmp_path): """Test dataset config.""" # gh-4192 - fake_path = tmpdir.mkdir('MNE-testing-data') + fake_path = tmp_path.mkdir('MNE-testing-data') with open(fake_path.join('version.txt'), 'w') as fid: fid.write('9999.9999') - monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir)) - monkeypatch.setenv('MNE_DATASETS_TESTING_PATH', str(tmpdir)) + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) + monkeypatch.setenv('MNE_DATASETS_TESTING_PATH', str(tmp_path)) assert testing.data_path(download=False, verbose='debug') == str(fake_path) diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 123d84d8313..9959fc92ffc 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -169,7 +169,7 @@ def test_brain_routines(renderer, brain_gc): @testing.requires_testing_data -def test_brain_init(renderer_pyvistaqt, tmpdir, pixel_ratio, brain_gc): +def test_brain_init(renderer_pyvistaqt, tmp_path, pixel_ratio, brain_gc): """Test initialization of the Brain instance.""" from mne.source_estimate import _BaseSourceEstimate @@ -388,7 +388,7 @@ def __init__(self): brain.show_view(view=dict(azimuth=180., elevation=90.)) # image and screenshot - fname = op.join(str(tmpdir), 'test.png') + fname = op.join(str(tmp_path), 'test.png') assert not op.isfile(fname) brain.save_image(fname) assert op.isfile(fname) @@ -435,14 +435,14 @@ def test_single_hemi(hemi, renderer_interactive_pyvistaqt, brain_gc): @testing.requires_testing_data @pytest.mark.slowtest -def test_brain_save_movie(tmpdir, renderer, brain_gc): +def test_brain_save_movie(tmp_path, renderer, brain_gc): """Test saving a movie of a Brain instance.""" if renderer._get_3d_backend() == "mayavi": 
pytest.skip('Save movie only supported on PyVista') from imageio_ffmpeg import count_frames_and_secs brain = _create_testing_brain(hemi='lh', time_viewer=False, cortex=['r', 'b']) # custom binarized - filename = str(op.join(tmpdir, "brain_test.mov")) + filename = str(op.join(tmp_path, "brain_test.mov")) for interactive_state in (False, True): # for coverage, we set interactivity if interactive_state: @@ -469,12 +469,12 @@ def test_brain_save_movie(tmpdir, renderer, brain_gc): _TINY_SIZE = (350, 300) -def tiny(tmpdir): +def tiny(tmp_path): """Create a tiny fake brain.""" # This is a minimal version of what we need for our viz-with-timeviewer # support currently subject = 'test' - subject_dir = tmpdir.mkdir(subject) + subject_dir = tmp_path.mkdir(subject) surf_dir = subject_dir.mkdir('surf') rng = np.random.RandomState(0) rr = rng.randn(4, 3) @@ -489,7 +489,7 @@ def tiny(tmpdir): vertices = [np.arange(len(rr)), []] data = rng.randn(len(rr), 10) stc = SourceEstimate(data, vertices, 0, 1, subject) - brain = stc.plot(subjects_dir=tmpdir, hemi='lh', surface='white', + brain = stc.plot(subjects_dir=tmp_path, hemi='lh', surface='white', size=_TINY_SIZE) # in principle this should be sufficient: # @@ -504,12 +504,12 @@ def tiny(tmpdir): @pytest.mark.filterwarnings('ignore:.*constrained_layout not applied.*:') -def test_brain_screenshot(renderer_interactive_pyvistaqt, tmpdir, brain_gc): +def test_brain_screenshot(renderer_interactive_pyvistaqt, tmp_path, brain_gc): """Test time viewer screenshot.""" # XXX disable for sprint because it's too unreliable if sys.platform == 'darwin' and os.getenv('GITHUB_ACTIONS', '') == 'true': pytest.skip('Test is unreliable on GitHub Actions macOS') - tiny_brain, ratio = tiny(tmpdir) + tiny_brain, ratio = tiny(tmp_path) img_nv = tiny_brain.screenshot(time_viewer=False) want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3) assert img_nv.shape == want @@ -625,7 +625,7 @@ def test_brain_time_viewer(renderer_interactive_pyvistaqt, pixel_ratio, pytest.param('mixed', marks=pytest.mark.slowtest), ]) @pytest.mark.slowtest -def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmpdir, +def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmp_path, brain_gc): """Test brain traces.""" hemi_str = list() @@ -796,7 +796,7 @@ def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmpdir, check_version('sphinx_gallery')): brain.close() return - fnames = [str(tmpdir.join(f'temp_{ii}.png')) for ii in range(2)] + fnames = [str(tmp_path.join(f'temp_{ii}.png')) for ii in range(2)] block_vars = dict(image_path_iterator=iter(fnames), example_globals=dict(brain=brain)) block = ('code', """ @@ -805,7 +805,7 @@ def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmpdir, # interpolation='linear', time_viewer=True) # """, 1) - gallery_conf = dict(src_dir=str(tmpdir), compress_images=[]) + gallery_conf = dict(src_dir=str(tmp_path), compress_images=[]) scraper = _BrainScraper() rst = scraper(block, block_vars, gallery_conf) assert brain.plotter is None # closed diff --git a/mne/viz/backends/tests/test_renderer.py b/mne/viz/backends/tests/test_renderer.py index 43a199990f7..e5fcd262922 100644 --- a/mne/viz/backends/tests/test_renderer.py +++ b/mne/viz/backends/tests/test_renderer.py @@ -188,7 +188,7 @@ def test_renderer(renderer, monkeypatch): run_subprocess(cmd) -def test_set_3d_backend_bad(monkeypatch, tmpdir): +def test_set_3d_backend_bad(monkeypatch, tmp_path): """Test that the error emitted when a bad backend name is used.""" match = "Allowed 
values are 'pyvistaqt', 'mayavi', and 'notebook'" with pytest.raises(ValueError, match=match): @@ -201,7 +201,7 @@ def fail(x): monkeypatch.setattr( 'mne.viz.backends.renderer.MNE_3D_BACKEND', None) # avoid using the config - monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir)) + monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) match = 'Could not load any valid 3D.*\npyvistaqt: .*' assert get_3d_backend() is None with pytest.raises(RuntimeError, match=match): diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index da88939ff42..71abca6ee01 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -221,10 +221,10 @@ def test_plot_alignment_surf(renderer): @pytest.mark.slowtest # can be slow on OSX @testing.requires_testing_data @traits_test -def test_plot_alignment_basic(tmpdir, renderer, mixed_fwd_cov_evoked): +def test_plot_alignment_basic(tmp_path, renderer, mixed_fwd_cov_evoked): """Test plotting of -trans.fif files and MEG sensor layouts.""" # generate fiducials file for testing - tempdir = str(tmpdir) + tempdir = str(tmp_path) fiducials_path = op.join(tempdir, 'fiducials.fif') fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1, 'r': [-0.08061612, -0.02908875, -0.04131077]}, @@ -433,16 +433,16 @@ def test_plot_alignment_basic(tmpdir, renderer, mixed_fwd_cov_evoked): @testing.requires_testing_data -def test_plot_alignment_fnirs(renderer, tmpdir): +def test_plot_alignment_fnirs(renderer, tmp_path): """Test fNIRS plotting.""" - # Here we use subjects_dir=tmpdir, since no surfaces should actually + # Here we use subjects_dir=tmp_path, since no surfaces should actually # be loaded! # fNIRS (default is pairs) info = read_raw_nirx(nirx_fname).info assert info['nchan'] == 26 kwargs = dict(trans='fsaverage', subject='fsaverage', surfaces=(), - verbose=True, subjects_dir=tmpdir) + verbose=True, subjects_dir=tmp_path) with catch_logging() as log: fig = plot_alignment(info, **kwargs) log = log.getvalue() From 4a445078e4d2a7121820af44af1b0ea6c4b98fbf Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 19:51:24 +0200 Subject: [PATCH 02/23] Fix flake8 --- mne/channels/tests/test_montage.py | 3 +- .../sleep_physionet/tests/test_physionet.py | 38 +++++++++---------- mne/io/tests/test_constants.py | 3 +- mne/minimum_norm/tests/test_inverse.py | 3 +- mne/tests/test_source_space.py | 2 +- mne/utils/tests/test_check.py | 4 +- 6 files changed, 28 insertions(+), 25 deletions(-) diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index 5225ab0e275..df777caa855 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -1006,7 +1006,8 @@ def test_read_dig_captrak(tmp_path): ) montage = transform_to_head(montage) # transform_to_head has to be tested - _check_roundtrip(montage=montage, fname=str(tmp_path.join('bvct_test.fif'))) + _check_roundtrip(montage=montage, + fname=str(tmp_path.join('bvct_test.fif'))) fid, _ = _get_fid_coords(montage.dig) assert_allclose( diff --git a/mne/datasets/sleep_physionet/tests/test_physionet.py b/mne/datasets/sleep_physionet/tests/test_physionet.py index e510a1e6c0b..7fbff8b4224 100644 --- a/mne/datasets/sleep_physionet/tests/test_physionet.py +++ b/mne/datasets/sleep_physionet/tests/test_physionet.py @@ -24,7 +24,7 @@ @pytest.fixture(scope='session') -def physionet_tmp_path(tmp_path_factory): +def physionet_tmpdir(tmp_path_factory): """Fixture exposing a temporary directory for testing.""" return str(tmp_path_factory.mktemp('physionet_files')) @@ -88,63 +88,63 @@ 
def test_run_update_age_records(tmp_path): @pytest.mark.parametrize('subject', [39, 68, 69, 78, 79, 83]) -def test_sleep_physionet_age_missing_subjects(physionet_tmp_path, subject, +def test_sleep_physionet_age_missing_subjects(physionet_tmpdir, subject, download_is_error): """Test handling of missing subjects in Sleep Physionet age fetcher.""" with pytest.raises( ValueError, match='This dataset contains subjects 0 to 82'): age.fetch_data( subjects=[subject], recording=[1], on_missing='raise', - path=physionet_tmp_path) + path=physionet_tmpdir) with pytest.warns(RuntimeWarning, match='This dataset contains subjects 0 to 82'): age.fetch_data( subjects=[subject], recording=[1], on_missing='warn', - path=physionet_tmp_path) + path=physionet_tmpdir) paths = age.fetch_data( subjects=[subject], recording=[1], on_missing='ignore', - path=physionet_tmp_path) + path=physionet_tmpdir) assert paths == [] @pytest.mark.parametrize('subject,recording', [(13, 2), (36, 1), (52, 1)]) -def test_sleep_physionet_age_missing_recordings(physionet_tmp_path, subject, +def test_sleep_physionet_age_missing_recordings(physionet_tmpdir, subject, recording, download_is_error): """Test handling of missing recordings in Sleep Physionet age fetcher.""" with pytest.raises( ValueError, match=f'Requested recording {recording} for subject'): age.fetch_data(subjects=[subject], recording=[recording], - on_missing='raise', path=physionet_tmp_path) + on_missing='raise', path=physionet_tmpdir) with pytest.warns(RuntimeWarning, match=f'Requested recording {recording} for subject'): age.fetch_data(subjects=[subject], recording=[recording], - on_missing='warn', path=physionet_tmp_path) + on_missing='warn', path=physionet_tmpdir) paths = age.fetch_data(subjects=[subject], recording=[recording], - on_missing='ignore', path=physionet_tmp_path) + on_missing='ignore', path=physionet_tmpdir) assert paths == [] -def test_sleep_physionet_age(physionet_tmp_path, monkeypatch, download_is_error): +def test_sleep_physionet_age(physionet_tmpdir, monkeypatch, download_is_error): """Test Sleep Physionet URL handling.""" # check download_is_error patching with pytest.raises(AssertionError, match='Test should not download'): - age.fetch_data(subjects=[0], recording=[1], path=physionet_tmp_path) + age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) # then patch my_func = _FakeFetch() monkeypatch.setattr(pooch, 'retrieve', my_func) - paths = age.fetch_data(subjects=[0], recording=[1], path=physionet_tmp_path) + paths = age.fetch_data(subjects=[0], recording=[1], path=physionet_tmpdir) assert_array_equal(_keep_basename_only(paths), [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf']]) paths = age.fetch_data(subjects=[0, 1], recording=[1], - path=physionet_tmp_path) + path=physionet_tmpdir) assert_array_equal(_keep_basename_only(paths), [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], ['SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf']]) paths = age.fetch_data(subjects=[0], recording=[1, 2], - path=physionet_tmp_path) + path=physionet_tmpdir) assert_array_equal(_keep_basename_only(paths), [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'], ['SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf']]) @@ -170,7 +170,7 @@ def test_sleep_physionet_age(physionet_tmp_path, monkeypatch, download_is_error) 'hash': 'c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d'}, {'name': 'SC4002EC-Hypnogram.edf', 'hash': '386230188a3552b1fc90bba0fb7476ceaca174b6'}) - base_path = age.data_path(path=physionet_tmp_path) + base_path = age.data_path(path=physionet_tmpdir) 
_check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) @@ -189,12 +189,12 @@ def test_run_update_temazepam_records(tmp_path): data, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS)) -def test_sleep_physionet_temazepam(physionet_tmp_path, monkeypatch): +def test_sleep_physionet_temazepam(physionet_tmpdir, monkeypatch): """Test Sleep Physionet URL handling.""" my_func = _FakeFetch() monkeypatch.setattr(pooch, 'retrieve', my_func) - paths = temazepam.fetch_data(subjects=[0], path=physionet_tmp_path) + paths = temazepam.fetch_data(subjects=[0], path=physionet_tmpdir) assert_array_equal(_keep_basename_only(paths), [['ST7011J0-PSG.edf', 'ST7011JP-Hypnogram.edf']]) @@ -203,9 +203,9 @@ def test_sleep_physionet_temazepam(physionet_tmp_path, monkeypatch): 'hash': 'b9d11484126ebff1884034396d6a20c62c0ef48d'}, {'name': 'ST7011JP-Hypnogram.edf', 'hash': 'ff28e5e01296cefed49ae0c27cfb3ebc42e710bf'}) - base_path = temazepam.data_path(path=physionet_tmp_path) + base_path = temazepam.data_path(path=physionet_tmpdir) _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path) with pytest.raises( ValueError, match='This dataset contains subjects 0 to 21'): - paths = temazepam.fetch_data(subjects=[22], path=physionet_tmp_path) + paths = temazepam.fetch_data(subjects=[22], path=physionet_tmpdir) diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index 0a3832ed837..361616a7d19 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -100,7 +100,8 @@ def test_constants(tmp_path): if 'Dictionary' in name: ff.extract(name, tmp_path) names.append(op.basename(name)) - shutil.move(op.join(tmp_path, name), op.join(tmp_path, names[-1])) + shutil.move(op.join(tmp_path, name), + op.join(tmp_path, names[-1])) names = sorted(names) assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt', 'DictionaryStructures.txt', diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 62c1f270bae..92c2ba2f1cf 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -774,7 +774,8 @@ def test_make_inverse_operator_vector(evoked, noise_cov): atol=1e-20) -def test_make_inverse_operator_diag(evoked, noise_cov, tmp_path, azure_windows): +def test_make_inverse_operator_diag(evoked, noise_cov, tmp_path, + azure_windows): """Test MNE inverse computation with diagonal noise cov.""" noise_cov = noise_cov.as_diag() fwd_op = convert_forward_solution(read_forward_solution(fname_fwd), diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index 329c79a540b..1bc5b836d1a 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -496,7 +496,7 @@ def test_setup_source_space_spacing(tmp_path, spacing): with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'): run_subprocess(['mne_setup_source_space'] + args) src = read_source_spaces(tmp_path.join('sample', 'bem', - 'sample-%d-src.fif' % spacing)) + 'sample-%d-src.fif' % spacing)) src_new = setup_source_space('sample', spacing=spacing, add_dist=False, subjects_dir=subjects_dir) _compare_source_spaces(src, src_new, mode='approx', nearest=True) diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index fdf978ccf2a..8ed9d8a0245 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -68,8 +68,8 @@ def test_check(tmp_path): '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz')) def test_check_fname_suffixes(suffix, tmp_path): """Test checking for valid filename suffixes.""" - 
new_fname = str(tmp_path.join(op.basename(fname_raw) - .replace('_raw.fif', suffix))) + new_fname = str(tmp_path.join(op.basename(fname_raw).replace('_raw.fif', + suffix))) raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1) raw.save(new_fname) mne.io.read_raw_fif(new_fname) From bbd664173b1058ef9e5275e8cc0aeedbe58cb3fb Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 21:30:33 +0200 Subject: [PATCH 03/23] Fix some joins --- mne/channels/tests/test_montage.py | 6 +++--- mne/datasets/tests/test_datasets.py | 2 +- mne/tests/test_annotations.py | 6 +++--- mne/tests/test_source_space.py | 5 +++-- mne/utils/tests/test_check.py | 3 +-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index df777caa855..f10e3f776c7 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -113,7 +113,7 @@ def test_dig_montage_trans(tmp_path): trans = compute_native_head_t(montage) _ensure_trans(trans) # ensure that we can save and load it, too - fname = tmp_path.join('temp-mon.fif') + fname = tmp_path / 'temp-mon.fif' _check_roundtrip(montage, fname, 'mri') # test applying a trans position1 = montage.get_positions() @@ -579,7 +579,7 @@ def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): with open(op.join(kit_dir, 'test_elp.txt')) as fid: content = fid.read().replace('FastSCAN', 'XxxxXXXX') - fname = str(tmp_path.join('faulty_FastSCAN.txt')) + fname = str(tmp_path).join('faulty_FastSCAN.txt') with open(fname, 'w') as fid: fid.write(content) @@ -639,7 +639,7 @@ def isotrak_eeg(tmp_path_factory): N_ROWS, N_COLS = 5, 3 content = np.random.RandomState(_SEED).randn(N_ROWS, N_COLS) - fname = tmp_path_factory.mktemp('data').join('test.eeg') + fname = tmp_path_factory.mktemp('data') / 'test.eeg' with open(str(fname), 'w') as fid: fid.write(( '3 200\n' diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index efe0f4c5e38..4af9f2f259f 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -57,7 +57,7 @@ def test_datasets_basic(tmp_path, monkeypatch): _set_montage_coreg_path() sd = get_subjects_dir() assert sd.endswith('MNE-fsaverage-data') - monkeypatch.setenv('MNE_DATA', str(tmp_path.join('foo'))) + monkeypatch.setenv('MNE_DATA', tmp_path / 'foo') with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): testing.data_path(download=False) diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index 0f97167ed8b..0aca49868fe 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -852,7 +852,7 @@ def dummy_annotation_file(tmp_path_factory, ch_names, fmt): content[-1] += ',MEG0111:MEG2563' content = '\n'.join(content) - fname = tmp_path_factory.mktemp('data').join(f'annotations-annot.{fmt}') + fname = tmp_path_factory.mktemp('data') / f'annotations-annot.{fmt}' if isinstance(content, str): fname.write(content) else: @@ -916,7 +916,7 @@ def dummy_annotation_txt_file(tmp_path_factory, ch_names): content[1] = content[1].strip() + ', MEG0111:MEG2563' content = '\n'.join(content) - fname = tmp_path_factory.mktemp('data').join('annotations.txt') + fname = tmp_path_factory.mktemp('data') / 'annotations.txt' fname.write(content) return fname @@ -934,7 +934,7 @@ def test_io_annotation_txt(dummy_annotation_txt_file, tmp_path_factory, annot, Annotations([3.14, 6.28], [42., 48], ['AA', 'BB'], **kwargs)) # Now test writing - fname = 
str(tmp_path_factory.mktemp('data').join('annotations.txt')) + fname = tmp_path_factory.mktemp('data') / 'annotations.txt' annot.save(fname) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index 1bc5b836d1a..80a9b703ccc 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -495,8 +495,9 @@ def test_setup_source_space_spacing(tmp_path, spacing): args = [] if spacing == 7 else ['--spacing', str(spacing)] with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'): run_subprocess(['mne_setup_source_space'] + args) - src = read_source_spaces(tmp_path.join('sample', 'bem', - 'sample-%d-src.fif' % spacing)) + src = read_source_spaces( + tmp_path / 'sample' / 'bem' / 'sample-%d-src.fif' % spacing + ) src_new = setup_source_space('sample', spacing=spacing, add_dist=False, subjects_dir=subjects_dir) _compare_source_spaces(src, src_new, mode='approx', nearest=True) diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index 8ed9d8a0245..fcdad7ad3f0 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -68,8 +68,7 @@ def test_check(tmp_path): '_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz')) def test_check_fname_suffixes(suffix, tmp_path): """Test checking for valid filename suffixes.""" - new_fname = str(tmp_path.join(op.basename(fname_raw).replace('_raw.fif', - suffix))) + new_fname = tmp_path / op.basename(fname_raw).replace('_raw.fif', suffix) raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1) raw.save(new_fname) mne.io.read_raw_fif(new_fname) From ccb6ada681c14300ea1df3f6d2304ed023a16c22 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 22:07:08 +0200 Subject: [PATCH 04/23] Fix test_annotations.py --- mne/tests/test_annotations.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index 0aca49868fe..77997b093ef 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -113,7 +113,7 @@ def test_basics(): def test_annot_sanitizing(tmp_path): """Test description sanitizing.""" annot = Annotations([0], [1], ['a;:b']) - fname = str(tmp_path.join('custom-annot.fif')) + fname = tmp_path / 'custom-annot.fif' annot.save(fname) annot_read = read_annotations(fname) _assert_annotations_equal(annot, annot_read) @@ -854,7 +854,8 @@ def dummy_annotation_file(tmp_path_factory, ch_names, fmt): fname = tmp_path_factory.mktemp('data') / f'annotations-annot.{fmt}' if isinstance(content, str): - fname.write(content) + with open(fname, "w") as f: + f.write(content) else: content.save(fname) return fname @@ -878,7 +879,7 @@ def test_io_annotation(dummy_annotation_file, tmp_path, fmt, ch_names): tol=1e-6) # Now test writing - fname = tmp_path.join(f'annotations-annot.{fmt}') + fname = tmp_path / f'annotations-annot.{fmt}' annot.save(fname) annot2 = read_annotations(fname) _assert_annotations_equal(annot, annot2) @@ -897,8 +898,9 @@ def test_broken_csv(tmp_path): "1.,1.0,AA\n" "3.,2.425,BB") - fname = tmp_path.join('annotations_broken.csv') - fname.write(content) + fname = tmp_path / 'annotations_broken.csv' + with open(fname, "w") as f: + f.write(content) with pytest.warns(RuntimeWarning, match='save your CSV as a TXT'): read_annotations(fname) @@ -917,7 +919,8 @@ def dummy_annotation_txt_file(tmp_path_factory, ch_names): content = '\n'.join(content) fname = tmp_path_factory.mktemp('data') 
/ 'annotations.txt' - fname.write(content) + with open(fname, "w") as f: + f.write(content) return fname @@ -975,8 +978,9 @@ def test_read_annotation_txt_header(tmp_path): "# orig_time : 42\n" "# C\n" "Done") - fname = tmp_path.join('header.txt') - fname.write(content) + fname = tmp_path / 'header.txt' + with open(fname, "w") as f: + f.write(content) orig_time = _read_annotations_txt_parse_header(fname) want = datetime.fromtimestamp(1038942071.7201, timezone.utc) assert orig_time == want @@ -987,8 +991,9 @@ def test_read_annotation_txt_one_segment(tmp_path): content = ("# MNE-Annotations\n" "# onset, duration, description\n" "3.14, 42, AA") - fname = tmp_path.join('one-annotations.txt') - fname.write(content) + fname = tmp_path / 'one-annotations.txt' + with open(fname, "w") as f: + f.write(content) annot = read_annotations(fname) _assert_annotations_equal(annot, Annotations(3.14, 42, ['AA'])) @@ -997,8 +1002,9 @@ def test_read_annotation_txt_empty(tmp_path): """Test empty TXT input/output.""" content = ("# MNE-Annotations\n" "# onset, duration, description\n") - fname = tmp_path.join('empty-annotations.txt') - fname.write(content) + fname = tmp_path / 'empty-annotations.txt' + with open(fname, "w") as f: + f.write(content) annot = read_annotations(fname) _assert_annotations_equal(annot, Annotations([], [], [])) From 0a6b50ee698bcf27d95a704cc332c48f2fab090e Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 22:43:35 +0200 Subject: [PATCH 05/23] More path join fixes --- mne/channels/tests/test_montage.py | 2 +- mne/datasets/tests/test_datasets.py | 2 +- mne/forward/tests/test_make_forward.py | 2 +- mne/io/edf/tests/test_edf.py | 6 +++--- mne/io/fiff/tests/test_raw_fiff.py | 12 +++++------ mne/io/hitachi/tests/test_hitachi.py | 2 +- mne/io/tests/test_meas_info.py | 18 ++++++++-------- mne/io/tests/test_write.py | 2 +- mne/tests/test_cov.py | 24 ++++++++++----------- mne/tests/test_epochs.py | 12 +++++------ mne/tests/test_event.py | 10 ++++----- mne/tests/test_evoked.py | 2 +- mne/tests/test_source_estimate.py | 29 +++++++++++++------------- mne/utils/tests/test_logging.py | 2 +- mne/utils/tests/test_testing.py | 2 +- 15 files changed, 63 insertions(+), 64 deletions(-) diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index f10e3f776c7..d1afdaa1142 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -579,7 +579,7 @@ def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): with open(op.join(kit_dir, 'test_elp.txt')) as fid: content = fid.read().replace('FastSCAN', 'XxxxXXXX') - fname = str(tmp_path).join('faulty_FastSCAN.txt') + fname = tmp_path / 'faulty_FastSCAN.txt' with open(fname, 'w') as fid: fid.write(content) diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index 4af9f2f259f..b1212b10f24 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -57,7 +57,7 @@ def test_datasets_basic(tmp_path, monkeypatch): _set_montage_coreg_path() sd = get_subjects_dir() assert sd.endswith('MNE-fsaverage-data') - monkeypatch.setenv('MNE_DATA', tmp_path / 'foo') + monkeypatch.setenv('MNE_DATA', str(tmp_path / 'foo')) with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'): testing.data_path(download=False) diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py index 30605734b57..ef9ee5e87ce 100644 --- a/mne/forward/tests/test_make_forward.py +++ 
b/mne/forward/tests/test_make_forward.py @@ -482,7 +482,7 @@ def test_use_coil_def(tmp_path): trans = Transform('head', 'mri', None) with pytest.raises(RuntimeError, match='coil definition not found'): make_forward_solution(info, trans, src, sphere) - coil_fname = tmp_path.join('coil_def.dat') + coil_fname = tmp_path / 'coil_def.dat' with open(coil_fname, 'w') as fid: fid.write("""# custom cube coil def 1 9999 2 8 3e-03 0.000e+00 "Test" diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 12a4d711e9b..c31595ee023 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -85,7 +85,7 @@ def test_subject_info(tmp_path): want = {'id': 'X', 'sex': 'X', 'birthday': 'X', 'name': 'X'} for key, val in want.items(): assert edf_info['subject_info'][key] == val, key - fname = tmp_path.join('test_raw.fif') + fname = tmp_path / 'test_raw.fif' raw.save(fname) raw = read_raw_fif(fname) assert raw.info['subject_info'] is None # XXX should eventually round-trip @@ -197,7 +197,7 @@ def test_parse_annotation(tmp_path): b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00' b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') - annot_file = tmp_path.join('annotations.txt') + annot_file = tmp_path / 'annotations.txt' annot_file.write(annot) annot = [a for a in bytes(annot)] @@ -315,7 +315,7 @@ def test_read_annot(tmp_path): b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00' b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') - annot_file = tmp_path.join('annotations.txt') + annot_file = tmp_path / 'annotations.txt' annot_file.write(annot) onset, duration, desc = _read_annotations_edf(annotations=str(annot_file)) diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 80b26e4d3c5..8aa54bf61e9 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -134,7 +134,7 @@ def test_concat(tmp_path): # we trim the file to save lots of memory and some time raw = read_raw_fif(test_fif_fname) raw.crop(0, 2.) - test_name = tmp_path.join('test_raw.fif') + test_name = tmp_path / 'test_raw.fif' raw.save(test_name) # now run the standard test _test_concat(partial(read_raw_fif), test_name) @@ -540,7 +540,7 @@ def test_split_numbers(tmp_path, monkeypatch): monkeypatch.setattr(base, 'write_string', _no_write_file_name) raw = read_raw_fif(test_fif_fname).pick('eeg') # gh-8339 - dashes_fname = tmp_path.join('sub-1_ses-2_task-3_raw.fif') + dashes_fname = tmp_path / 'sub-1_ses-2_task-3_raw.fif' raw.save(dashes_fname, split_size='5MB', buffer_size_sec=1.) 
assert op.isfile(dashes_fname) @@ -563,7 +563,7 @@ def test_load_bad_channels(tmp_path): # Test normal case raw.load_bad_channels(bad_file_works) # Write it out, read it in, and check - raw.save(tmp_path.join('foo_raw.fif')) + raw.save(tmp_path / 'foo_raw.fif') raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) assert correct_bads == raw_new.info['bads'] # Reset it @@ -628,7 +628,7 @@ def test_io_raw(tmp_path): (ctf_fname, 'raw.fif')]) def test_io_raw_additional(fname_in, fname_out, tmp_path): """Test IO for raw data (Neuromag + CTF + gz).""" - fname_out = tmp_path.join(fname_out) + fname_out = tmp_path / fname_out raw = read_raw_fif(fname_in).crop(0, 2) nchan = raw.info['nchan'] @@ -1514,7 +1514,7 @@ def test_compensation_raw(tmp_path): assert_allclose(data_3, data_3_new, **tols) # Try IO with compensation - temp_file = tmp_path.join('raw.fif') + temp_file = tmp_path / 'raw.fif' raw_3.save(temp_file, overwrite=True) for preload in (True, False): raw_read = read_raw_fif(temp_file, preload=preload) @@ -1644,7 +1644,7 @@ def test_equalize_channels(): def test_memmap(tmp_path): """Test some interesting memmapping cases.""" # concatenate_raw - memmaps = [tmp_path.join(str(ii)) for ii in range(3)] + memmaps = [tmp_path / str(ii) for ii in range(3)] raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) assert raw_0._data.filename == memmaps[0] raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1]) diff --git a/mne/io/hitachi/tests/test_hitachi.py b/mne/io/hitachi/tests/test_hitachi.py index fa838457269..d0dc58ff758 100644 --- a/mne/io/hitachi/tests/test_hitachi.py +++ b/mne/io/hitachi/tests/test_hitachi.py @@ -189,7 +189,7 @@ def test_hitachi_basic(preload, version, n_ch, n_times, lowpass, sex, date, end, tmp_path): """Test NIRSport1 file with no saturation.""" - fname = str(tmp_path.join('test.csv')) + fname = tmp_path / 'test.csv' contents = CONTENTS[version] if end is not None: contents = contents.replace(b'\r', b'\n').replace(b'\n\n', b'\n') diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py index 2d8ba85c8f3..b4c9a160e84 100644 --- a/mne/io/tests/test_meas_info.py +++ b/mne/io/tests/test_meas_info.py @@ -151,7 +151,7 @@ def test_fiducials_io(tmp_path): assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL - temp_fname = tmp_path.join('test.fif') + temp_fname = tmp_path / 'test.fif' write_fiducials(temp_fname, pts, coord_frame) pts_1, coord_frame_1 = read_fiducials(temp_fname) assert coord_frame == coord_frame_1 @@ -228,7 +228,7 @@ def test_info(): def test_read_write_info(tmp_path): """Test IO of info.""" info = read_info(raw_fname) - temp_file = str(tmp_path.join('info.fif')) + temp_file = tmp_path / 'info.fif' # check for bug `#1198` info['dev_head_t']['trans'] = np.eye(4) t1 = info['dev_head_t']['trans'] @@ -301,8 +301,8 @@ def test_io_dig_points(tmp_path): """Test Writing for dig files.""" points = read_polhemus_fastscan(hsp_fname, on_header_missing='ignore') - dest = str(tmp_path.join('test.txt')) - dest_bad = str(tmp_path.join('test.mne')) + dest = tmp_path / 'test.txt' + dest_bad = tmp_path / 'test.mne' with pytest.raises(ValueError, match='must be of shape'): _write_dig_points(dest, points[:, :2]) with pytest.raises(ValueError, match='extension'): @@ -322,7 +322,7 @@ def test_io_dig_points(tmp_path): def test_io_coord_frame(tmp_path): """Test round trip for coordinate frame.""" - fname = tmp_path.join('test.fif') + fname = tmp_path / 'test.fif' for ch_type in ('eeg', 'seeg', 'ecog', 'dbs', 'hbo', 
'hbr'): info = create_info( ch_names=['Test Ch'], sfreq=1000., ch_types=[ch_type]) @@ -716,7 +716,7 @@ def test_anonymize(tmp_path): # write to disk & read back inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo' fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif' - out_path = tmp_path.join(fname) + out_path = tmp_path / fname inst.save(out_path, overwrite=True) if inst_type == 'raw': read_raw_fif(out_path) @@ -745,7 +745,7 @@ def test_anonymize_with_io(tmp_path): """Test that IO does not break anonymization.""" raw = read_raw_fif(raw_fname) - temp_path = tmp_path.join('tmp_raw.fif') + temp_path = tmp_path / 'tmp_raw.fif' raw.save(temp_path) raw2 = read_raw_fif(temp_path) @@ -821,7 +821,7 @@ def test_field_round_trip(tmp_path): info['helium_info'] = dict( he_level_raw=1., helium_level=2., orig_file_guid='e', meas_date=(1, 2)) - fname = tmp_path.join('temp-info.fif') + fname = tmp_path / 'temp-info.fif' write_info(fname, info) info_read = read_info(fname) assert_object_equal(info, info_read) @@ -902,7 +902,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): raw.info.normalize_proj() raw.pick_channels(data_names + ref_names).crop(0, 2) long_names = ['123456789abcdefg' + name for name in raw.ch_names] - fname = tmp_path.join('test-raw.fif') + fname = tmp_path / 'test-raw.fif' with catch_logging() as log: raw.save(fname) log = log.getvalue() diff --git a/mne/io/tests/test_write.py b/mne/io/tests/test_write.py index fb4ac07741c..2a67566c61f 100644 --- a/mne/io/tests/test_write.py +++ b/mne/io/tests/test_write.py @@ -12,7 +12,7 @@ def test_write_int(tmp_path): """Test that write_int raises an error on bad values.""" - with start_file(tmp_path.join('temp.fif')) as fid: + with start_file(tmp_path / 'temp.fif') as fid: write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, [2147483647]) # 2 ** 31 - 1 write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, []) # 2 ** 31 - 1 with pytest.raises(TypeError, match=r'.*exceeds max.*EVENT_LIST\)'): diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index 3e05db1212a..bfc66f4fbfd 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -195,7 +195,7 @@ def _assert_reorder(cov_new, cov_orig, order): def test_ad_hoc_cov(tmp_path): """Test ad hoc cov creation and I/O.""" - out_fname = tmp_path.join('test-cov.fif') + out_fname = tmp_path / 'test-cov.fif' evoked = read_evokeds(ave_fname)[0] cov = make_ad_hoc_cov(evoked.info) cov.save(out_fname) @@ -223,8 +223,8 @@ def test_io_cov(tmp_path): cov = read_cov(cov_fname) cov['method'] = 'empirical' cov['loglik'] = -np.inf - cov.save(tmp_path.join('test-cov.fif')) - cov2 = read_cov(tmp_path.join('test-cov.fif')) + cov.save(tmp_path / 'test-cov.fif') + cov2 = read_cov(tmp_path / 'test-cov.fif') assert_array_almost_equal(cov.data, cov2.data) assert_equal(cov['method'], cov2['method']) assert_equal(cov['loglik'], cov2['loglik']) @@ -232,24 +232,24 @@ def test_io_cov(tmp_path): cov2 = read_cov(cov_gz_fname) assert_array_almost_equal(cov.data, cov2.data) - cov2.save(tmp_path.join('test-cov.fif.gz')) - cov2 = read_cov(tmp_path.join('test-cov.fif.gz')) + cov2.save(tmp_path / 'test-cov.fif.gz') + cov2 = read_cov(tmp_path / 'test-cov.fif.gz') assert_array_almost_equal(cov.data, cov2.data) cov['bads'] = ['EEG 039'] cov_sel = pick_channels_cov(cov, exclude=cov['bads']) assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])) assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']) - cov_sel.save(tmp_path.join('test-cov.fif')) + cov_sel.save(tmp_path / 'test-cov.fif') cov2 = read_cov(cov_gz_fname) 
assert_array_almost_equal(cov.data, cov2.data) - cov2.save(tmp_path.join('test-cov.fif.gz')) - cov2 = read_cov(tmp_path.join('test-cov.fif.gz')) + cov2.save(tmp_path / 'test-cov.fif.gz') + cov2 = read_cov(tmp_path / 'test-cov.fif.gz') assert_array_almost_equal(cov.data, cov2.data) # test warnings on bad filenames - cov_badname = tmp_path.join('test-bad-name.fif.gz') + cov_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='-cov.fif'): write_cov(cov_badname, cov) with pytest.warns(RuntimeWarning, match='-cov.fif'): @@ -301,8 +301,8 @@ def test_cov_estimation_on_raw(method, tmp_path): assert_snr(cov.data, cov_mne.data, 170) # test IO when computation done in Python - cov.save(tmp_path.join('test-cov.fif')) # test saving - cov_read = read_cov(tmp_path.join('test-cov.fif')) + cov.save(tmp_path / 'test-cov.fif') # test saving + cov_read = read_cov(tmp_path / 'test-cov.fif') assert cov_read.ch_names == cov.ch_names assert cov_read.nfree == cov.nfree assert_array_almost_equal(cov.data, cov_read.data) @@ -406,7 +406,7 @@ def test_cov_estimation_with_triggers(rank, tmp_path): keep_sample_mean=False, method='shrunk', rank=rank) # test IO when computation done in Python - cov.save(tmp_path.join('test-cov.fif')) # test saving + cov.save(tmp_path / 'test-cov.fif') # test saving cov_read = read_cov(tmp_path.join('test-cov.fif')) _assert_cov(cov, cov_read, 1e-5) diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 702fa8543e6..a6b173a790a 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1205,7 +1205,7 @@ def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, epochs = concatenate_epochs([epochs[ii] for ii in range(len(epochs))]) epochs_data = epochs.get_data() assert len(epochs) == n_epochs - fname = str(tmp_path.join('test-epo.fif')) + fname = tmp_path / 'test-epo.fif' epochs.save(fname, split_size=split_size, overwrite=True) got_size = _get_split_size(split_size) assert got_size == size @@ -1218,7 +1218,7 @@ def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, # Check that if BIDS is used and no split is needed it defaults to # simple writing without _split- entity. 
- split_fname = str(tmp_path.join('test_epo.fif')) + split_fname = tmp_path / 'test_epo.fif' split_fname_neuromag_part1 = split_fname.replace( 'epo.fif', f'epo-{n_files + 1}.fif') split_fname_bids_part1 = split_fname.replace( @@ -1257,7 +1257,7 @@ def test_split_many_reset(tmp_path): epochs = EpochsArray(data, info, tmin=0., selection=selection) assert len(epochs.drop_log) == 101000 assert len(epochs) == len(data) == len(epochs.events) - fname = str(tmp_path.join('temp-epo.fif')) + fname = tmp_path / 'temp-epo.fif' for split_size in ('0.5MB', '1MB', '2MB'): # tons of overhead from sel with pytest.raises(ValueError, match='too small to safely'): epochs.save(fname, split_size=split_size, verbose='debug') @@ -3526,7 +3526,7 @@ def test_epochs_huge_events(tmp_path): epochs = EpochsArray(data, info) epochs.events = events with pytest.raises(TypeError, match='exceeds maximum'): - epochs.save(tmp_path.join('temp-epo.fif')) + epochs.save(tmp_path / 'temp-epo.fif') def _old_bad_write(fid, kind, arr): @@ -3546,7 +3546,7 @@ def test_concat_overflow(tmp_path, monkeypatch): with pytest.warns(RuntimeWarning, match='consecutive increasing'): epochs = mne.concatenate_epochs((epochs_1, epochs_2)) assert_array_less(0, epochs.events[:, 0]) - fname = tmp_path.join('temp-epo.fif') + fname = tmp_path / 'temp-epo.fif' epochs.save(fname) epochs = read_epochs(fname) assert_array_less(0, epochs.events[:, 0]) @@ -3600,7 +3600,7 @@ def test_epochs_baseline_after_cropping(tmp_path): epochs_orig.get_data().squeeze()[200:]) # Test I/O roundtrip. - epochs_fname = tmp_path.join('temp-cropped-epo.fif') + epochs_fname = tmp_path / 'temp-cropped-epo.fif' epochs_cropped.save(epochs_fname) epochs_cropped_read = mne.read_epochs(epochs_fname) diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index 3d96d531c9f..4f1c7a76956 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -114,7 +114,7 @@ def test_io_events(tmp_path): """Test IO for events.""" # Test binary fif IO events = read_events(fname) # Use as the gold standard - fname_temp = tmp_path.join('events-eve.fif') + fname_temp = tmp_path / 'events-eve.fif' write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) @@ -128,7 +128,7 @@ def test_io_events(tmp_path): assert_array_almost_equal(events, events2) # Test new format text file IO - fname_temp = str(tmp_path.join('events.eve')) + fname_temp = tmp_path / 'events.eve' write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) @@ -144,7 +144,7 @@ def test_io_events(tmp_path): assert_array_almost_equal(events, events2) # Test event selection - fname_temp = tmp_path.join('events-eve.fif') + fname_temp = tmp_path / 'events-eve.fif' a = read_events(fname_temp, include=1) b = read_events(fname_temp, include=[1]) c = read_events(fname_temp, exclude=[2, 3, 4, 5, 32]) @@ -167,13 +167,13 @@ def test_io_events(tmp_path): assert_array_almost_equal(events, events2) # Test text file IO for 1 event - fname_temp = str(tmp_path.join('events.eve')) + fname_temp = tmp_path / 'events.eve' write_events(fname_temp, events) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) # test warnings on bad filenames - fname2 = tmp_path.join('test-bad-name.fif') + fname2 = tmp_path / 'test-bad-name.fif' with pytest.warns(RuntimeWarning, match='-eve.fif'): write_events(fname2, events) with pytest.warns(RuntimeWarning, match='-eve.fif'): diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py 
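A pathlib.Path has no str methods, so name manipulation such as .replace(), slicing, or '+' on the result of tmp_path / ... does not behave like it did on a plain string; this is why several follow-up commits later in this series convert back with str() before editing the filename. A small sketch of the pitfall and two workarounds (the path literal is illustrative only, and with_name() is just one alternative, not what the series itself uses):

    # Sketch: Path objects need str() (or pathlib helpers) for text edits.
    from pathlib import Path

    p = Path('/tmp/pytest-0') / 'test_epo.fif'   # illustrative path
    # Note: p.replace(...) would rename a file on disk (os.replace semantics);
    # it is not str.replace(), so the string form is edited instead:
    part1 = str(p).replace('epo.fif', 'epo-1.fif')
    gz = str(p) + '.gz'                          # concatenation also needs str()
    alt = p.with_name('test_epo-1.fif')          # pure-pathlib alternative
    assert part1.endswith('epo-1.fif') and alt.name == 'test_epo-1.fif'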
index 402401953d4..beb8321c0c6 100644 --- a/mne/tests/test_evoked.py +++ b/mne/tests/test_evoked.py @@ -198,7 +198,7 @@ def test_io_evoked(tmp_path): ave_double.comment = ave.comment + ' doubled nave' ave_double.nave = ave.nave * 2 - write_evokeds(tmp_path.join('evoked-ave.fif'), [ave, ave_double]) + write_evokeds(tmp_path / 'evoked-ave.fif', [ave, ave_double]) ave2, ave_double = read_evokeds(op.join(tmp_path, 'evoked-ave.fif')) assert ave2.nave * 2 == ave_double.nave diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index 89fe9f1c2ca..8cf8ea67ff0 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -173,7 +173,7 @@ def test_volume_stc(tmp_path): stc = VolVectorSourceEstimate(data, [vertno], 0, 1) ext = 'h5' klass = VolVectorSourceEstimate - fname_temp = tmp_path.join('temp-vl.' + ext) + fname_temp = tmp_path / 'temp-vl.' + ext stc_new = stc n = 3 if ext == 'h5' else 2 for ii in range(n): @@ -204,7 +204,7 @@ def test_volume_stc(tmp_path): pytest.raises(ValueError, stc.save, fname_vol, ftype='whatever') for ftype in ['w', 'h5']: for _ in range(2): - fname_temp = tmp_path.join('temp-vol.%s' % ftype) + fname_temp = tmp_path / ('temp-vol.%s' % ftype) stc_new.save(fname_temp, ftype=ftype) stc_new = read_source_estimate(fname_temp) assert (isinstance(stc_new, VolSourceEstimate)) @@ -244,7 +244,7 @@ def test_save_vol_stc_as_nifti(tmp_path): """Save the stc as a nifti file and export.""" import nibabel as nib src = read_source_spaces(fname_vsrc) - vol_fname = tmp_path.join('stc.nii.gz') + vol_fname = tmp_path / 'stc.nii.gz' # now let's actually read a MNE-C processed file stc = read_source_estimate(fname_vol, 'sample') @@ -258,7 +258,7 @@ def test_save_vol_stc_as_nifti(tmp_path): with pytest.warns(None): # nib<->numpy t1_img = nib.load(fname_t1) - stc.save_as_volume(tmp_path.join('stc.nii.gz'), src, + stc.save_as_volume(tmp_path / 'stc.nii.gz', src, dest='mri', mri_resolution=True) with pytest.warns(None): # nib<->numpy img = nib.load(str(vol_fname)) @@ -413,8 +413,8 @@ def attempt_assignment(stc, attr, val): def test_io_stc(tmp_path): """Test IO for STC files.""" stc = _fake_stc() - stc.save(tmp_path.join("tmp.stc")) - stc2 = read_source_estimate(tmp_path.join("tmp.stc")) + stc.save(tmp_path / "tmp.stc") + stc2 = read_source_estimate(tmp_path / "tmp.stc") assert_array_almost_equal(stc.data, stc2.data) assert_array_almost_equal(stc.tmin, stc2.tmin) @@ -425,7 +425,7 @@ def test_io_stc(tmp_path): # test warning for complex data stc2.data = stc2.data.astype(np.complex128) with pytest.raises(ValueError, match='Cannot save complex-valued STC'): - stc2.save(tmp_path.join('complex.stc')) + stc2.save(tmp_path / 'complex.stc') @requires_h5py @@ -437,9 +437,8 @@ def test_io_stc_h5(tmp_path, is_complex, vector): stc = _fake_vec_stc(is_complex=is_complex) else: stc = _fake_stc(is_complex=is_complex) - pytest.raises(ValueError, stc.save, tmp_path.join('tmp'), - ftype='foo') - out_name = tmp_path.join('tmp') + pytest.raises(ValueError, stc.save, tmp_path / 'tmp', ftype='foo') + out_name = tmp_path / 'tmp' stc.save(out_name, ftype='h5') stc.save(out_name, ftype='h5') # test overwrite stc3 = read_source_estimate(out_name) @@ -460,11 +459,11 @@ def test_io_stc_h5(tmp_path, is_complex, vector): def test_io_w(tmp_path): """Test IO for w files.""" stc = _fake_stc(n_time=1) - w_fname = tmp_path.join('fake') + w_fname = tmp_path / 'fake' stc.save(w_fname, ftype='w') src = read_source_estimate(w_fname) - src.save(tmp_path.join('tmp'), ftype='w') - 
src2 = read_source_estimate(tmp_path.join('tmp-lh.w')) + src.save(tmp_path / 'tmp', ftype='w') + src2 = read_source_estimate(tmp_path / 'tmp-lh.w') assert_array_almost_equal(src.data, src2.data) assert_array_almost_equal(src.lh_vertno, src2.lh_vertno) assert_array_almost_equal(src.rh_vertno, src2.rh_vertno) @@ -1186,7 +1185,7 @@ def test_mixed_stc(tmp_path): stc = MixedSourceEstimate(data, vertno, 0, 1) # make sure error is raised for plotting surface with volume source - fname = tmp_path.join('mixed-stc.h5') + fname = tmp_path / 'mixed-stc.h5' stc.save(fname) stc_out = read_source_estimate(fname) assert_array_equal(stc_out.vertices, vertno) @@ -1278,7 +1277,7 @@ def test_vec_stc_basic(tmp_path, klass, kind, dtype): assert_allclose(got_directions, directions * flips) assert_allclose(projected.data, amplitudes * flips) - out_name = tmp_path.join('temp.h5') + out_name = tmp_path / 'temp.h5' stc.save(out_name) stc_read = read_source_estimate(out_name) assert_allclose(stc.data, stc_read.data) diff --git a/mne/utils/tests/test_logging.py b/mne/utils/tests/test_logging.py index 021555cb588..f912312e124 100644 --- a/mne/utils/tests/test_logging.py +++ b/mne/utils/tests/test_logging.py @@ -177,7 +177,7 @@ def test_warn(capsys, tmp_path, monkeypatch): assert captured.out == '' # gh-5592 assert captured.err == '' # this is because pytest.warns took it already # test ignore_namespaces - bad_name = tmp_path.join('bad.fif') + bad_name = tmp_path / 'bad.fif' raw = RawArray(np.zeros((1, 1)), create_info(1, 1000., 'eeg')) with pytest.warns(RuntimeWarning, match='filename') as ws: raw.save(bad_name) diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index 656dad2c390..8c7c97ffa33 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -36,7 +36,7 @@ def test_datasets(monkeypatch, tmp_path): """Test dataset config.""" # gh-4192 fake_path = tmp_path.mkdir('MNE-testing-data') - with open(fake_path.join('version.txt'), 'w') as fid: + with open(fake_path / 'version.txt', 'w') as fid: fid.write('9999.9999') monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) monkeypatch.setenv('MNE_DATASETS_TESTING_PATH', str(tmp_path)) From 3b124a0dc50cf65017e46f341736303ee7d6f7b7 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 23:18:09 +0200 Subject: [PATCH 06/23] Even more fixes --- mne/channels/tests/test_montage.py | 2 +- mne/io/edf/tests/test_edf.py | 10 +++-- mne/io/fiff/tests/test_raw_fiff.py | 61 +++++++++++++++--------------- mne/io/tests/test_meas_info.py | 18 ++++----- mne/tests/test_cov.py | 2 +- mne/tests/test_epochs.py | 4 +- mne/tests/test_event.py | 2 +- mne/tests/test_evoked.py | 12 +++--- 8 files changed, 56 insertions(+), 55 deletions(-) diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index d1afdaa1142..36b15dac6a0 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -588,7 +588,7 @@ def test_read_dig_montage_using_polhemus_fastscan_error_handling(tmp_path): EXPECTED_ERR_MSG = "allowed value is '.txt', but got '.bar' instead" with pytest.raises(ValueError, match=EXPECTED_ERR_MSG): - _ = read_polhemus_fastscan(fname=tmp_path.join('foo.bar')) + _ = read_polhemus_fastscan(fname=tmp_path / 'foo.bar') def test_read_dig_polhemus_isotrak_hsp(): diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index c31595ee023..9f22d8d95ac 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -198,14 +198,15 @@ def 
test_parse_annotation(tmp_path): b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') annot_file = tmp_path / 'annotations.txt' - annot_file.write(annot) + with open(annot_file, "w") as f: + f.write(annot) annot = [a for a in bytes(annot)] annot[1::2] = [a * 256 for a in annot[1::2]] tal_channel_A = np.array(list(map(sum, zip(annot[0::2], annot[1::2]))), dtype=np.int64) - with open(str(annot_file), 'rb') as fid: + with open(annot_file, 'rb') as fid: # ch_data = np.fromfile(fid, dtype=' 0: @@ -253,7 +253,7 @@ def test_multiple_files(tmp_path): # going in reverse order so the last fname is the first file (need later) raws = [None] * len(tmins) for ri in range(len(tmins) - 1, -1, -1): - fname = tmp_path.join('test_raw_split-%d_raw.fif' % ri) + fname = tmp_path / ('test_raw_split-%d_raw.fif' % ri) raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri]) raws[ri] = read_raw_fif(fname) assert (len(raws[ri].times) == @@ -393,11 +393,11 @@ def test_split_files(tmp_path, mod, monkeypatch): # Test a very close corner case assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2) # samp rate - split_fname = tmp_path.join(f'split_raw_{mod}.fif') + split_fname = tmp_path / f'split_raw_{mod}.fif' # intended filenames - split_fname_elekta_part2 = tmp_path.join(f'split_raw_{mod}-1.fif') - split_fname_bids_part1 = tmp_path.join(f'split_raw_split-01_{mod}.fif') - split_fname_bids_part2 = tmp_path.join(f'split_raw_split-02_{mod}.fif') + split_fname_elekta_part2 = tmp_path / f'split_raw_{mod}-1.fif' + split_fname_bids_part1 = tmp_path / f'split_raw_split-01_{mod}.fif' + split_fname_bids_part2 = tmp_path / f'split_raw_split-02_{mod}.fif' raw_1.set_annotations(Annotations([2.], [5.5], 'test')) # Check that if BIDS is used and no split is needed it defaults to @@ -515,15 +515,15 @@ def test_split_files(tmp_path, mod, monkeypatch): assert op.isdir(tmp_path) with pytest.raises(ValueError, match='must end with an underscore'): raw_crop.save( - tmp_path.join('test.fif'), split_naming='bids', verbose='error') + tmp_path / 'test.fif', split_naming='bids', verbose='error') # reserved file is deleted - fname = tmp_path.join('test_raw.fif') + fname = tmp_path / 'test_raw.fif' monkeypatch.setattr(base, '_write_raw_fid', _err) with pytest.raises(RuntimeError, match='Killed mid-write'): raw_1.save(fname, split_size='10MB', split_naming='bids') assert op.isfile(fname) - assert not op.isfile(tmp_path.join('test_split-01_raw.fif')) + assert not op.isfile(tmp_path / 'test_split-01_raw.fif') def _err(*args, **kwargs): @@ -564,7 +564,7 @@ def test_load_bad_channels(tmp_path): raw.load_bad_channels(bad_file_works) # Write it out, read it in, and check raw.save(tmp_path / 'foo_raw.fif') - raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') assert correct_bads == raw_new.info['bads'] # Reset it raw.info['bads'] = [] @@ -576,14 +576,14 @@ def test_load_bad_channels(tmp_path): with pytest.warns(RuntimeWarning, match='1 bad channel'): raw.load_bad_channels(bad_file_wrong, force=True) # write it out, read it in, and check - raw.save(tmp_path.join('foo_raw.fif'), overwrite=True) - raw_new = read_raw_fif(tmp_path.join('foo_raw.fif')) + raw.save(tmp_path / 'foo_raw.fif', overwrite=True) + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') assert correct_bads == raw_new.info['bads'] # Check that bad channels are cleared raw.load_bad_channels(None) - raw.save(tmp_path.join('foo_raw.fif'), overwrite=True) - raw_new = 
read_raw_fif(tmp_path.join('foo_raw.fif')) + raw.save(tmp_path / 'foo_raw.fif', overwrite=True) + raw_new = read_raw_fif(tmp_path / 'foo_raw.fif') assert raw_new.info['bads'] == [] @@ -598,7 +598,7 @@ def test_io_raw(tmp_path): assert ('Raw' in repr(r)) assert (op.basename(fif_fname) in repr(r)) r.info['description'] = chars - temp_file = tmp_path.join('raw.fif') + temp_file = tmp_path / 'raw.fif' r.save(temp_file, overwrite=True) with read_raw_fif(temp_file) as r2: desc2 = r2.info['description'] @@ -611,7 +611,7 @@ def test_io_raw(tmp_path): data = rng.randn(raw._data.shape[0], raw._data.shape[1]) raw._data[:, :] = data # save it somewhere - fname = tmp_path.join('test_copy_raw.fif') + fname = tmp_path / 'test_copy_raw.fif' raw.save(fname, buffer_size_sec=1.0) # read it in, make sure the whole thing matches raw = read_raw_fif(fname) @@ -697,7 +697,7 @@ def test_io_raw_additional(fname_in, fname_out, tmp_path): assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r']) # test warnings on bad filenames - raw_badname = tmp_path.join('test-bad-name.fif.gz') + raw_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='raw.fif'): raw.save(raw_badname) with pytest.warns(RuntimeWarning, match='raw.fif'): @@ -796,8 +796,8 @@ def test_proj(tmp_path): raw = read_raw_fif(fif_fname, preload=preload) # write the file with proj. activated, make sure proj has been applied - raw.save(tmp_path.join('raw.fif'), proj=True, overwrite=True) - raw2 = read_raw_fif(tmp_path.join('raw.fif')) + raw.save(tmp_path / 'raw.fif', proj=True, overwrite=True) + raw2 = read_raw_fif(tmp_path / 'raw.fif') data_proj_2, _ = raw2[:, 0:2] assert_allclose(data_proj_1, data_proj_2) assert (all(p['active'] for p in raw2.info['projs'])) @@ -829,7 +829,7 @@ def test_proj(tmp_path): # I/O roundtrip of an MEG projector with a Raw that only contains EEG # data. 
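Passing the Path objects straight into op.isfile(), op.basename(), shutil and the save/read calls, as the hunks above do, relies on pathlib.Path implementing os.PathLike (PEP 519), which the standard library has honored since Python 3.6. A self-contained sketch using only the standard library (file names are illustrative):

    # Sketch: os.path and shutil accept Path objects directly (PEP 519).
    import os.path as op
    import shutil
    import tempfile
    from pathlib import Path

    tmp_path = Path(tempfile.mkdtemp())      # stand-in for the pytest fixture
    src = tmp_path / 'a.txt'
    src.write_text('data')
    dst = tmp_path / 'b.txt'
    shutil.copyfile(src, dst)                # no str() conversion needed
    assert op.isfile(dst) and op.basename(dst) == 'b.txt'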
- out_fname = tmp_path.join('test_raw.fif') + out_fname = tmp_path / 'test_raw.fif' raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002) proj = raw.info['projs'][-1] raw.pick_types(meg=False, eeg=True) @@ -862,7 +862,7 @@ def test_preload_modify(preload, tmp_path): else: raise - tmp_fname = tmp_path.join('raw.fif') + tmp_fname = tmp_path / 'raw.fif' raw.save(tmp_fname, overwrite=True) raw_new = read_raw_fif(tmp_fname) @@ -1098,9 +1098,8 @@ def test_resample(tmp_path, preload, n, npad): # test parallel on upsample raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad) assert raw_resamp.n_times == len(raw_resamp.times) - raw_resamp.save(tmp_path.join('raw_resamp-raw.fif')) - raw_resamp = read_raw_fif(tmp_path.join('raw_resamp-raw.fif'), - preload=True) + raw_resamp.save(tmp_path / 'raw_resamp-raw.fif') + raw_resamp = read_raw_fif(tmp_path / 'raw_resamp-raw.fif', preload=True) assert sfreq == raw_resamp.info['sfreq'] / 2 assert raw.n_times == raw_resamp.n_times // 2 assert raw_resamp.get_data().shape[1] == raw_resamp.n_times @@ -1378,7 +1377,7 @@ def test_add_channels(): @testing.requires_testing_data def test_save(tmp_path): """Test saving raw.""" - temp_fname = tmp_path.join('test_raw.fif') + temp_fname = tmp_path / 'test_raw.fif' shutil.copyfile(fif_fname, temp_fname) raw = read_raw_fif(temp_fname, preload=False) # can't write over file being read @@ -1394,7 +1393,7 @@ def test_save(tmp_path): annot = Annotations([10], [5], ['test'], orig_time=orig_time) raw.set_annotations(annot) annot = raw.annotations - new_fname = tmp_path.join('break_raw.fif') + new_fname = tmp_path / 'break_raw.fif' raw.save(new_fname, overwrite=True) new_raw = read_raw_fif(new_fname, preload=False) pytest.raises(ValueError, new_raw.save, new_fname) @@ -1437,7 +1436,7 @@ def test_annotation_crop(tmp_path): [1., 1. + 1. 
/ raw.info['sfreq']], atol=1e-3) # make sure we can overwrite the file we loaded when preload=True - new_fname = tmp_path.join('break_raw.fif') + new_fname = tmp_path / 'break_raw.fif' raw.save(new_fname) new_raw = read_raw_fif(new_fname, preload=True) new_raw.save(new_fname, overwrite=True) @@ -1553,7 +1552,7 @@ def test_compensation_raw(tmp_path): def test_compensation_raw_mne(tmp_path): """Test Raw compensation by comparing with MNE-C.""" def compensate_mne(fname, grad): - tmp_fname = tmp_path.join('mne_ctf_test_raw.fif') + tmp_fname = tmp_path / 'mne_ctf_test_raw.fif' cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname, '--grad', str(grad), '--projoff', '--filteroff'] run_subprocess(cmd) @@ -1786,7 +1785,7 @@ def test_corrupted(tmp_path): assert dirpos == 12641532 fid.seek(0) data = fid.read(dirpos) - bad_fname = tmp_path.join('test_raw.fif') + bad_fname = tmp_path / 'test_raw.fif' with open(bad_fname, 'wb') as fid: fid.write(data) with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'): diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py index b4c9a160e84..fcdc3c43141 100644 --- a/mne/io/tests/test_meas_info.py +++ b/mne/io/tests/test_meas_info.py @@ -269,7 +269,7 @@ def test_read_write_info(tmp_path): with open(temp_file, 'rb') as fid: m1.update(fid.read()) m1 = m1.hexdigest() - temp_file_2 = tmp_path.join('info2.fif') + temp_file_2 = tmp_path / 'info2.fif' assert temp_file_2 != temp_file write_info(temp_file_2, info) m2 = hashlib.md5() @@ -283,7 +283,7 @@ def test_read_write_info(tmp_path): info['meas_date'] = None anonymize_info(info, verbose='error') assert info['meas_date'] is None - tmp_fname_3 = tmp_path.join('info3.fif') + tmp_fname_3 = tmp_path / 'info3.fif' write_info(tmp_fname_3, info) assert info['meas_date'] is None info2 = read_info(tmp_fname_3) @@ -292,7 +292,7 @@ def test_read_write_info(tmp_path): # Check that having a very old date in fine until you try to save it to fif with info._unlock(check_after=True): info['meas_date'] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc) - fname = tmp_path.join('test.fif') + fname = tmp_path / 'test.fif' with pytest.raises(RuntimeError, match='must be between '): write_info(fname, info) @@ -763,7 +763,7 @@ def test_csr_csc(tmp_path): ct = sss_ctc['decoupler'].copy() # CSC assert isinstance(ct, sparse.csc_matrix) - fname = tmp_path.join('test.fif') + fname = tmp_path / 'test.fif' write_info(fname, info) info_read = read_info(fname) ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] @@ -774,7 +774,7 @@ def test_csr_csc(tmp_path): assert isinstance(csr, sparse.csr_matrix) assert_array_equal(csr.toarray(), ct.toarray()) info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr - fname = tmp_path.join('test1.fif') + fname = tmp_path / 'test1.fif' write_info(fname, info) info_read = read_info(fname) ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler'] @@ -960,7 +960,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): # epochs # epochs = Epochs(raw, make_fixed_length_events(raw)) - fname = tmp_path.join('test-epo.fif') + fname = tmp_path / 'test-epo.fif' epochs.save(fname) epochs_read = read_epochs(fname) for ep in (epochs, epochs_read): @@ -970,7 +970,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): # cov epochs.info['bads'] = [] cov = compute_covariance(epochs, verbose='error') - fname = tmp_path.join('test-cov.fif') + fname = tmp_path / 'test-cov.fif' write_cov(fname, cov) cov_read = read_cov(fname) for 
co in (cov, cov_read): @@ -984,7 +984,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): evoked = epochs.average() evoked.info['bads'] = bads assert evoked.nave == 1 - fname = tmp_path.join('test-ave.fif') + fname = tmp_path / 'test-ave.fif' evoked.save(fname) evoked_read = read_evokeds(fname)[0] for ev in (evoked, evoked_read): @@ -1000,7 +1000,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): src = setup_volume_source_space( pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]])) fwd = make_forward_solution(evoked.info, None, src, sphere) - fname = tmp_path.join('temp-fwd.fif') + fname = tmp_path / 'temp-fwd.fif' write_forward_solution(fname, fwd) fwd_read = read_forward_solution(fname) for fw in (fwd, fwd_read): diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index bfc66f4fbfd..8001a3f59d5 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -407,7 +407,7 @@ def test_cov_estimation_with_triggers(rank, tmp_path): # test IO when computation done in Python cov.save(tmp_path / 'test-cov.fif') # test saving - cov_read = read_cov(tmp_path.join('test-cov.fif')) + cov_read = read_cov(tmp_path / 'test-cov.fif') _assert_cov(cov, cov_read, 1e-5) # cov with list of epochs with different projectors diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index a6b173a790a..f196bf33317 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1282,8 +1282,8 @@ def test_split_many_reset(tmp_path): def _assert_splits(fname, n, size): __tracebackhide__ = True assert n >= 0 - next_fnames = [fname] + [ - fname[:-4] + '-%d.fif' % ii for ii in range(1, n + 2)] + next_fnames = [str(fname)] + [ + str(fname)[:-4] + '-%d.fif' % ii for ii in range(1, n + 2)] bad_fname = next_fnames.pop(-1) for ii, this_fname in enumerate(next_fnames[:-1]): assert op.isfile(this_fname), f'Missing file: {this_fname}' diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index 4f1c7a76956..a20b7d7b838 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -122,7 +122,7 @@ def test_io_events(tmp_path): # Test binary fif.gz IO events2 = read_events(fname_gz) # Use as the gold standard assert_array_almost_equal(events, events2) - fname_temp += '.gz' + fname_temp = str(fname_temp) + '.gz' write_events(fname_temp, events2) events2 = read_events(fname_temp) assert_array_almost_equal(events, events2) diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py index beb8321c0c6..f5499580297 100644 --- a/mne/tests/test_evoked.py +++ b/mne/tests/test_evoked.py @@ -228,8 +228,8 @@ def test_io_evoked(tmp_path): aves1 = read_evokeds(fname)[1::2] aves2 = read_evokeds(fname, [1, 3]) aves3 = read_evokeds(fname, ['Right Auditory', 'Right visual']) - write_evokeds(tmp_path.join('evoked-ave.fif'), aves1) - aves4 = read_evokeds(tmp_path.join('evoked-ave.fif')) + write_evokeds(tmp_path / 'evoked-ave.fif', aves1) + aves4 = read_evokeds(tmp_path / 'evoked-ave.fif') for aves in [aves2, aves3, aves4]: for [av1, av2] in zip(aves1, aves): assert_array_almost_equal(av1.data, av2.data) @@ -244,20 +244,20 @@ def test_io_evoked(tmp_path): # test saving and reading complex numbers in evokeds ave_complex = ave.copy() ave_complex._data = 1j * ave_complex.data - fname_temp = str(tmp_path.join('complex-ave.fif')) + fname_temp = str(tmp_path / 'complex-ave.fif') ave_complex.save(fname_temp) ave_complex = read_evokeds(fname_temp)[0] assert_allclose(ave.data, ave_complex.data.imag) # test warnings on bad filenames - fname2 = tmp_path.join('test-bad-name.fif') + fname2 = 
tmp_path / 'test-bad-name.fif' with pytest.warns(RuntimeWarning, match='-ave.fif'): write_evokeds(fname2, ave) with pytest.warns(RuntimeWarning, match='-ave.fif'): read_evokeds(fname2) # test writing when order of bads doesn't match - fname3 = tmp_path.join('test-bad-order-ave.fif') + fname3 = tmp_path / 'test-bad-order-ave.fif' condition = 'Left Auditory' ave4 = read_evokeds(fname, condition) ave4.info['bads'] = ave4.ch_names[:3] @@ -269,7 +269,7 @@ def test_io_evoked(tmp_path): pytest.raises(TypeError, Evoked, fname) # MaxShield - fname_ms = tmp_path.join('test-ave.fif') + fname_ms = tmp_path / 'test-ave.fif' assert (ave.info['maxshield'] is False) with ave.info._unlock(): ave.info['maxshield'] = True From 33fd802ea3f64e9f64f0176db3b5e550aa9615b7 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 23:40:01 +0200 Subject: [PATCH 07/23] More fixes --- mne/io/edf/tests/test_edf.py | 4 ++-- mne/io/fiff/tests/test_raw_fiff.py | 2 +- mne/io/tests/test_meas_info.py | 2 +- mne/tests/test_epochs.py | 4 ++-- mne/utils/tests/test_testing.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 9f22d8d95ac..4dba6b1727c 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -198,7 +198,7 @@ def test_parse_annotation(tmp_path): b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') annot_file = tmp_path / 'annotations.txt' - with open(annot_file, "w") as f: + with open(annot_file, "wb") as f: f.write(annot) annot = [a for a in bytes(annot)] @@ -317,7 +317,7 @@ def test_read_annot(tmp_path): b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00' b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00') annot_file = tmp_path / 'annotations.txt' - with open(annot_file, "w") as f: + with open(annot_file, "wb") as f: f.write(annot) onset, duration, desc = _read_annotations_edf(annotations=str(annot_file)) diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 2dc41d4a66f..554843078ec 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -1643,7 +1643,7 @@ def test_equalize_channels(): def test_memmap(tmp_path): """Test some interesting memmapping cases.""" # concatenate_raw - memmaps = [tmp_path / str(ii) for ii in range(3)] + memmaps = [str(tmp_path / str(ii)) for ii in range(3)] raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0]) assert raw_0._data.filename == memmaps[0] raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1]) diff --git a/mne/io/tests/test_meas_info.py b/mne/io/tests/test_meas_info.py index fcdc3c43141..94ffe5385d7 100644 --- a/mne/io/tests/test_meas_info.py +++ b/mne/io/tests/test_meas_info.py @@ -1013,7 +1013,7 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): # inv # inv = make_inverse_operator(evoked.info, fwd, cov) - fname = tmp_path.join('test-inv.fif') + fname = tmp_path / 'test-inv.fif' write_inverse_operator(fname, inv) inv_read = read_inverse_operator(fname) for iv in (inv, inv_read): diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index f196bf33317..893bde81845 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1210,7 +1210,7 @@ def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, got_size = _get_split_size(split_size) assert got_size == size _assert_splits(fname, n_files, size) - assert not op.isfile(f'{fname[:-4]}-{n_files + 1}.fif') + assert not 
op.isfile(f'{str(fname)[:-4]}-{n_files + 1}.fif') for preload in (True, False): epochs2 = mne.read_epochs(fname, preload=preload) assert_allclose(epochs2.get_data(), epochs_data) @@ -1271,7 +1271,7 @@ def test_split_many_reset(tmp_path): mb = 3 * 1024 * 1024 _assert_splits(fname, 6, mb) # reset, then it should work - fname = str(tmp_path.join('temp-reset-epo.fif')) + fname = tmp_path / 'temp-reset-epo.fif' epochs.reset_drop_log_selection() epochs.save(fname, split_size=split_size, verbose='debug') _assert_splits(fname, 4, mb) diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index 8c7c97ffa33..c2b21c027c8 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -35,7 +35,7 @@ def test_tempdir(): def test_datasets(monkeypatch, tmp_path): """Test dataset config.""" # gh-4192 - fake_path = tmp_path.mkdir('MNE-testing-data') + fake_path = (tmp_path / 'MNE-testing-data').mkdir() with open(fake_path / 'version.txt', 'w') as fid: fid.write('9999.9999') monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) From ef99a113edec05a22511d0657dda3554eb826f00 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 29 Oct 2021 23:56:57 +0200 Subject: [PATCH 08/23] Hopefully final set of fixes --- mne/tests/test_epochs.py | 2 +- mne/utils/tests/test_testing.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 893bde81845..ca2f11e09a4 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1218,7 +1218,7 @@ def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, # Check that if BIDS is used and no split is needed it defaults to # simple writing without _split- entity. - split_fname = tmp_path / 'test_epo.fif' + split_fname = str(tmp_path / 'test_epo.fif') split_fname_neuromag_part1 = split_fname.replace( 'epo.fif', f'epo-{n_files + 1}.fif') split_fname_bids_part1 = split_fname.replace( diff --git a/mne/utils/tests/test_testing.py b/mne/utils/tests/test_testing.py index c2b21c027c8..cfcf5fe16fa 100644 --- a/mne/utils/tests/test_testing.py +++ b/mne/utils/tests/test_testing.py @@ -35,7 +35,8 @@ def test_tempdir(): def test_datasets(monkeypatch, tmp_path): """Test dataset config.""" # gh-4192 - fake_path = (tmp_path / 'MNE-testing-data').mkdir() + fake_path = tmp_path / 'MNE-testing-data' + fake_path.mkdir() with open(fake_path / 'version.txt', 'w') as fid: fid.write('9999.9999') monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path)) From 2e83b330c5a3a61e7c53eda3d19174678a92f01c Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sat, 30 Oct 2021 08:57:26 +0200 Subject: [PATCH 09/23] More fun with tmp_dir.joins --- mne/channels/tests/test_montage.py | 4 +-- mne/forward/tests/test_forward.py | 10 +++--- mne/forward/tests/test_make_forward.py | 14 ++++----- mne/io/brainvision/tests/test_brainvision.py | 2 +- mne/io/curry/tests/test_curry.py | 4 +-- mne/io/edf/tests/test_edf.py | 2 +- mne/io/edf/tests/test_gdf.py | 2 +- mne/io/eeglab/tests/test_eeglab.py | 2 +- mne/io/egi/tests/test_egi.py | 4 +-- mne/io/fiff/tests/test_raw_fiff.py | 13 +++++--- mne/io/nirx/tests/test_nirx.py | 2 +- mne/preprocessing/tests/test_ica.py | 16 +++++----- mne/tests/test_bem.py | 8 ++--- mne/tests/test_dipole.py | 2 +- mne/tests/test_freesurfer.py | 2 +- mne/tests/test_source_space.py | 32 ++++++++++---------- mne/utils/tests/test_check.py | 2 +- 17 files changed, 62 insertions(+), 59 deletions(-) diff --git a/mne/channels/tests/test_montage.py 
b/mne/channels/tests/test_montage.py index 36b15dac6a0..18941e65578 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -957,7 +957,7 @@ def test_egi_dig_montage(tmp_path): ) # test round-trip IO - fname_temp = tmp_path.join('egi_test.fif') + fname_temp = tmp_path / 'egi_test.fif' _check_roundtrip(dig_montage, fname_temp, 'unknown') _check_roundtrip(dig_montage_in_head, fname_temp) @@ -1007,7 +1007,7 @@ def test_read_dig_captrak(tmp_path): montage = transform_to_head(montage) # transform_to_head has to be tested _check_roundtrip(montage=montage, - fname=str(tmp_path.join('bvct_test.fif'))) + fname=str(tmp_path / 'bvct_test.fif')) fid, _ = _get_fid_coords(montage.dig) assert_allclose( diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py index 166ae988712..cfc9cd8a51c 100644 --- a/mne/forward/tests/test_forward.py +++ b/mne/forward/tests/test_forward.py @@ -97,7 +97,7 @@ def test_io_forward(tmp_path): leadfield = fwd['sol']['data'] assert_equal(leadfield.shape, (n_channels, n_src)) assert_equal(len(fwd['sol']['row_names']), n_channels) - fname_temp = tmp_path.join('test-fwd.fif') + fname_temp = tmp_path / 'test-fwd.fif' with pytest.warns(RuntimeWarning, match='stored on disk'): write_forward_solution(fname_temp, fwd, overwrite=True) @@ -167,7 +167,7 @@ def test_io_forward(tmp_path): # test warnings on bad filenames fwd = read_forward_solution(fname_meeg_grad) - fwd_badname = tmp_path.join('test-bad-name.fif.gz') + fwd_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='end with'): write_forward_solution(fwd_badname, fwd) with pytest.warns(RuntimeWarning, match='end with'): @@ -283,7 +283,7 @@ def test_restrict_forward_to_stc(tmp_path): # Test saving the restricted forward object. This only works if all fields # are properly accounted for. - fname_copy = tmp_path.join('copy-fwd.fif') + fname_copy = tmp_path / 'copy-fwd.fif' with pytest.warns(RuntimeWarning, match='stored on disk'): write_forward_solution(fname_copy, fwd_out, overwrite=True) fwd_out_read = read_forward_solution(fname_copy) @@ -352,7 +352,7 @@ def test_restrict_forward_to_label(tmp_path): # Test saving the restricted forward object. This only works if all fields # are properly accounted for. 
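A few call sites in these hunks keep an explicit str() around the new path, for example str(tmp_path / 'bvct_test.fif') and str(tmp_path / 'copy-fwd.fif'); where a plain string really is required, for instance when assembling an external command line or comparing against string output, str() or os.fspath() gives the text form. A short illustrative sketch (the command name mirrors the forward-solution test nearby and is only an example):

    # Sketch: getting the plain-string form of a Path when one is required.
    import os
    from pathlib import Path

    fname_copy = Path('/tmp/pytest-0') / 'copy-fwd.fif'    # illustrative path
    cmd = ('mne_average_forward_solutions', '--out', os.fspath(fname_copy))
    assert cmd[-1] == str(fname_copy)                      # same text either way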
- fname_copy = tmp_path.join('copy-fwd.fif') + fname_copy = tmp_path / 'copy-fwd.fif' write_forward_solution(fname_copy, fwd_out, overwrite=True) fwd_out_read = read_forward_solution(fname_copy) assert_forward_allclose(fwd_out, fwd_out_read) @@ -383,7 +383,7 @@ def test_average_forward_solution(tmp_path): # modify a fwd solution, save it, use MNE to average with old one fwd_copy['sol']['data'] *= 0.5 - fname_copy = str(tmp_path.join('copy-fwd.fif')) + fname_copy = str(tmp_path / 'copy-fwd.fif') write_forward_solution(fname_copy, fwd_copy, overwrite=True) cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd', fname_copy, '--out', fname_copy) diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py index ef9ee5e87ce..ebfeab87479 100644 --- a/mne/forward/tests/test_make_forward.py +++ b/mne/forward/tests/test_make_forward.py @@ -147,7 +147,7 @@ def test_make_forward_solution_kit(tmp_path): 'data', 'test_ctf_comp_raw.fif') # first set up a small testing source space - fname_src_small = tmp_path.join('sample-oct-2-src.fif') + fname_src_small = tmp_path / 'sample-oct-2-src.fif' src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, add_dist=False) write_source_spaces(fname_src_small, src) # to enable working with MNE-C @@ -212,7 +212,7 @@ def test_make_forward_solution_kit(tmp_path): subjects_dir=subjects_dir) _compare_forwards(fwd, fwd_py, 274, n_src) - fname_temp = tmp_path.join('test-ctf-fwd.fif') + fname_temp = tmp_path / 'test-ctf-fwd.fif' write_forward_solution(fname_temp, fwd_py) fwd_py2 = read_forward_solution(fname_temp) _compare_forwards(fwd_py, fwd_py2, 274, n_src) @@ -255,11 +255,11 @@ def test_make_forward_solution_discrete(tmp_path): @pytest.mark.timeout(90) # can take longer than 60 sec on Travis def test_make_forward_solution_sphere(tmp_path): """Test making a forward solution with a sphere model.""" - fname_src_small = tmp_path.join('sample-oct-2-src.fif') + fname_src_small = tmp_path / 'sample-oct-2-src.fif' src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir, add_dist=False) write_source_spaces(fname_src_small, src) # to enable working with MNE-C - out_name = tmp_path.join('tmp-fwd.fif') + out_name = tmp_path / 'tmp-fwd.fif' run_subprocess(['mne_forward_solution', '--meg', '--eeg', '--meas', fname_raw, '--src', fname_src_small, '--mri', fname_trans, '--fwd', out_name]) @@ -331,7 +331,7 @@ def test_forward_mixed_source_space(tmp_path): assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all()) # run tests for SourceSpaces.export_volume - fname_img = tmp_path.join('temp-image.mgz') + fname_img = tmp_path / 'temp-image.mgz' # head coordinates and mri_resolution, but trans file with pytest.raises(ValueError, match='trans containing mri to head'): @@ -443,7 +443,7 @@ def test_make_forward_dipole(tmp_path): dip_even_samp = Dipole(times, pos, amplitude, ori, gof) # I/O round-trip - fname = str(tmp_path.join('test-fwd.fif')) + fname = str(tmp_path / 'test-fwd.fif') with pytest.warns(RuntimeWarning, match='free orientation'): write_forward_solution(fname, fwd) fwd_read = convert_forward_solution( @@ -466,7 +466,7 @@ def test_make_forward_no_meg(tmp_path): montage = make_standard_montage('standard_1020') info = create_info(['Cz'], 1000., 'eeg').set_montage(montage) fwd = make_forward_solution(info, trans, src, bem) - fname = tmp_path.join('test-fwd.fif') + fname = tmp_path / 'test-fwd.fif' write_forward_solution(fname, fwd) fwd_read = read_forward_solution(fname) assert_allclose(fwd['sol']['data'], 
fwd_read['sol']['data']) diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py index 18ac6ad9f4e..96a27d37174 100644 --- a/mne/io/brainvision/tests/test_brainvision.py +++ b/mne/io/brainvision/tests/test_brainvision.py @@ -628,7 +628,7 @@ def test_read_vmrk_annotations(tmp_path): # delete=False is for Windows compatibility with open(vmrk_path) as myfile: head = [next(myfile) for x in range(6)] - fname = tmp_path.join('temp.vmrk') + fname = tmp_path / 'temp.vmrk' with open(str(fname), 'w') as temp: for item in head: temp.write(item) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 6836c3146a5..7cd1736b5fc 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -336,7 +336,7 @@ def sfreq_testing_data(tmp_path, request): sfreq, time_step = request.param['sfreq'], request.param['time_step'] in_base_name = curry7_bdf_file.strip('dat') - out_base_name = str(tmp_path.join('curry.')) + out_base_name = str(tmp_path / 'curry.') # create dummy empty files for 'dat' and 'rs3' for fname in [out_base_name + ext for ext in ['dat', 'rs3']]: @@ -391,7 +391,7 @@ def _get_read_annotations_mock_info(name_part, mock_dir): version = _get_curry_version(ext) original['info'] = original['base'] + FILE_EXTENSIONS[version]["info"] - modified['base'] = str(mock_dir.join('curry')) + modified['base'] = str(mock_dir / 'curry') modified['event'] = (modified['base'] + FILE_EXTENSIONS[version]["events_cef"]) modified['info'] = modified['base'] + FILE_EXTENSIONS[version]["info"] diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 4dba6b1727c..f3f59bd464a 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -128,7 +128,7 @@ def test_bdf_data(): def test_bdf_crop_save_stim_channel(tmp_path): """Test EDF with various sampling rates.""" raw = read_raw_bdf(bdf_stim_channel_path) - raw.save(tmp_path.join('test-raw.fif'), tmin=1.2, tmax=4.0, overwrite=True) + raw.save(tmp_path / 'test-raw.fif', tmin=1.2, tmax=4.0, overwrite=True) @testing.requires_testing_data diff --git a/mne/io/edf/tests/test_gdf.py b/mne/io/edf/tests/test_gdf.py index b8f3fd66ece..d9184d46dbe 100644 --- a/mne/io/edf/tests/test_gdf.py +++ b/mne/io/edf/tests/test_gdf.py @@ -60,7 +60,7 @@ def test_gdf_data(): @testing.requires_testing_data def test_gdf2_birthday(tmp_path): """Test reading raw GDF 2.x files.""" - new_fname = str(tmp_path.join('temp.gdf')) + new_fname = tmp_path / 'temp.gdf' shutil.copyfile(gdf2_path + '.gdf', new_fname) # go back 44.5 years so the subject should show up as 44 offset_edf = ( # to their ref diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index 311bcd33172..9596f6a0898 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -369,7 +369,7 @@ def one_chanpos_fname(tmp_path_factory): Notes from when this code was factorized: # test reading file with one event (read old version) """ - fname = str(tmp_path_factory.mktemp('data').join('test_chanpos.set')) + fname = str(tmp_path_factory.mktemp('data') / 'test_chanpos.set') file_conent = dict(EEG={ 'trials': 1, 'nbchan': 3, 'pnts': 3, 'epoch': [], 'event': [], 'srate': 128, 'times': np.array([0., 0.1, 0.2]), diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py index 8e1694090fe..e9a5d4dd7f2 100644 --- a/mne/io/egi/tests/test_egi.py +++ b/mne/io/egi/tests/test_egi.py @@ -112,7 +112,7 @@ def test_egi_mff_pause(fname, skip_times, 
event_times): ]) def test_egi_mff_pause_chunks(fname, tmp_path): """Test that on-demand of all short segments works (via I/O).""" - fname_temp = tmp_path.join('test_raw.fif') + fname_temp = tmp_path / 'test_raw.fif' raw_data = read_raw_egi(fname, preload=True).get_data() raw = read_raw_egi(fname) with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): @@ -267,7 +267,7 @@ def test_io_egi_pns_mff(tmp_path): assert_array_equal(mat_data, raw_data) # EEG missing - new_mff = str(tmp_path.join('temp.mff')) + new_mff = tmp_path / 'temp.mff' shutil.copytree(egi_mff_pns_fname, new_mff) read_raw_egi(new_mff, verbose='error') os.remove(op.join(new_mff, 'info1.xml')) diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 554843078ec..d1dc61cc5b0 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -1756,14 +1756,17 @@ def test_bad_acq(fname): def test_split_symlink(tmp_path): """Test split files with symlinks.""" # regression test for gh-9221 - first = str(tmp_path.mkdir('first').join('test_raw.fif')) + (tmp_path / 'first').mkdir() + first = tmp_path / 'first' / 'test_raw.fif' raw = read_raw_fif(fif_fname).pick('meg').load_data() raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True) - second = first[:-4] + '-1.fif' + second = str(first)[:-4] + '-1.fif' assert op.isfile(second) - assert not op.isfile(first[:-4] + '-2.fif') - new_first = tmp_path.mkdir('a').join('test_raw.fif') - new_second = tmp_path.mkdir('b').join('test_raw-1.fif') + assert not op.isfile(str(first)[:-4] + '-2.fif') + (tmp_path / 'a').mkdir() + (tmp_path / 'b').mkdir() + new_first = tmp_path / 'a' / 'test_raw.fif' + new_second = tmp_path / 'b' / 'test_raw-1.fif' shutil.move(first, new_first) shutil.move(second, new_second) os.symlink(new_first, first) diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index 6340e8659d8..f75a69386b3 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -451,7 +451,7 @@ def test_nirx_15_3_short(): @requires_testing_data def test_encoding(tmp_path): """Test NIRx encoding.""" - fname = str(tmp_path.join('latin')) + fname = tmp_path / 'latin' shutil.copytree(fname_nirx_15_2, fname) hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') hdr = list() diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index 618d944a4f7..ca2e925c8c7 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -241,7 +241,7 @@ def test_ica_noop(n_components, n_pca_components, tmp_path): assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='Id failure') _assert_ica_attributes(ica, data) # and with I/O - fname = tmp_path.join('temp-ica.fif') + fname = tmp_path / 'temp-ica.fif' ica.save(fname) ica = read_ica(fname) raw_new = ica.apply(raw.copy()) @@ -289,7 +289,7 @@ def test_ica_n_iter_(method, tmp_path): assert_equal(ica.n_iter_, max_iter) # Test I/O roundtrip. 
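The split-symlink hunk above creates the directory first and then builds the file path in a second step. That ordering is forced by the fixture change: tmpdir.mkdir('sub') returns the new py.path.local, whereas pathlib.Path.mkdir() returns None, which is also why an earlier commit in this series splits the one-line mkdir assignment in test_testing.py into two statements. A minimal sketch of the pattern, standard library only:

    # Sketch: Path.mkdir() returns None, so keep the Path and call mkdir() on it.
    import tempfile
    from pathlib import Path

    tmp_path = Path(tempfile.mkdtemp())          # stand-in for the pytest fixture
    returned = (tmp_path / 'first').mkdir()      # directory is created ...
    assert returned is None                      # ... but nothing is returned
    sub = tmp_path / 'second'                    # so build the Path separately
    sub.mkdir(parents=True, exist_ok=True)
    target = sub / 'test_raw.fif'                # and join file names onto it
    assert sub.is_dir() and target.parent == sub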
- output_fname = tmp_path.join('test_ica-ica.fif') + output_fname = tmp_path / 'test_ica-ica.fif' _assert_ica_attributes(ica, raw.get_data('data'), limits=(5, 110)) ica.save(output_fname) ica = read_ica(output_fname) @@ -683,7 +683,7 @@ def test_ica_additional(method, tmp_path, short_raw_epochs): corrmap([ica_different_channels, ica], (0, 0)) # test warnings on bad filenames - ica_badname = tmp_path.join('test-bad-name.fif.gz') + ica_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='-ica.fif'): ica.save(ica_badname) with pytest.warns(RuntimeWarning, match='-ica.fif'): @@ -736,7 +736,7 @@ def test_ica_additional(method, tmp_path, short_raw_epochs): ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') assert ((d1 != ica_raw._data[0]).any()) - test_ica_fname = tmp_path.join('test-ica.fif') + test_ica_fname = tmp_path / 'test-ica.fif' ica.n_pca_components = 2 ica.method = 'fake' ica.save(test_ica_fname) @@ -980,7 +980,7 @@ def test_ica_cov(method, cov, tmp_path, short_raw_epochs): cov = read_cov(cov) # test reading and writing - test_ica_fname = tmp_path.join('test-ica.fif') + test_ica_fname = tmp_path / 'test-ica.fif' kwargs = dict(n_pca_components=4) ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1) @@ -1240,7 +1240,7 @@ def test_n_components_none(method, tmp_path): n_components = None random_state = 12345 - output_fname = tmp_path.join('test_ica-ica.fif') + output_fname = tmp_path / 'test_ica-ica.fif' ica = ICA(method=method, n_components=n_components, random_state=random_state) with pytest.warns(None): @@ -1452,8 +1452,8 @@ def test_read_ica_eeglab_mismatch(tmp_path): """Test read_ica_eeglab function when there is a mismatch.""" fname_orig = op.join(test_base_dir, "EEGLAB", "test_raw.set") base = op.basename(fname_orig)[:-3] - shutil.copyfile(fname_orig[:-3] + 'fdt', tmp_path.join(base + 'fdt')) - fname = tmp_path.join(base) + shutil.copyfile(fname_orig[:-3] + 'fdt', tmp_path / (base + 'fdt')) + fname = tmp_path / base data = loadmat(fname_orig) w = data['EEG']['icaweights'][0][0] w[:] = np.random.RandomState(0).randn(*w.shape) diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py index d15ac886726..3e759fdf344 100644 --- a/mne/tests/test_bem.py +++ b/mne/tests/test_bem.py @@ -143,7 +143,7 @@ def test_make_sphere_model(): ]) def test_make_bem_model(tmp_path, kwargs, fname): """Test BEM model creation from Python with I/O.""" - fname_temp = tmp_path.join('temp-bem.fif') + fname_temp = tmp_path / 'temp-bem.fif' with catch_logging() as log: model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir, verbose=True, **kwargs) @@ -169,12 +169,12 @@ def test_make_bem_model(tmp_path, kwargs, fname): def test_bem_model_topology(tmp_path): """Test BEM model topological checks.""" # bad topology (not enough neighboring tris) - makedirs(tmp_path.join('foo', 'bem')) + makedirs(tmp_path / 'foo' / 'bem' for fname in ('inner_skull', 'outer_skull', 'outer_skin'): fname += '.surf' copy(op.join(subjects_dir, 'sample', 'bem', fname), - str(tmp_path.join('foo', 'bem', fname))) - outer_fname = tmp_path.join('foo', 'bem', 'outer_skull.surf') + tmp_path / 'foo' / 'bem' / fname) + outer_fname = tmp_path / 'foo' / 'bem' / 'outer_skull.surf' rr, tris = read_surface(outer_fname) tris = tris[:-1] write_surface(outer_fname, rr, tris[:-1], overwrite=True) diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py index 2cf22de1e0c..fad3a93fd75 100644 --- a/mne/tests/test_dipole.py +++ b/mne/tests/test_dipole.py @@ -466,7 +466,7 @@ def 
test_bdip(fname_dip_, fname_bdip_, tmp_path): orig_size = os.stat(fname_bdip_).st_size bdip = read_dipole(fname_bdip_) # test round-trip by writing and reading, too - fname = tmp_path.join('test.bdip') + fname = tmp_path / 'test.bdip' bdip.save(fname) bdip_read = read_dipole(fname) write_size = os.stat(str(fname)).st_size diff --git a/mne/tests/test_freesurfer.py b/mne/tests/test_freesurfer.py index e28f478e62b..c2583c36812 100644 --- a/mne/tests/test_freesurfer.py +++ b/mne/tests/test_freesurfer.py @@ -125,7 +125,7 @@ def test_read_freesurfer_lut(fname, tmp_path): # long name (only test on one run) if fname is not None: return - fname = str(tmp_path.join('long.txt')) + fname = tmp_path / 'long.txt' names = ['Anterior_Cingulate_and_Medial_Prefrontal_Cortex-' + hemi for hemi in ('lh', 'rh')] ids = np.arange(1, len(names) + 1) diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index 80a9b703ccc..b0b998c698d 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -172,7 +172,7 @@ def test_add_source_space_distances_limited(tmp_path): n_do = 200 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() - out_name = tmp_path.join('temp-src.fif') + out_name = tmp_path / 'temp-src.fif' add_source_space_distances(src_new, dist_limit=0.007) write_source_spaces(out_name, src_new) src_new = read_source_spaces(out_name) @@ -206,7 +206,7 @@ def test_add_source_space_distances(tmp_path): n_do = 19 # limit this for speed src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy() src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy() - out_name = tmp_path.join('temp-src.fif') + out_name = tmp_path / 'temp-src.fif' n_jobs = 2 assert n_do % n_jobs != 0 with pytest.raises(ValueError, match='non-negative'): @@ -247,9 +247,9 @@ def test_discrete_source_space(tmp_path): v = src[0]['vertno'] # let's make a discrete version with the C code, and with ours - temp_name = tmp_path.join('temp-src.fif') + temp_name = tmp_path / 'temp-src.fif' # save - temp_pos = tmp_path.join('temp-pos.txt') + temp_pos = tmp_path / 'temp-pos.txt' np.savetxt(str(temp_pos), np.c_[src[0]['rr'][v], src[0]['nn'][v]]) # let's try the spherical one (no bem or surf supplied) run_subprocess(['mne_volume_source_space', '--meters', @@ -288,7 +288,7 @@ def test_discrete_source_space(tmp_path): def test_volume_source_space(tmp_path): """Test setting up volume source spaces.""" src = read_source_spaces(fname_vol) - temp_name = tmp_path.join('temp-src.fif') + temp_name = tmp_path / 'temp-src.fif' surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN) surf['rr'] *= 1e3 # convert to mm bem_sol = read_bem_solution(fname_bem_3_sol) @@ -343,7 +343,7 @@ def test_other_volume_source_spaces(tmp_path): # Travis doesn't seem to like them # let's try the spherical one (no bem or surf supplied) - temp_name = tmp_path.join('temp-src.fif') + temp_name = tmp_path / 'temp-src.fif' run_subprocess(['mne_volume_source_space', '--grid', '7.0', '--src', temp_name, @@ -463,7 +463,7 @@ def test_setup_source_space(tmp_path): # oct-6 (sample) - auto filename + IO src = read_source_spaces(fname) - temp_name = tmp_path.join('temp-src.fif') + temp_name = tmp_path / 'temp-src.fif' with pytest.warns(None): # sklearn equiv neighbors src_new = setup_source_space('sample', spacing='oct6', subjects_dir=subjects_dir, add_dist=False) @@ -491,7 +491,7 @@ def test_setup_source_space(tmp_path): @pytest.mark.parametrize('spacing', [2, 7]) 
def test_setup_source_space_spacing(tmp_path, spacing): """Test setting up surface source spaces using a given spacing.""" - copytree(op.join(subjects_dir, 'sample'), str(tmp_path.join('sample'))) + copytree(op.join(subjects_dir, 'sample'), tmp_path / 'sample') args = [] if spacing == 7 else ['--spacing', str(spacing)] with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'): run_subprocess(['mne_setup_source_space'] + args) @@ -535,13 +535,13 @@ def test_read_source_spaces(): def test_write_source_space(tmp_path): """Test reading and writing of source spaces.""" src0 = read_source_spaces(fname, patch_stats=False) - temp_fname = tmp_path.join('tmp-src.fif') + temp_fname = tmp_path / 'tmp-src.fif' write_source_spaces(temp_fname, src0) src1 = read_source_spaces(temp_fname, patch_stats=False) _compare_source_spaces(src0, src1) # test warnings on bad filenames - src_badname = tmp_path.join('test-bad-name.fif.gz') + src_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='-src.fif'): write_source_spaces(src_badname, src0) with pytest.warns(RuntimeWarning, match='-src.fif'): @@ -591,7 +591,7 @@ def test_source_space_from_label(tmp_path, pass_ids): assert src[0]['nuse'] == 404 # for our given pos and label # test reading and writing - out_name = tmp_path.join('temp-src.fif') + out_name = tmp_path / 'temp-src.fif' write_source_spaces(out_name, src) src_from_file = read_source_spaces(out_name) _compare_source_spaces(src, src_from_file, mode='approx') @@ -706,7 +706,7 @@ def test_combine_source_spaces(tmp_path): assert len(src) == 4 # test reading and writing - src_out_name = tmp_path.join('temp-src.fif') + src_out_name = tmp_path / 'temp-src.fif' src.save(src_out_name) src_from_file = read_source_spaces(src_out_name) _compare_source_spaces(src, src_from_file, mode='approx') @@ -718,7 +718,7 @@ def test_combine_source_spaces(tmp_path): assert (coord_frames == FIFF.FIFFV_COORD_MRI).all() # test errors for export_volume - image_fname = tmp_path.join('temp-image.mgz') + image_fname = tmp_path / 'temp-image.mgz' # source spaces with no volume with pytest.raises(ValueError, match='at least one volume'): @@ -732,7 +732,7 @@ def test_combine_source_spaces(tmp_path): del disc2 # unrecognized file type - bad_image_fname = tmp_path.join('temp-image.png') + bad_image_fname = tmp_path / 'temp-image.png' # vertices outside vol space warning pytest.raises(ValueError, src.export_volume, bad_image_fname, verbose='error') @@ -745,7 +745,7 @@ def test_combine_source_spaces(tmp_path): src_mixed_coord.export_volume(image_fname, verbose='error') # now actually write it - fname_img = tmp_path.join('img.nii') + fname_img = tmp_path / 'img.nii' for mri_resolution in (False, 'sparse', True): for src, up in ((vol, 705), (srf + vol, 27272), @@ -761,7 +761,7 @@ def test_combine_source_spaces(tmp_path): assert n_src == n_want, src # gh-8004 - temp_aseg = tmp_path.join('aseg.mgz') + temp_aseg = tmp_path / 'aseg.mgz' aseg_img = nib.load(aseg_fname) aseg_affine = aseg_img.affine aseg_affine[:3, :3] *= 0.7 diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index fcdad7ad3f0..8e3c92b3fb8 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -37,7 +37,7 @@ def test_check(tmp_path): pytest.raises(ValueError, check_random_state, 'foo') pytest.raises(TypeError, _check_fname, 1) _check_fname(Path('./foo')) - fname = str(tmp_path.join('foo')) + fname = tmp_path / 'foo' with open(fname, 'wb'): pass assert op.isfile(fname) From 
8b7d51e7abca9e6d67e8186e0b39a6340f6d99b7 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sat, 30 Oct 2021 09:01:49 +0200 Subject: [PATCH 10/23] Missing ) --- mne/tests/test_bem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py index 3e759fdf344..4dd933f4134 100644 --- a/mne/tests/test_bem.py +++ b/mne/tests/test_bem.py @@ -169,7 +169,7 @@ def test_make_bem_model(tmp_path, kwargs, fname): def test_bem_model_topology(tmp_path): """Test BEM model topological checks.""" # bad topology (not enough neighboring tris) - makedirs(tmp_path / 'foo' / 'bem' + makedirs(tmp_path / 'foo' / 'bem') for fname in ('inner_skull', 'outer_skull', 'outer_skin'): fname += '.surf' copy(op.join(subjects_dir, 'sample', 'bem', fname), From 7d8e9bb2d4c8c317d2f919ee6d2b51393231a938 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sat, 30 Oct 2021 09:47:30 +0200 Subject: [PATCH 11/23] More... --- mne/preprocessing/nirs/tests/test_beer_lambert_law.py | 4 ++-- mne/tests/test_source_space.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py index 95a8b66c969..10bc0ffb650 100644 --- a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py +++ b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py @@ -38,8 +38,8 @@ def test_beer_lambert(fname, fmt, tmp_path): assert fmt in ('nirx', 'fif') raw = read_raw_nirx(fname) if fmt == 'fif': - raw.save(tmp_path.join('test_raw.fif')) - raw = read_raw_fif(tmp_path.join('test_raw.fif')) + raw.save(tmp_path / 'test_raw.fif') + raw = read_raw_fif(tmp_path / 'test_raw.fif') assert 'fnirs_cw_amplitude' in raw assert 'fnirs_od' not in raw raw = optical_density(raw) diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index b0b998c698d..d48f55e9ff6 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -496,7 +496,7 @@ def test_setup_source_space_spacing(tmp_path, spacing): with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'): run_subprocess(['mne_setup_source_space'] + args) src = read_source_spaces( - tmp_path / 'sample' / 'bem' / 'sample-%d-src.fif' % spacing + tmp_path / 'sample' / 'bem' / ('sample-%d-src.fif' % spacing) ) src_new = setup_source_space('sample', spacing=spacing, add_dist=False, subjects_dir=subjects_dir) From 0d6f2a2b3dac77c2dbafed73b81a97b8890b1bc7 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sat, 30 Oct 2021 13:51:56 +0200 Subject: [PATCH 12/23] Moar! 
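
The conversions in this patch (and throughout the series) follow one pattern: the legacy `tmpdir` fixture is a `py.path.local` with `.join()` and `.mkdir()` methods, whereas `tmp_path` is a `pathlib.Path`, so paths are composed with the `/` operator and directories are created on the path object itself. A minimal sketch of the idiom, assuming pytest >= 3.9 where `tmp_path` exists (the test name and file names below are illustrative, not taken from the MNE test suite):

    from pathlib import Path


    def test_tmp_path_idiom(tmp_path):
        # tmp_path is a pathlib.Path pointing at a fresh per-test directory
        assert isinstance(tmp_path, Path)

        # old style: sub = tmpdir.mkdir('sub'); fname = sub.join('data.fif')
        sub = tmp_path / 'sub'
        sub.mkdir()                 # create the directory on the Path itself
        fname = sub / 'data.fif'

        fname.write_bytes(b'')      # any write is enough for this sketch
        assert fname.exists()

        # APIs that still require a plain string can be given str(fname)
        assert str(fname).endswith('data.fif')
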
--- mne/gui/tests/test_ieeg_locate_gui.py | 4 ++-- mne/tests/test_morph.py | 12 ++++++------ mne/tests/test_source_estimate.py | 2 +- mne/viz/_brain/tests/test_brain.py | 8 +++++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/mne/gui/tests/test_ieeg_locate_gui.py b/mne/gui/tests/test_ieeg_locate_gui.py index 4eb28a64868..5229b206a67 100644 --- a/mne/gui/tests/test_ieeg_locate_gui.py +++ b/mne/gui/tests/test_ieeg_locate_gui.py @@ -112,8 +112,8 @@ def test_locate_scraper(_locate_ieeg, _fake_CT_coords, tmp_path): with pytest.warns(RuntimeWarning, match='`pial` surface not found'): gui = _locate_ieeg(raw.info, trans, aligned_ct, subject=subject, subjects_dir=subjects_dir) - tmp_path.mkdir('_images') - image_path = str(tmp_path.join('_images', 'temp.png')) + (tmp_path / '_images').mkdir() + image_path = str(tmp_path / '_images' / 'temp.png') gallery_conf = dict(builder_name='html', src_dir=str(tmp_path)) block_vars = dict( example_globals=dict(gui=gui), diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index a523147a693..d993e2cd1b8 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -256,9 +256,9 @@ def test_surface_vector_source_morph(tmp_path): assert 'surface' in repr(source_morph_surf) # check loading and saving for surf - source_morph_surf.save(tmp_path.join('42.h5')) + source_morph_surf.save(tmp_path / '42.h5') - source_morph_surf_r = read_source_morph(tmp_path.join('42.h5')) + source_morph_surf_r = read_source_morph(tmp_path / '42.h5') assert (all([read == saved for read, saved in zip(sorted(source_morph_surf_r.__dict__), @@ -342,14 +342,14 @@ def test_volume_source_morph_basic(tmp_path): subjects_dir=subjects_dir) # two different ways of saving - source_morph_vol.save(tmp_path.join('vol')) + source_morph_vol.save(tmp_path / 'vol') # check loading - source_morph_vol_r = read_source_morph(tmp_path.join('vol-morph.h5')) + source_morph_vol_r = read_source_morph(tmp_path / 'vol-morph.h5') # check for invalid file name handling () with pytest.raises(IOError, match='not found'): - read_source_morph(tmp_path.join('42')) + read_source_morph(tmp_path / '42') # check morph stc_vol_morphed = source_morph_vol.apply(stc_vol) @@ -563,7 +563,7 @@ def test_volume_source_morph_round_trip( stc_from_unit_rt = morph_to_from.apply(morph_from_to.apply(stc_from_unit)) assert_power_preserved(stc_from_unit, stc_from_unit_rt, limits=limits) if morph_mat: - fname = tmp_path.join('temp-morph.h5') + fname = tmp_path / 'temp-morph.h5' morph_to_from.save(fname) morph_to_from = read_source_morph(fname) assert morph_to_from.vol_morph_mat is None diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index 8cf8ea67ff0..d1f8cc4a203 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -438,7 +438,7 @@ def test_io_stc_h5(tmp_path, is_complex, vector): else: stc = _fake_stc(is_complex=is_complex) pytest.raises(ValueError, stc.save, tmp_path / 'tmp', ftype='foo') - out_name = tmp_path / 'tmp' + out_name = str(tmp_path / 'tmp') stc.save(out_name, ftype='h5') stc.save(out_name, ftype='h5') # test overwrite stc3 = read_source_estimate(out_name) diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 9959fc92ffc..7709799bdd2 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -474,8 +474,10 @@ def tiny(tmp_path): # This is a minimal version of what we need for our viz-with-timeviewer # support currently subject = 'test' - subject_dir = 
tmp_path.mkdir(subject) - surf_dir = subject_dir.mkdir('surf') + (tmp_path / subject).mkdir() + subject_dir = tmp_path / subject + (subject_dir / 'surf').mkdir() + surf_dir = subject_dir / 'surf' rng = np.random.RandomState(0) rr = rng.randn(4, 3) tris = np.array([[0, 1, 2], [2, 1, 3]]) @@ -796,7 +798,7 @@ def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmp_path, check_version('sphinx_gallery')): brain.close() return - fnames = [str(tmp_path.join(f'temp_{ii}.png')) for ii in range(2)] + fnames = [str(tmp_path / f'temp_{ii}.png') for ii in range(2)] block_vars = dict(image_path_iterator=iter(fnames), example_globals=dict(brain=brain)) block = ('code', """ From a2c835eb400b13f2a3de2cb1832fffcd9afd5646 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 19:55:05 +0100 Subject: [PATCH 13/23] Fix test --- mne/io/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/io/base.py b/mne/io/base.py index b8ce6a0294a..6f2fb7f4161 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1424,7 +1424,7 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, check_fname(fname, 'raw', endings, endings_err=endings_err) split_size = _get_split_size(split_size) - if not self.preload and fname in self._filenames: + if not self.preload and str(fname) in self._filenames: raise ValueError('You cannot save data to the same file.' ' Please use a different filename.') From 68c1b0cfb4f2c531a55b4fc63eb982be3c550275 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 20:03:14 +0100 Subject: [PATCH 14/23] Use _check_fname() instead of str() --- mne/io/base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/io/base.py b/mne/io/base.py index 6f2fb7f4161..e8dc4f55bb5 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1421,10 +1421,11 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, '_meg.fif', '_eeg.fif', '_ieeg.fif') endings += tuple([f'{e}.gz' for e in endings]) endings_err = ('.fif', '.fif.gz') + fname = _check_fname() check_fname(fname, 'raw', endings, endings_err=endings_err) split_size = _get_split_size(split_size) - if not self.preload and str(fname) in self._filenames: + if not self.preload and fname in self._filenames: raise ValueError('You cannot save data to the same file.' 
' Please use a different filename.') From e5d5e116e1dbcdcc0220d296300e3b11d6bcefbe Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 20:13:59 +0100 Subject: [PATCH 15/23] Forgot argument --- mne/io/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/io/base.py b/mne/io/base.py index e8dc4f55bb5..a131b99a6cf 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1421,7 +1421,7 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, '_meg.fif', '_eeg.fif', '_ieeg.fif') endings += tuple([f'{e}.gz' for e in endings]) endings_err = ('.fif', '.fif.gz') - fname = _check_fname() + fname = _check_fname(fname) check_fname(fname, 'raw', endings, endings_err=endings_err) split_size = _get_split_size(split_size) From e09fcf432910809d906db72cb44e6383fcfb8c4c Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 20:55:00 +0100 Subject: [PATCH 16/23] Set overwrite=True --- mne/io/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mne/io/base.py b/mne/io/base.py index a131b99a6cf..4cbeffbd1c6 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -1421,7 +1421,9 @@ def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=None, '_meg.fif', '_eeg.fif', '_ieeg.fif') endings += tuple([f'{e}.gz' for e in endings]) endings_err = ('.fif', '.fif.gz') - fname = _check_fname(fname) + + # convert to str, check for overwrite a few lines later + fname = _check_fname(fname, overwrite=True) check_fname(fname, 'raw', endings, endings_err=endings_err) split_size = _get_split_size(split_size) From 784a7691ab75754808ae756eedcc82ae7d123076 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 21:33:34 +0100 Subject: [PATCH 17/23] Fix more joins --- mne/conftest.py | 2 +- mne/viz/_brain/tests/test_brain.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mne/conftest.py b/mne/conftest.py index fbea072900d..d8f335fa594 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -493,7 +493,7 @@ def pixel_ratio(): def subjects_dir_tmp(tmp_path): """Copy MNE-testing-data subjects_dir to a temp dir for manipulation.""" for key in ('sample', 'fsaverage'): - shutil.copytree(op.join(subjects_dir, key), str(tmp_path.join(key))) + shutil.copytree(op.join(subjects_dir, key), str(tmp_path / key)) return str(tmp_path) diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 7709799bdd2..5972df336b8 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -482,12 +482,12 @@ def tiny(tmp_path): rr = rng.randn(4, 3) tris = np.array([[0, 1, 2], [2, 1, 3]]) curv = rng.randn(len(rr)) - with open(surf_dir.join('lh.curv'), 'wb') as fid: + with open(surf_dir / 'lh.curv', 'wb') as fid: fid.write(np.array([255, 255, 255], dtype=np.uint8)) fid.write(np.array([len(rr), 0, 1], dtype='>i4')) fid.write(curv.astype('>f4')) - write_surface(surf_dir.join('lh.white'), rr, tris) - write_surface(surf_dir.join('rh.white'), rr, tris) # needed for vertex tc + write_surface(surf_dir / 'lh.white', rr, tris) + write_surface(surf_dir / 'rh.white', rr, tris) # needed for vertex tc vertices = [np.arange(len(rr)), []] data = rng.randn(len(rr), 10) stc = SourceEstimate(data, vertices, 0, 1, subject) From 983e5612dd6f844f0636ae7ee960563d0a531311 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Mon, 1 Nov 2021 22:34:05 +0100 Subject: [PATCH 18/23] Missing parens --- mne/tests/test_source_estimate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index d1f8cc4a203..56a5216b4a0 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -173,7 +173,7 @@ def test_volume_stc(tmp_path): stc = VolVectorSourceEstimate(data, [vertno], 0, 1) ext = 'h5' klass = VolVectorSourceEstimate - fname_temp = tmp_path / 'temp-vl.' + ext + fname_temp = tmp_path / ('temp-vl.' + ext) stc_new = stc n = 3 if ext == 'h5' else 2 for ii in range(n): From f8746f1bb3a49d6784855213e793a1d4ae263b24 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 2 Nov 2021 11:10:58 +0100 Subject: [PATCH 19/23] Replace more tmpdir with tmp_path --- mne/gui/tests/test_coreg_gui.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/mne/gui/tests/test_coreg_gui.py b/mne/gui/tests/test_coreg_gui.py index 56c38bb4385..3ea1486ed1a 100644 --- a/mne/gui/tests/test_coreg_gui.py +++ b/mne/gui/tests/test_coreg_gui.py @@ -385,12 +385,11 @@ def GetEventPosition(self): @pytest.mark.parametrize( 'inst_path', (raw_path, 'gen_montage', ctf_raw_path, nirx_15_0_raw_path, nirsport2_raw_path, snirf_nirsport2_raw_path)) -def test_coreg_gui_pyvista_file_support(inst_path, tmpdir, +def test_coreg_gui_pyvista_file_support(inst_path, tmp_path, renderer_interactive_pyvistaqt): """Test reading supported files.""" from mne.gui import coregistration - tempdir = str(tmpdir) if inst_path == 'gen_montage': # generate a montage fig to use as inst. tmp_info = read_info(raw_path) @@ -401,7 +400,7 @@ def test_coreg_gui_pyvista_file_support(inst_path, tmpdir, dig = DigMontage(dig=tmp_info['dig'], ch_names=eeg_chans) - inst_path = op.join(tempdir, 'tmp-dig.fif') + inst_path = tmp_path / 'tmp-dig.fif' dig.save(inst_path) # Suppressing warnings here is not ideal. 
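
A side note on the two missing-parentheses fixes above (PATCH 11 and PATCH 18): the `/` operator has the same precedence as `%` and binds tighter than `+`, so the file name string has to be assembled before it is joined onto the `Path`. A small sketch of why the parentheses matter (the wrapper function is made up for illustration; the file name patterns are the ones from those patches):

    from pathlib import Path


    def build_output_names(tmp_path: Path, spacing: int, ext: str):
        # Without parentheses, `tmp_path / 'temp-vl.' + ext` parses as
        # `(tmp_path / 'temp-vl.') + ext` and raises TypeError (Path + str);
        # `tmp_path / 'sample-%d-src.fif' % spacing` parses as
        # `(tmp_path / 'sample-%d-src.fif') % spacing` and also raises TypeError.
        fname_temp = tmp_path / ('temp-vl.' + ext)
        src_name = tmp_path / ('sample-%d-src.fif' % spacing)
        return fname_temp, src_name
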
@@ -416,13 +415,11 @@ def test_coreg_gui_pyvista_file_support(inst_path, tmpdir, @pytest.mark.slowtest @testing.requires_testing_data -def test_coreg_gui_pyvista(tmpdir, renderer_interactive_pyvistaqt): +def test_coreg_gui_pyvista(tmp_path, renderer_interactive_pyvistaqt): """Test that using CoregistrationUI matches mne coreg.""" from mne.gui import coregistration - tempdir = str(tmpdir) config = get_config(home_dir=os.environ.get('_MNE_FAKE_HOME_DIR')) - tmp_trans = op.join(tempdir, 'tmp-trans.fif') - + tmp_trans = tmp_path / 'tmp-trans.fif' coreg = coregistration(subject='sample', subjects_dir=subjects_dir, trans=fname_trans) assert not coreg._lock_fids From f44ee99825a425ad3db3d1f480f957650b6e3429 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 2 Nov 2021 12:14:25 +0100 Subject: [PATCH 20/23] Convert Path to str --- mne/gui/_coreg.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py index fb319bfe751..ac5861eaf29 100644 --- a/mne/gui/_coreg.py +++ b/mne/gui/_coreg.py @@ -230,6 +230,8 @@ def _set_info_file(self, fname): self._widgets["info_file"].set_value(0, '') return + fname = _check_fname(fname) # convert to str + # ctf ds `files` are actually directories if fname.endswith(('.ds',)): self._info_file = _check_fname( From 46604490a633c0baed0dcb57652102c6b5734be9 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 2 Nov 2021 13:21:02 +0100 Subject: [PATCH 21/23] Overwrite --- mne/gui/_coreg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py index ac5861eaf29..4c859f6c99b 100644 --- a/mne/gui/_coreg.py +++ b/mne/gui/_coreg.py @@ -230,7 +230,7 @@ def _set_info_file(self, fname): self._widgets["info_file"].set_value(0, '') return - fname = _check_fname(fname) # convert to str + fname = _check_fname(fname, overwrite=True) # convert to str # ctf ds `files` are actually directories if fname.endswith(('.ds',)): From a70f25c783f722eb82c6dc2c1585ec136f880aad Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 2 Nov 2021 14:42:30 +0100 Subject: [PATCH 22/23] Forgot merge conflict --- mne/tests/test_epochs.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index ca2f11e09a4..26c544084d8 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1243,12 +1243,8 @@ def test_split_saving(tmp_path, split_size, n_epochs, n_files, size, metadata, assert op.isfile(split_fname_bids_part1) -<<<<<<< HEAD @pytest.mark.slowtest -def test_split_many_reset(tmpdir): -======= def test_split_many_reset(tmp_path): ->>>>>>> 50051e845 (Replace tmpdir with tmp_path) """Test splitting with many events and using reset.""" data = np.zeros((1000, 1, 1024)) # 1 ch, 1024 samples assert data[0, 0].nbytes == 8192 # 8 kB per epoch From 1c2e4442deec80f13a917b4c121d761e3b4bbdef Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 2 Nov 2021 15:49:53 -0400 Subject: [PATCH 23/23] FIX: Specific --- mne/tests/test_source_estimate.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index 56a5216b4a0..4f20833b360 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -437,7 +437,9 @@ def test_io_stc_h5(tmp_path, is_complex, vector): stc = _fake_vec_stc(is_complex=is_complex) else: stc = _fake_stc(is_complex=is_complex) - pytest.raises(ValueError, stc.save, tmp_path / 'tmp', ftype='foo') + match = 'can only be written' if 
vector else "Invalid value for the 'ftype" + with pytest.raises(ValueError, match=match): + stc.save(tmp_path / 'tmp.h5', ftype='foo') out_name = str(tmp_path / 'tmp') stc.save(out_name, ftype='h5') stc.save(out_name, ftype='h5') # test overwrite
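
PATCH 23 tightens the final exception check by matching on the message: the `match` argument of `pytest.raises` is a regular expression searched (via re.search) against the string of the raised exception, so the test only passes when the expected ValueError is raised, not just any ValueError. A self-contained sketch of the pattern (the helper and its error message are stand-ins, not MNE code):

    import pytest


    def _fake_save(ftype):
        # stand-in for a writer that rejects unknown formats
        raise ValueError(f"Invalid value for the 'ftype' parameter: got {ftype!r}")


    def test_error_message_is_specific():
        # a ValueError with a different message would make this test fail
        with pytest.raises(ValueError, match="Invalid value for the 'ftype"):
            _fake_save('foo')
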