diff --git a/pyproject.toml b/pyproject.toml index daefebf91c29..af45211b85a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -203,7 +203,26 @@ ignore = [ reportMissingTypeStubs = true reportDeprecated = true -typeCheckingMode = "standard" +typeCheckingMode = "strict" +reportUnknownVariableType = "none" +reportPrivateUsage = "none" +reportUnknownMemberType = "none" +reportUnknownParameterType = "none" +reportMissingTypeArgument = "none" +reportMissingParameterType = "none" +reportUnknownArgumentType = "none" +reportUnusedFunction = "none" +reportUnusedImport = "none" +reportUnknownLambdaType = "none" +reportUnnecessaryIsInstance = "none" +reportArgumentType = "none" +reportUntypedNamedTuple = "none" +reportUnnecessaryComparison = "none" +reportConstantRedefinition = "none" +reportUnusedClass = "none" +reportUnnecessaryCast = "none" # mypy already checks this. If it fails for pyright its because mypy requires it +reportAssignmentType = "none" +reportUnnecessaryContains = "none" [tool.pytest.ini_options] minversion = "7.2" diff --git a/src/qcodes/dataset/legacy_import.py b/src/qcodes/dataset/legacy_import.py index c74ef68e2864..babd3955209e 100644 --- a/src/qcodes/dataset/legacy_import.py +++ b/src/qcodes/dataset/legacy_import.py @@ -33,7 +33,7 @@ def setup_measurement( """ meas = Measurement(exp=exp) - for arrayname, array in dataset.arrays.items(): + for array in dataset.arrays.values(): if array.is_setpoint: setarrays = None else: @@ -121,7 +121,7 @@ def import_dat_file(location: str | Path, exp: Experiment | None = None) -> list run_ids = [] with meas.run() as datasaver: datasaver.dataset.add_metadata("snapshot", json.dumps(loaded_data.snapshot())) - for arrayname, array in loaded_data.arrays.items(): + for array in loaded_data.arrays.values(): if not array.is_setpoint: run_id = store_array_to_database(datasaver, array) run_ids.append(run_id) diff --git a/src/qcodes/dataset/plotting.py b/src/qcodes/dataset/plotting.py index 2f509072ca94..f7267b767f23 100644 
--- a/src/qcodes/dataset/plotting.py +++ b/src/qcodes/dataset/plotting.py @@ -217,8 +217,8 @@ def plot_dataset( if axeslist is None: axeslist = [] - for i in range(nplots): - _, ax = plt.subplots(1, 1, **subplots_kwargs) + for _ in range(nplots): + __, ax = plt.subplots(1, 1, **subplots_kwargs) axeslist.append(ax) else: if len(subplots_kwargs) != 0: diff --git a/src/qcodes/dataset/sqlite/connection.py b/src/qcodes/dataset/sqlite/connection.py index b919e26dadaa..c469c806d477 100644 --- a/src/qcodes/dataset/sqlite/connection.py +++ b/src/qcodes/dataset/sqlite/connection.py @@ -21,7 +21,7 @@ log = logging.getLogger(__name__) -class ConnectionPlus(wrapt.ObjectProxy): +class ConnectionPlus(wrapt.ObjectProxy): # pyright: ignore[reportUntypedBaseClass] """ A class to extend the sqlite3.Connection object. Since sqlite3.Connection has no __dict__, we can not directly add attributes to its instance diff --git a/src/qcodes/extensions/_refactor.py b/src/qcodes/extensions/_refactor.py index 5055c759a417..c50da2e263e5 100644 --- a/src/qcodes/extensions/_refactor.py +++ b/src/qcodes/extensions/_refactor.py @@ -1,6 +1,7 @@ from __future__ import annotations import argparse +import logging import os from ast import literal_eval from dataclasses import dataclass @@ -16,6 +17,8 @@ "qcodes-refactor requires that QCoDeS is installed with refactor extra dependencies." 
) from er +_LOG = logging.getLogger(__name__) + @dataclass class Extracted: @@ -88,6 +91,9 @@ def visit_Arg(self, node: cst.Arg) -> None: case cst.Arg(keyword=cst.Name("parameter_class"), value=cst.Name(e_value)): # arg is parameter class self.annotations.parameter_class = e_value + case cst.Arg(): + _LOG.info("Unexpected node %s", str(node)) + pass def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.Call: call_name = _get_call_name(updated_node) diff --git a/src/qcodes/instrument/mockers/ami430.py b/src/qcodes/instrument/mockers/ami430.py index e7c75ff6689c..329c3cbc616c 100644 --- a/src/qcodes/instrument/mockers/ami430.py +++ b/src/qcodes/instrument/mockers/ami430.py @@ -161,7 +161,9 @@ def _handle_messages(self, msg): handler = self.handlers[key][gs] if callable(handler): - rval = handler(args) + # some of the callables in the dict does not take arguments. + # ignore that warning for now since this is mock code only + rval = handler(args) # pyright: ignore[reportCallIssue] else: rval = handler diff --git a/src/qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py b/src/qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py index 75997076a54d..87736b0d242e 100644 --- a/src/qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py +++ b/src/qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1500_module.py @@ -85,7 +85,7 @@ def parse_module_query_response(response: str) -> dict[SlotNr, str]: return { SlotNr(slot_nr): model - for slot_nr, (model, rev) in enumerate(moduleinfo, start=1) + for slot_nr, (model, _) in enumerate(moduleinfo, start=1) if model != "0" } @@ -271,7 +271,7 @@ def convert_dummy_val_to_nan(param: _FMTResponse) -> None: param: This must be of type named tuple _FMTResponse. 
""" - for index, value in enumerate(param.value): + for index, _ in enumerate(param.value): param.value[index] = _convert_to_nan_if_dummy_value(param.value[index]) diff --git a/src/qcodes/instrument_drivers/american_magnetics/AMI430.py b/src/qcodes/instrument_drivers/american_magnetics/AMI430.py index 89771d512a98..52c1d4c8290b 100644 --- a/src/qcodes/instrument_drivers/american_magnetics/AMI430.py +++ b/src/qcodes/instrument_drivers/american_magnetics/AMI430.py @@ -1004,11 +1004,11 @@ def _adjust_child_instruments(self, values: tuple[float, float, float]) -> None: raise ValueError("_set_fields aborted; field would exceed limit") # Check if the individual instruments are ready - for name, value in zip(["x", "y", "z"], values): + for name in ("x", "y", "z"): instrument = getattr(self, f"_instrument_{name}") if instrument.ramping_state() == "ramping": - msg = "_set_fields aborted; magnet {} is already ramping" - raise AMI430Exception(msg.format(instrument)) + msg = f"_set_fields aborted; magnet {instrument} is already ramping" + raise AMI430Exception(msg) # Now that we know we can proceed, call the individual instruments diff --git a/src/qcodes/instrument_drivers/american_magnetics/AMI430_visa.py b/src/qcodes/instrument_drivers/american_magnetics/AMI430_visa.py index e0394679f9f1..677d5c1482f2 100644 --- a/src/qcodes/instrument_drivers/american_magnetics/AMI430_visa.py +++ b/src/qcodes/instrument_drivers/american_magnetics/AMI430_visa.py @@ -1099,11 +1099,11 @@ def _adjust_child_instruments(self, values: tuple[float, float, float]) -> None: raise ValueError("_set_fields aborted; field would exceed limit") # Check if the individual instruments are ready - for name, value in zip(["x", "y", "z"], values): + for name in ("x", "y", "z"): instrument = getattr(self, f"_instrument_{name}") if instrument.ramping_state() == "ramping": - msg = "_set_fields aborted; magnet {} is already ramping" - raise AMI430Exception(msg.format(instrument)) + msg = f"_set_fields aborted; 
magnet {instrument} is already ramping" + raise AMI430Exception(msg) # Now that we know we can proceed, call the individual instruments diff --git a/src/qcodes/instrument_drivers/signal_hound/SignalHound_USB_SA124B.py b/src/qcodes/instrument_drivers/signal_hound/SignalHound_USB_SA124B.py index e3b8d66e0b9d..8208bc4d535a 100644 --- a/src/qcodes/instrument_drivers/signal_hound/SignalHound_USB_SA124B.py +++ b/src/qcodes/instrument_drivers/signal_hound/SignalHound_USB_SA124B.py @@ -742,7 +742,7 @@ def _get_sweep_data(self) -> np.ndarray: data = np.zeros(sweep_len) Navg = self.avg() - for i in range(Navg): + for _ in range(Navg): datamin = np.zeros((sweep_len), dtype=np.float32) datamax = np.zeros((sweep_len), dtype=np.float32) diff --git a/src/qcodes/instrument_drivers/tektronix/AWG5014.py b/src/qcodes/instrument_drivers/tektronix/AWG5014.py index 658ca7d90a76..674d1e2fe193 100644 --- a/src/qcodes/instrument_drivers/tektronix/AWG5014.py +++ b/src/qcodes/instrument_drivers/tektronix/AWG5014.py @@ -964,7 +964,7 @@ def generate_sequence_cfg(self) -> dict[str, float]: """ log.info("Generating sequence_cfg") - AWG_sequence_cfg = { + AWG_sequence_cfg: dict[str, float] = { "SAMPLING_RATE": self.clock_freq.get(), "CLOCK_SOURCE": ( 1 if self.clock_source().startswith("INT") else 2 diff --git a/src/qcodes/parameters/parameter_base.py b/src/qcodes/parameters/parameter_base.py index 6ce74a533218..7f3641bc2e4b 100644 --- a/src/qcodes/parameters/parameter_base.py +++ b/src/qcodes/parameters/parameter_base.py @@ -748,7 +748,7 @@ def set_wrapper(value: ParamDataType, **kwargs: Any) -> None: # a list containing only `value`. 
steps = self.get_ramp_values(value, step=self.step) - for step_index, val_step in enumerate(steps): + for val_step in steps: # even if the final value is valid we may be generating # steps that are not so validate them too self.validate(val_step) diff --git a/tests/dataset/test_data_set_cache.py b/tests/dataset/test_data_set_cache.py index 465d21fd3581..08851a3c9fa2 100644 --- a/tests/dataset/test_data_set_cache.py +++ b/tests/dataset/test_data_set_cache.py @@ -329,7 +329,7 @@ def test_cache_1d( _assert_parameter_data_is_identical( dataset2.get_parameter_data(), dataset2.cache.data() ) - for i, v in enumerate(setpoints_values): + for v in setpoints_values: setpoints_param.set(v) meas_vals1 = [(param, param.get()) for param in meas_parameters1] @@ -808,7 +808,7 @@ def test_cache_1d_shape( _assert_parameter_data_is_identical( dataset.get_parameter_data(), dataset.cache.data() ) - for i, v in enumerate(setpoints_values): + for v in setpoints_values: n_points_measured += 1 setpoints_param.set(v) diff --git a/tests/dataset/test_database_extract_runs.py b/tests/dataset/test_database_extract_runs.py index e2b5bf498dea..7b0d727e8d49 100644 --- a/tests/dataset/test_database_extract_runs.py +++ b/tests/dataset/test_database_extract_runs.py @@ -579,13 +579,13 @@ def test_combine_runs( source_1_exp = Experiment(conn=source_conn_1, name="exp1", sample_name="no_sample") source_1_datasets = [ - DataSet(conn=source_conn_1, exp_id=source_1_exp.exp_id) for i in range(10) + DataSet(conn=source_conn_1, exp_id=source_1_exp.exp_id) for _ in range(10) ] source_2_exp = Experiment(conn=source_conn_2, name="exp2", sample_name="no_sample") source_2_datasets = [ - DataSet(conn=source_conn_2, exp_id=source_2_exp.exp_id) for i in range(10) + DataSet(conn=source_conn_2, exp_id=source_2_exp.exp_id) for _ in range(10) ] guids_1 = {dataset.guid for dataset in source_1_datasets} @@ -664,10 +664,10 @@ def test_copy_datasets_and_add_new( source_exp_1 = Experiment(conn=source_conn, name="exp1", sample_name="no_sample")
source_exp_2 = Experiment(conn=source_conn, name="exp2", sample_name="no_sample") source_datasets_1 = [ - DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) for i in range(5) + DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) for _ in range(5) ] source_datasets_2 = [ - DataSet(conn=source_conn, exp_id=source_exp_2.exp_id) for i in range(5) + DataSet(conn=source_conn, exp_id=source_exp_2.exp_id) for _ in range(5) ] source_datasets = source_datasets_1 + source_datasets_2 @@ -708,7 +708,7 @@ def test_copy_datasets_and_add_new( # add additional runs and verify that the ids and counters increase as # expected - new_datasets = [DataSet(conn=target_conn, exp_id=exp.exp_id) for i in range(3)] + new_datasets = [DataSet(conn=target_conn, exp_id=exp.exp_id) for _ in range(3)] for ds in new_datasets: ds.set_interdependencies(some_interdeps[1]) diff --git a/tests/drivers/test_tektronix_AWG70000A.py b/tests/drivers/test_tektronix_AWG70000A.py index 0bc9d596d438..004bb65ca9ca 100644 --- a/tests/drivers/test_tektronix_AWG70000A.py +++ b/tests/drivers/test_tektronix_AWG70000A.py @@ -251,7 +251,7 @@ def test_makeSEQXFile(awg2, random_wfm_m1_m2_package) -> None: event_jumps = [0] * seqlen event_jump_to = [0] * seqlen go_to = [0] * seqlen - wfms = [[wfmpkg() for i in range(seqlen)] for j in range(chans)] + wfms = [[wfmpkg() for _ in range(seqlen)] for __ in range(chans)] amplitudes = [0.5] * chans seqname = "testseq" diff --git a/tests/parameter/test_combined_par.py b/tests/parameter/test_combined_par.py index 1877d4581cfa..ee2ad7a582f2 100644 --- a/tests/parameter/test_combined_par.py +++ b/tests/parameter/test_combined_par.py @@ -100,7 +100,7 @@ def test_aggregator( ) results = [] - for i, value in enumerate(sweep_values): + for value in sweep_values: res = sweep_values.set(value) results.append(sweep_values._aggregate(*res)) diff --git a/tests/test_channels.py b/tests/test_channels.py index cd2d634211ab..472fdc45a696 100644
--- a/tests/test_channels.py +++ b/tests/test_channels.py @@ -26,9 +26,6 @@ def _make_dci() -> Generator[DummyChannelInstrument, None, None]: @pytest.fixture(scope="function", name="dci_with_list") def _make_dci_with_list() -> Generator[Instrument, None, None]: - for i in range(10): - pass - dci = Instrument(name="dciwl") channels = ChannelList(dci, "ListElem", DummyChannel, snapshotable=False) for chan_name in ("A", "B", "C", "D", "E", "F"):