6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -12,7 +12,7 @@ repos:
- id: check-added-large-files
- id: check-toml
- repo: https://github.com/python-poetry/poetry
- rev: 2.2.0
+ rev: 2.2.1
hooks:
- id: poetry-check
- repo: https://github.com/tox-dev/pyproject-fmt
@@ -42,7 +42,7 @@ repos:
- id: yamllint
exclude: ".pre-commit-config.yaml"
- repo: https://github.com/psf/black
- rev: 25.1.0
+ rev: 25.9.0
hooks:
- id: black
- repo: https://github.com/asottile/blacken-docs
@@ -64,7 +64,7 @@ repos:
hooks:
- id: vulture
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.18.1
+ rev: v1.18.2
hooks:
- id: mypy
exclude: >
8 changes: 4 additions & 4 deletions cytotable/convert.py
@@ -101,7 +101,7 @@ def _get_table_columns_and_types(
ddb_reader.execute(
select_query.replace("&select_source", select_source)
)
- .arrow()
+ .fetch_arrow_table()
.to_pylist()
)

@@ -122,7 +122,7 @@ def _get_table_columns_and_types(
ddb_reader.execute(
select_query.replace("&select_source", "arrow_data_tbl")
)
- .arrow()
+ .fetch_arrow_table()
.to_pylist()
)
else:
@@ -509,7 +509,7 @@ def _source_pageset_to_parquet(
/* optional ordering per pageset */
{"ORDER BY " + source['page_key'] if sort_output else ""};
"""
- ).arrow(),
+ ).fetch_arrow_table(),
where=result_filepath,
)
# Include exception handling to read mixed-type data
@@ -925,7 +925,7 @@ def _join_source_pageset(
/* optional sorting per pageset */
{"ORDER BY " + page_key if sort_output else ""};
"""
- ).arrow()
+ ).fetch_arrow_table()

# drop nulls if specified
if drop_null:
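The convert.py hunks above all make the same change: DuckDB query results are materialized with fetch_arrow_table() instead of the older arrow() accessor; both return a pyarrow.Table. A minimal sketch of the pattern used in _source_pageset_to_parquet, with an illustrative query and output path rather than CytoTable's actual ones:

import duckdb
import pyarrow.parquet as parquet

ddb_reader = duckdb.connect()

# materialize the query result as a pyarrow.Table via fetch_arrow_table()
arrow_table = ddb_reader.execute(
    """
    SELECT 1 AS example_col
    ORDER BY example_col;
    """
).fetch_arrow_table()
ddb_reader.close()

# write the Arrow table out to Parquet, as the hunk above does
parquet.write_table(table=arrow_table, where="example.parquet")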
2 changes: 1 addition & 1 deletion cytotable/sources.py
@@ -134,7 +134,7 @@ def _get_source_filepaths(
""",
parameters=[str(element["source_path"])],
)
- .arrow()["table_name"]
+ .fetch_arrow_table()["table_name"]
.to_pylist()
# make sure the table names match with compartment + metadata names
if targets is not None
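sources.py follows the same migration; here the resulting Arrow table is indexed by column name and converted to a plain Python list. A hedged sketch with stand-in table names, not the SQLite metadata query _get_source_filepaths actually runs:

import duckdb

ddb = duckdb.connect()
table_names = (
    ddb.execute(
        # stand-in for the sqlite metadata query in _get_source_filepaths
        "SELECT 'Nuclei' AS table_name UNION ALL SELECT 'Cytoplasm'"
    )
    .fetch_arrow_table()["table_name"]
    .to_pylist()
)
ddb.close()
print(table_names)  # ['Nuclei', 'Cytoplasm']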
4 changes: 2 additions & 2 deletions tests/conftest.py
@@ -500,7 +500,7 @@ def col_renames(name: str, table: pa.Table):
AND nuclei.Metadata_ObjectNumber = cytoplasm.Metadata_Cytoplasm_Parent_Nuclei
"""
)
- .arrow()
+ .fetch_arrow_table()
)

# reversed order column check as col removals will change index order
@@ -566,7 +566,7 @@ def fixture_cellprofiler_merged_nf1data(
AND nuclei.Nuclei_Number_Object_Number = cytoplasm.Cytoplasm_Parent_Nuclei
"""
)
- .arrow()
+ .fetch_arrow_table()
.drop_null()
)

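The conftest.py fixtures additionally call drop_null() on the Arrow table, which removes any row containing a null. A short sketch under assumed data, not the fixtures' real cytoplasm/nuclei join:

import duckdb

ddb = duckdb.connect()
merged = (
    ddb.execute(
        # stand-in for the join query in the fixture
        "SELECT * FROM (VALUES (1, 'a'), (2, NULL)) AS t(id, label)"
    )
    .fetch_arrow_table()
    .drop_null()  # removes the (2, NULL) row
)
ddb.close()
print(merged.num_rows)  # 1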
@@ -29,7 +29,7 @@
FROM read_csv_auto('{data_file}')
"""
)
- .arrow()
+ .fetch_arrow_table()
.schema,
}
)
@@ -65,7 +65,7 @@
/* select rows C and D to limit the dataset */
AND data_file."ROW" in ('C', 'D')
"""
- ).arrow(),
+ ).fetch_arrow_table(),
# output the filtered data as a CSV to a new location
output_file=(
f"{TARGET_DATA_DIR}/{OUTPUT_FILENAME}"
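This test-data hunk filters a CSV through read_csv_auto before re-exporting it. A self-contained sketch of that round trip, with file names and CSV contents assumed for illustration:

import duckdb
from pyarrow import csv

# write a tiny stand-in CSV so the sketch runs on its own
with open("input.csv", "w") as f:
    f.write("ROW,value\nA,1\nC,2\nD,3\n")

ddb = duckdb.connect()
filtered = ddb.execute(
    """
    SELECT *
    FROM read_csv_auto('input.csv') AS data_file
    /* keep only rows C and D, as the filter above does */
    WHERE data_file."ROW" IN ('C', 'D')
    """
).fetch_arrow_table()
ddb.close()

# output the filtered data as a new CSV
csv.write_csv(filtered, "output.csv")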
6 changes: 3 additions & 3 deletions tests/test_convert.py
@@ -691,7 +691,7 @@ def test_run_export_workflow(
ignore_errors=TRUE)
"""
)
- .arrow()
+ .fetch_arrow_table()
)
parquet_result = parquet.ParquetDataset(
path_or_paths=flattened_result["table"],
@@ -756,7 +756,7 @@ def test_run_export_workflow_unsorted(
ignore_errors=TRUE)
"""
)
- .arrow()
+ .fetch_arrow_table()
)
parquet_result = parquet.ParquetDataset(
path_or_paths=flattened_result["table"],
@@ -1354,7 +1354,7 @@ def test_in_carta_to_parquet(
SELECT *
FROM read_csv_auto('{data_dir}/*.csv')
"""
- ).arrow()
+ ).fetch_arrow_table()

# process the data with cytotable using in-carta preset
cytotable_result = convert(