31 changes: 19 additions & 12 deletions python/pyiceberg/io/pyarrow.py
@@ -391,7 +391,7 @@ def visit_timestamp(self, _: TimestampType) -> pa.DataType:
         return pa.timestamp(unit="us")
 
     def visit_timestampz(self, _: TimestamptzType) -> pa.DataType:
-        return pa.timestamp(unit="us", tz="+00:00")
+        return pa.timestamp(unit="us", tz="UTC")
Contributor:
Why change all uses to UTC rather than +00:00? Aren't these the same thing?

Contributor Author:
Technically, yes. #6945 has a deeper explanation of this: the test data is written by a Docker container that uses UTC.


     def visit_string(self, _: StringType) -> pa.DataType:
         return pa.string()
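A minimal, standalone check of the point discussed in the thread above (not part of the PR): as I understand pyarrow's type equality, the timezone string is stored verbatim on the timestamp type, so tz="UTC" and tz="+00:00" denote the same zero offset but produce distinct DataType objects, which is why the schema has to match the convention the writer used.

import pyarrow as pa

# Both denote a zero offset, but the tz string is part of the type,
# so the two DataType objects are not expected to compare equal.
utc = pa.timestamp("us", tz="UTC")
offset = pa.timestamp("us", tz="+00:00")
print(utc == offset)  # expected: False, because the tz strings differ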
@@ -477,7 +477,7 @@ def _file_to_table(
     projected_schema: Schema,
     projected_field_ids: Set[int],
     case_sensitive: bool,
-) -> pa.Table:
+) -> Optional[pa.Table]:
     _, path = PyArrowFileIO.parse_location(task.file.file_path)
 
     # Get the schema
@@ -512,10 +512,11 @@ def _file_to_table(
             columns=[col.name for col in file_project_schema.columns],
         )
 
-        if pyarrow_filter is not None:
-            arrow_table = arrow_table.filter(pyarrow_filter)
-
-        return to_requested_schema(projected_schema, file_project_schema, arrow_table)
+        # If there is no data, we don't have to go through the schema
+        if len(arrow_table) > 0:
+            return to_requested_schema(projected_schema, file_project_schema, arrow_table)
+        else:
+            return None
 
 
 def project_table(
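A side note on the new check in _file_to_table (a standalone illustration, not from the PR): len() on a pyarrow Table is its row count, so `len(arrow_table) > 0` asks whether this file still contributes any rows after filtering; empty results are returned as None and dropped by the caller.

import pyarrow as pa

# len(Table) equals Table.num_rows
empty = pa.table({"id": pa.array([], type=pa.int64())})
assert len(empty) == empty.num_rows == 0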
@@ -547,16 +548,22 @@ def project_table(
     }.union(extract_field_ids(bound_row_filter))
 
     with ThreadPool() as pool:
-        tables = pool.starmap(
-            func=_file_to_table,
-            iterable=[(fs, task, bound_row_filter, projected_schema, projected_field_ids, case_sensitive) for task in tasks],
-            chunksize=None,  # we could use this to control how to materialize the generator of tasks (we should also make the expression above lazy)
-        )
+        tables = [
+            table
+            for table in pool.starmap(
+                func=_file_to_table,
+                iterable=[(fs, task, bound_row_filter, projected_schema, projected_field_ids, case_sensitive) for task in tasks],
+                chunksize=None,  # we could use this to control how to materialize the generator of tasks (we should also make the expression above lazy)
+            )
+            if table is not None
+        ]
 
     if len(tables) > 1:
         return pa.concat_tables(tables)
-    else:
+    elif len(tables) == 1:
         return tables[0]
+    else:
+        return pa.Table.from_batches([], schema=schema_to_pyarrow(projected_schema))
 
 
 def to_requested_schema(requested_schema: Schema, file_schema: Schema, table: pa.Table) -> pa.Table:
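A standalone sketch of the three-way branch at the end of project_table above, using an assumed two-column schema in place of schema_to_pyarrow(projected_schema): concatenate when there are multiple non-empty tables, pass a single table through, and otherwise build an empty table that still carries the projected schema.

import pyarrow as pa

# Assumed stand-in for schema_to_pyarrow(projected_schema)
schema = pa.schema([pa.field("id", pa.int64()), pa.field("name", pa.string())])
tables = []  # e.g. every task returned None because its file had no matching rows

if len(tables) > 1:
    result = pa.concat_tables(tables)
elif len(tables) == 1:
    result = tables[0]
else:
    # from_batches with no batches yields a zero-row table with the right schema
    result = pa.Table.from_batches([], schema=schema)

print(result.num_rows)  # 0
print(result.schema)    # id: int64, name: string

Falling back to a schema-carrying empty table rather than None means callers of project_table always get a pa.Table with the expected columns, even when every file is filtered down to nothing.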
2 changes: 1 addition & 1 deletion python/tests/io/test_pyarrow.py
@@ -377,7 +377,7 @@ def test_timestamp_type_to_pyarrow() -> None:
 
 def test_timestamptz_type_to_pyarrow() -> None:
     iceberg_type = TimestamptzType()
-    assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.timestamp(unit="us", tz="+00:00")
+    assert visit(iceberg_type, _ConvertToArrowSchema()) == pa.timestamp(unit="us", tz="UTC")
 
 
 def test_string_type_to_pyarrow() -> None: