-
Notifications
You must be signed in to change notification settings - Fork 4k
ARROW-1291: [Python] Cast non-string DataFrame columns to strings in RecordBatch/Table.from_pandas #911
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
ARROW-1291: [Python] Cast non-string DataFrame columns to strings in RecordBatch/Table.from_pandas #911
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -155,7 +155,7 @@ def index_level_name(index, i): | |
| return '__index_level_{:d}__'.format(i) | ||
|
|
||
|
|
||
| def construct_metadata(df, index_levels, preserve_index, types): | ||
| def construct_metadata(df, column_names, index_levels, preserve_index, types): | ||
| """Returns a dictionary containing enough metadata to reconstruct a pandas | ||
| DataFrame as an Arrow Table, including index columns. | ||
|
|
||
|
|
@@ -170,41 +170,77 @@ def construct_metadata(df, index_levels, preserve_index, types): | |
| ------- | ||
| dict | ||
| """ | ||
| ncolumns = len(df.columns) | ||
| ncolumns = len(column_names) | ||
| df_types = types[:ncolumns] | ||
| index_types = types[ncolumns:ncolumns + len(index_levels)] | ||
|
|
||
| column_metadata = [ | ||
| get_column_metadata(df[col_name], name=sanitized_name, | ||
| arrow_type=arrow_type) | ||
| for col_name, sanitized_name, arrow_type in | ||
| zip(df.columns, column_names, df_types) | ||
| ] | ||
|
|
||
| if preserve_index: | ||
| index_column_names = [index_level_name(level, i) | ||
| for i, level in enumerate(index_levels)] | ||
| index_column_metadata = [ | ||
| get_column_metadata(level, name=index_level_name(level, i), | ||
| arrow_type=arrow_type) | ||
| for i, (level, arrow_type) in enumerate(zip(index_levels, | ||
| index_types)) | ||
| ] | ||
| else: | ||
| index_column_names = index_column_metadata = [] | ||
|
|
||
| return { | ||
| b'pandas': json.dumps( | ||
| { | ||
| 'index_columns': [ | ||
| index_level_name(level, i) | ||
| for i, level in enumerate(index_levels) | ||
| ] if preserve_index else [], | ||
| 'columns': [ | ||
| get_column_metadata( | ||
| df[name], | ||
| name=name, | ||
| arrow_type=arrow_type | ||
| ) | ||
| for name, arrow_type in zip(df.columns, df_types) | ||
| ] + ( | ||
| [ | ||
| get_column_metadata( | ||
| level, | ||
| name=index_level_name(level, i), | ||
| arrow_type=arrow_type | ||
| ) | ||
| for i, (level, arrow_type) in enumerate( | ||
| zip(index_levels, index_types) | ||
| ) | ||
| ] if preserve_index else [] | ||
| ), | ||
| 'pandas_version': pd.__version__, | ||
| } | ||
| ).encode('utf8') | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'll start making more local variables :) |
||
| b'pandas': json.dumps({ | ||
| 'index_columns': index_column_names, | ||
| 'columns': column_metadata + index_column_metadata, | ||
| 'pandas_version': pd.__version__ | ||
| }).encode('utf8') | ||
| } | ||
|
|
||
|
|
||
| def dataframe_to_arrays(df, timestamps_to_ms, schema, preserve_index): | ||
| names = [] | ||
| arrays = [] | ||
| index_columns = [] | ||
| types = [] | ||
| type = None | ||
|
|
||
| if preserve_index: | ||
| n = len(getattr(df.index, 'levels', [df.index])) | ||
| index_columns.extend(df.index.get_level_values(i) for i in range(n)) | ||
|
|
||
| for name in df.columns: | ||
| col = df[name] | ||
| if not isinstance(name, six.string_types): | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This allows anything that isn't a string, including floats, timestamps, and any other wacky thing someone puts in a column index. Should this be more strict about what
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In a lot of cases it will just be "Index". I'd rather have someone complain about this than pre-emptively guess what the right thing to do will be.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Fair enough. |
||
| name = str(name) | ||
|
|
||
| if schema is not None: | ||
| field = schema.field_by_name(name) | ||
| type = getattr(field, "type", None) | ||
|
|
||
| array = pa.Array.from_pandas( | ||
| col, type=type, timestamps_to_ms=timestamps_to_ms | ||
| ) | ||
| arrays.append(array) | ||
| names.append(name) | ||
| types.append(array.type) | ||
|
|
||
| for i, column in enumerate(index_columns): | ||
| array = pa.Array.from_pandas(column, timestamps_to_ms=timestamps_to_ms) | ||
| arrays.append(array) | ||
| names.append(index_level_name(column, i)) | ||
| types.append(array.type) | ||
|
|
||
| metadata = construct_metadata( | ||
| df, names, index_columns, preserve_index, types | ||
| ) | ||
| return names, arrays, metadata | ||
|
|
||
|
|
||
| def table_to_blockmanager(table, nthreads=1): | ||
| import pandas.core.internals as _int | ||
| from pyarrow.compat import DatetimeTZDtype | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -317,51 +317,6 @@ cdef int _schema_from_arrays( | |
| return 0 | ||
|
|
||
|
|
||
| cdef tuple _dataframe_to_arrays( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Out of curiosity, why was this written in Cython originally?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Started small, got bigger =) |
||
| df, | ||
| bint timestamps_to_ms, | ||
| Schema schema, | ||
| bint preserve_index | ||
| ): | ||
| cdef: | ||
| list names = [] | ||
| list arrays = [] | ||
| list index_columns = [] | ||
| list types = [] | ||
| DataType type = None | ||
| dict metadata | ||
| Py_ssize_t i | ||
| Py_ssize_t n | ||
|
|
||
| if preserve_index: | ||
| n = len(getattr(df.index, 'levels', [df.index])) | ||
| index_columns.extend(df.index.get_level_values(i) for i in range(n)) | ||
|
|
||
| for name in df.columns: | ||
| col = df[name] | ||
| if schema is not None: | ||
| field = schema.field_by_name(name) | ||
| type = getattr(field, "type", None) | ||
|
|
||
| array = Array.from_pandas( | ||
| col, type=type, timestamps_to_ms=timestamps_to_ms | ||
| ) | ||
| arrays.append(array) | ||
| names.append(name) | ||
| types.append(array.type) | ||
|
|
||
| for i, column in enumerate(index_columns): | ||
| array = Array.from_pandas(column, timestamps_to_ms=timestamps_to_ms) | ||
| arrays.append(array) | ||
| names.append(pdcompat.index_level_name(column, i)) | ||
| types.append(array.type) | ||
|
|
||
| metadata = pdcompat.construct_metadata( | ||
| df, index_columns, preserve_index, types | ||
| ) | ||
| return names, arrays, metadata | ||
|
|
||
|
|
||
| cdef class RecordBatch: | ||
| """ | ||
| Batch of rows of columns of equal length | ||
|
|
@@ -570,7 +525,7 @@ cdef class RecordBatch: | |
| ------- | ||
| pyarrow.RecordBatch | ||
| """ | ||
| names, arrays, metadata = _dataframe_to_arrays( | ||
| names, arrays, metadata = pdcompat.dataframe_to_arrays( | ||
| df, False, schema, preserve_index | ||
| ) | ||
| return cls.from_arrays(arrays, names, metadata) | ||
|
|
@@ -748,7 +703,7 @@ cdef class Table: | |
| >>> pa.Table.from_pandas(df) | ||
| <pyarrow.lib.Table object at 0x7f05d1fb1b40> | ||
| """ | ||
| names, arrays, metadata = _dataframe_to_arrays( | ||
| names, arrays, metadata = pdcompat.dataframe_to_arrays( | ||
| df, | ||
| timestamps_to_ms=timestamps_to_ms, | ||
| schema=schema, | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -109,6 +109,11 @@ def test_all_none_category(self): | |
| df['a'] = df['a'].astype('category') | ||
| self._check_pandas_roundtrip(df) | ||
|
|
||
| def test_non_string_columns(self): | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. There should be a test for additional column types that either fails or explicitly succeeds based on what we decide about allowing other types in.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I suppose we can leave this as the only test right now and say that anything other than integers or strings is undefined behavior. |
||
| df = pd.DataFrame({0: [1, 2, 3]}) | ||
| table = pa.Table.from_pandas(df) | ||
| assert table.column(0).name == '0' | ||
|
|
||
| def test_float_no_nulls(self): | ||
| data = {} | ||
| fields = [] | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why pass the column_names instead of:
?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
these got sanitized earlier as part of creating the schema