Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 27 additions & 5 deletions python/pyarrow/table.pxi
Original file line number Diff line number Diff line change
Expand Up @@ -1408,6 +1408,24 @@ cdef _sanitize_arrays(arrays, names, schema, metadata,
return converted_arrays


cdef _truncate_array_string(arr_str, length):
    """Truncate an array's string repr to roughly ``length`` characters.

    The cut is only made at a value delimiter ("," ) or at a closing
    bracket ("]") so a partially printed value is never shown.  A stack of
    expected closing brackets tracks the current nesting depth; the
    truncated result is re-closed with one "...]" per still-open level,
    e.g. "[[1,2,3,...]...]".

    Parameters
    ----------
    arr_str : str
        The full string representation of the array.
    length : int
        Approximate maximum number of characters to keep.

    Returns
    -------
    str
        The truncated representation, or ``arr_str`` unchanged when no
        suitable cut point exists at or after ``length``.

    NOTE(review): brackets occurring inside string *values* are
    indistinguishable from structural brackets here, so such columns may
    truncate oddly (see discussion in the PR thread).
    """
    delim_stack = []

    for pos, char in enumerate(arr_str):
        if char == "[":
            delim_stack.append("]")
            continue
        elif char == "]":
            assert delim_stack[-1] == "]"
            delim_stack.pop()
        elif char != ",":
            # Mid-value character: never cut here.
            continue

        # We are on a "," or "]" boundary; cut once past the limit.
        if pos >= length:
            return arr_str[:(pos + 1)] + "...]" * len(delim_stack)

    # No boundary found at/after ``length`` (string short enough, or it
    # ends mid-token): return the input rather than implicitly None.
    return arr_str


cdef class RecordBatch(_PandasConvertible):
"""
Batch of rows of columns of equal length
Expand Down Expand Up @@ -2726,7 +2744,7 @@ cdef class Table(_PandasConvertible):
raise TypeError("Do not call Table's constructor directly, use one of "
"the `Table.from_*` functions instead.")

def to_string(self, *, show_metadata=False, preview_cols=0):
def to_string(self, *, show_metadata=False, preview_cols=0, cols_char_limit=120):
"""
Return human-readable string representation of Table.

Expand All @@ -2736,6 +2754,8 @@ cdef class Table(_PandasConvertible):
Display Field-level and Schema-level KeyValueMetadata.
preview_cols : int, default 0
Display values of the columns for the first N columns.
cols_char_limit : int, default 120
Max number of characters to show in each column preview

Returns
-------
Expand All @@ -2751,10 +2771,12 @@ cdef class Table(_PandasConvertible):
if preview_cols:
pieces.append('----')
for i in range(min(self.num_columns, preview_cols)):
pieces.append('{}: {}'.format(
self.field(i).name,
self.column(i).to_string(indent=0, skip_new_lines=True)
))
col_string = self.column(i).to_string(
indent=0, skip_new_lines=True)
if len(col_string) > cols_char_limit:
col_string = _truncate_array_string(
col_string, cols_char_limit)
pieces.append('{}: {}'.format(self.field(i).name, col_string))
Copy link
Contributor

@edponce edponce Jan 13, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ideally, we could always truncate "correctly" and consistently, that is, truncate at a delimiter and show closing brackets: [[1,2,3,4,...]]

But several questions first:

  • Is the delimiter always a comma or known in a variable?
  • Is the table representation always encoded as a list of lists? Or can there be a less/more nesting?

Assuming "yes" for the questions above. There are several cases that can occur:

  1. No truncation: [[1,2,3,4]]
  2. Truncate does not reach the final end bracket (# of brackets is arbitrary): [[1,2,3,4]...
  3. Truncate mid-value: [10,20,30,4...
  4. Truncate at a value delimiter: [[1,2,3,...
  5. Truncate at a list delimiter: [[1,2,3,4],...
  6. Truncate at a list ending bracket: [[1,2,3,4]...

To display all truncated cases "correctly" and consistently, after slicing the cols_char_limit, you would need

  • a stack to keep track of open/close bracket pairs
  • check which symbol the truncation occurs on.

Then resolve as follows:

  • If it is a delimiter, then add ... and match with closing brackets
  • If it is a closing bracket
    • and there are only closing brackets left, then add them to obtain the full representation.
    • add ... and match with closing brackets
  • Else it is truncating a value, so remove (or add) characters until reaching a delimiter/bracket

Most of these checks can be identified with regex.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@edponce I fully agree that, ideally, this truncation is "smart" about where to cut off and add ....
But in general it's also the question to what extent this is worth the extra complexity (depending on how complex it would be of course). Instead of parsing the string, another option could also be to slice the number of elements before converting to string (although for nested data types that won't necessarily work as desired).

Now, in the short term (for 7.0.0), I personally find it more important that we at least do some truncation (because currently the repr can be completely useless / annoying by flooding your terminal)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I understand and agree with the current approach. Simply leaving notes as food for thought or future reference.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the feedback. I implemented a very basic version of this for now. This looks pretty good for this example:

>>> from random import sample, choice
>>> import pyarrow as pa
>>> arr_int = pa.array(range(50))
>>> tree_parts = ["roots", "trunk", "crown", "seeds"]
>>> arr_list = pa.array([sample(tree_parts, k=choice(range(len(tree_parts)))) for _ in range(50)])
>>> arr_struct = pa.StructArray.from_arrays([arr_int, arr_list], names=['int_nested', 'list_nested'])
>>> arr_map = pa.array(
...     [
...         [(part, choice(range(10))) for part in sample(tree_parts, k=choice(range(len(tree_parts))))]
...         for _ in range(50)
...     ],
...     type=pa.map_(pa.utf8(), pa.int64())
... )
>>> table = pa.table({
...     'int': pa.chunked_array([arr_int] * 10),
...     'list': pa.chunked_array([arr_list] * 10),
...     'struct': pa.chunked_array([arr_struct] * 10),
...     'map': pa.chunked_array([arr_map] * 10),
... })
>>> print(table)
pyarrow.Table
int: int64
list: list<item: string>
  child 0, item: string
struct: struct<int_nested: int64, list_nested: list<item: string>>
  child 0, int_nested: int64
  child 1, list_nested: list<item: string>
      child 0, item: string
map: map<string, int64>
  child 0, entries: struct<key: string not null, value: int64> not null
      child 0, key: string not null
      child 1, value: int64
----
int: [[0,1,2,3,4,5,6,7,8,9,...,40,41,42,43,44,45,46,47,48,49],[0,1,2,3,4,5,6,7,8,9,...,40,41,42,43,44,45,46,47,48,49],[0,1,2,3,...]...]
list: [[["seeds","trunk","roots"],["trunk","crown"],["crown"],["trunk"],["crown"],[],["roots","seeds"],["roots"],["trunk","roots"]...]...]
struct: [  -- is_valid: all not null  -- child 0 type: int64
    [
      0,
      1,
      2,
      3,
      4,
      5,
      6,...]...]
map: [[    keys:["seeds","crown","trunk"]values:[7,8,7],    keys:["roots","crown"]values:[8,4],    keys:["crown","roots","trunk"]...]...]

The unfortunate thing is it will have bad behavior in the case of string columns containing [. For example,

>>> pa.table({'x': pa.array(["[" * 100]* 500)})
x: [["[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[","[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[",...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]...]

I think that kind of behavior is pretty unavoidable until we push this limit into the PrettyPrinter implementation itself.

Copy link
Contributor

@edponce edponce Jan 18, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a JIRA/PR for implementing similar functionality in C++ Pretty Printer?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if preview_cols < self.num_columns:
pieces.append('...')
return '\n'.join(pieces)
Expand Down
18 changes: 18 additions & 0 deletions python/pyarrow/tests/test_table.py
Original file line number Diff line number Diff line change
Expand Up @@ -1805,6 +1805,24 @@ def test_table_repr_to_string_ellipsis():
c1: [[10,20,30,40,10,...,40,10,20,30,40]]"""


def test_table_repr_cols_char_limit():
    # Build a two-column table with an explicitly supplied schema so the
    # truncated column preview can be compared against a fixed string.
    fields = [
        pa.field('c0', pa.int16(), metadata={'key': 'value'}),
        pa.field('c1', pa.int32()),
    ]
    schema = pa.schema(fields, metadata={b'foo': b'bar'})

    columns = [
        pa.array([1, 2, 3, 4] * 10, type='int16'),
        pa.array([10, 20, 30, 40] * 10, type='int32'),
    ]
    tab = pa.table(columns, schema=schema)

    expected = """pyarrow.Table
c0: int16
c1: int32
----
c0: [[1,2,3,4,1,2,3,4,1,2,...,3,4,1,2,3,4,1,2,3,4]]
c1: [[10,20,30,40,10,20,30,40,10,20,...,30,40,10,20,30,...]...]"""
    assert tab.to_string(preview_cols=10, cols_char_limit=50) == expected


def test_table_function_unicode_schema():
col_a = "äääh"
col_b = "öööf"
Expand Down