From 21f6527369928f8762708ba5370dcfb51d597231 Mon Sep 17 00:00:00 2001 From: Neville Dipale Date: Mon, 24 Aug 2020 18:14:35 +0200 Subject: [PATCH] ARROW-9841: [Rust] Update checked-in fbs files Built and manually fixed errors on required fields --- rust/arrow/src/ipc/gen/File.rs | 2 +- rust/arrow/src/ipc/gen/Message.rs | 255 ++++++++++++++ rust/arrow/src/ipc/gen/Schema.rs | 156 ++++++++- rust/arrow/src/ipc/gen/SparseTensor.rs | 442 +++++++++++++++++++++---- rust/arrow/src/ipc/gen/Tensor.rs | 70 ++-- 5 files changed, 817 insertions(+), 108 deletions(-) diff --git a/rust/arrow/src/ipc/gen/File.rs b/rust/arrow/src/ipc/gen/File.rs index a805100baf7..c268f127fe4 100644 --- a/rust/arrow/src/ipc/gen/File.rs +++ b/rust/arrow/src/ipc/gen/File.rs @@ -88,7 +88,7 @@ impl Block { self.metaDataLength_.from_little_endian() } /// Length of the data (this is aligned so there can be a gap between this and - /// the metatdata). + /// the metadata). pub fn bodyLength<'a>(&'a self) -> i64 { self.bodyLength_.from_little_endian() } diff --git a/rust/arrow/src/ipc/gen/Message.rs b/rust/arrow/src/ipc/gen/Message.rs index 0907ea84fb9..adbdd08376e 100644 --- a/rust/arrow/src/ipc/gen/Message.rs +++ b/rust/arrow/src/ipc/gen/Message.rs @@ -25,6 +25,123 @@ use flatbuffers::EndianScalar; use std::{cmp::Ordering, mem}; // automatically generated by the FlatBuffers compiler, do not modify +#[allow(non_camel_case_types)] +#[repr(i8)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum CompressionType { + LZ4_FRAME = 0, + ZSTD = 1, +} + +const ENUM_MIN_COMPRESSION_TYPE: i8 = 0; +const ENUM_MAX_COMPRESSION_TYPE: i8 = 1; + +impl<'a> flatbuffers::Follow<'a> for CompressionType { + type Inner = Self; + #[inline] + fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::read_scalar_at::(buf, loc) + } +} + +impl flatbuffers::EndianScalar for CompressionType { + #[inline] + fn to_little_endian(self) -> Self { + let n = i8::to_le(self as i8); + let p = &n as *const i8 as *const CompressionType; + unsafe { *p } + } + #[inline] + fn from_little_endian(self) -> Self { + let n = i8::from_le(self as i8); + let p = &n as *const i8 as *const CompressionType; + unsafe { *p } + } +} + +impl flatbuffers::Push for CompressionType { + type Output = CompressionType; + #[inline] + fn push(&self, dst: &mut [u8], _rest: &[u8]) { + flatbuffers::emplace_scalar::(dst, *self); + } +} + +#[allow(non_camel_case_types)] +const ENUM_VALUES_COMPRESSION_TYPE: [CompressionType; 2] = + [CompressionType::LZ4_FRAME, CompressionType::ZSTD]; + +#[allow(non_camel_case_types)] +const ENUM_NAMES_COMPRESSION_TYPE: [&'static str; 2] = ["LZ4_FRAME", "ZSTD"]; + +pub fn enum_name_compression_type(e: CompressionType) -> &'static str { + let index = e as i8; + ENUM_NAMES_COMPRESSION_TYPE[index as usize] +} + +/// Provided for forward compatibility in case we need to support different +/// strategies for compressing the IPC message body (like whole-body +/// compression rather than buffer-level) in the future +#[allow(non_camel_case_types)] +#[repr(i8)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum BodyCompressionMethod { + /// Each constituent buffer is first compressed with the indicated + /// compressor, and then written with the uncompressed length in the first 8 + /// bytes as a 64-bit little-endian signed integer followed by the compressed + /// buffer bytes (and then padding as required by the protocol). 
The + /// uncompressed length may be set to -1 to indicate that the data that + /// follows is not compressed, which can be useful for cases where + /// compression does not yield appreciable savings. + BUFFER = 0, +} + +const ENUM_MIN_BODY_COMPRESSION_METHOD: i8 = 0; +const ENUM_MAX_BODY_COMPRESSION_METHOD: i8 = 0; + +impl<'a> flatbuffers::Follow<'a> for BodyCompressionMethod { + type Inner = Self; + #[inline] + fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::read_scalar_at::(buf, loc) + } +} + +impl flatbuffers::EndianScalar for BodyCompressionMethod { + #[inline] + fn to_little_endian(self) -> Self { + let n = i8::to_le(self as i8); + let p = &n as *const i8 as *const BodyCompressionMethod; + unsafe { *p } + } + #[inline] + fn from_little_endian(self) -> Self { + let n = i8::from_le(self as i8); + let p = &n as *const i8 as *const BodyCompressionMethod; + unsafe { *p } + } +} + +impl flatbuffers::Push for BodyCompressionMethod { + type Output = BodyCompressionMethod; + #[inline] + fn push(&self, dst: &mut [u8], _rest: &[u8]) { + flatbuffers::emplace_scalar::(dst, *self); + } +} + +#[allow(non_camel_case_types)] +const ENUM_VALUES_BODY_COMPRESSION_METHOD: [BodyCompressionMethod; 1] = + [BodyCompressionMethod::BUFFER]; + +#[allow(non_camel_case_types)] +const ENUM_NAMES_BODY_COMPRESSION_METHOD: [&'static str; 1] = ["BUFFER"]; + +pub fn enum_name_body_compression_method(e: BodyCompressionMethod) -> &'static str { + let index = e as i8; + ENUM_NAMES_BODY_COMPRESSION_METHOD[index as usize] +} + /// ---------------------------------------------------------------------- /// The root Message type /// This union enables us to easily send different message types without @@ -184,6 +301,118 @@ impl FieldNode { } } +pub enum BodyCompressionOffset {} +#[derive(Copy, Clone, Debug, PartialEq)] + +/// Optional compression for the memory buffers constituting IPC message +/// bodies. 
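
A minimal std-only sketch of the BUFFER framing described above; the `compress` closure stands in for whichever LZ4-frame or ZSTD codec was negotiated, and this illustrates the wire layout rather than the arrow crate's actual IPC writer:

// Frame one body buffer: an 8-byte little-endian uncompressed length
// (or -1 when the bytes are stored uncompressed), then the payload.
fn frame_buffer(raw: &[u8], compress: impl Fn(&[u8]) -> Vec<u8>) -> Vec<u8> {
    let compressed = compress(raw);
    let mut out = Vec::new();
    if compressed.len() < raw.len() {
        out.extend_from_slice(&(raw.len() as i64).to_le_bytes());
        out.extend_from_slice(&compressed);
    } else {
        // Compression did not pay off: write the -1 sentinel and the raw bytes.
        out.extend_from_slice(&(-1i64).to_le_bytes());
        out.extend_from_slice(raw);
    }
    // Padding to the protocol's required alignment would follow here.
    out
}
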
Intended for use with RecordBatch but could be used for other +/// message types +pub struct BodyCompression<'a> { + pub _tab: flatbuffers::Table<'a>, +} + +impl<'a> flatbuffers::Follow<'a> for BodyCompression<'a> { + type Inner = BodyCompression<'a>; + #[inline] + fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table { buf: buf, loc: loc }, + } + } +} + +impl<'a> BodyCompression<'a> { + #[inline] + pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + BodyCompression { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, + args: &'args BodyCompressionArgs, + ) -> flatbuffers::WIPOffset> { + let mut builder = BodyCompressionBuilder::new(_fbb); + builder.add_method(args.method); + builder.add_codec(args.codec); + builder.finish() + } + + pub const VT_CODEC: flatbuffers::VOffsetT = 4; + pub const VT_METHOD: flatbuffers::VOffsetT = 6; + + /// Compressor library + #[inline] + pub fn codec(&self) -> CompressionType { + self._tab + .get::( + BodyCompression::VT_CODEC, + Some(CompressionType::LZ4_FRAME), + ) + .unwrap() + } + /// Indicates the way the record batch body was compressed + #[inline] + pub fn method(&self) -> BodyCompressionMethod { + self._tab + .get::( + BodyCompression::VT_METHOD, + Some(BodyCompressionMethod::BUFFER), + ) + .unwrap() + } +} + +pub struct BodyCompressionArgs { + pub codec: CompressionType, + pub method: BodyCompressionMethod, +} +impl<'a> Default for BodyCompressionArgs { + #[inline] + fn default() -> Self { + BodyCompressionArgs { + codec: CompressionType::LZ4_FRAME, + method: BodyCompressionMethod::BUFFER, + } + } +} +pub struct BodyCompressionBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, +} +impl<'a: 'b, 'b> BodyCompressionBuilder<'a, 'b> { + #[inline] + pub fn add_codec(&mut self, codec: CompressionType) { + self.fbb_.push_slot::( + BodyCompression::VT_CODEC, + codec, + CompressionType::LZ4_FRAME, + ); + } + #[inline] + pub fn add_method(&mut self, method: BodyCompressionMethod) { + self.fbb_.push_slot::( + BodyCompression::VT_METHOD, + method, + BodyCompressionMethod::BUFFER, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + ) -> BodyCompressionBuilder<'a, 'b> { + let start = _fbb.start_table(); + BodyCompressionBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } +} + pub enum RecordBatchOffset {} #[derive(Copy, Clone, Debug, PartialEq)] @@ -216,6 +445,9 @@ impl<'a> RecordBatch<'a> { ) -> flatbuffers::WIPOffset> { let mut builder = RecordBatchBuilder::new(_fbb); builder.add_length(args.length); + if let Some(x) = args.compression { + builder.add_compression(x); + } if let Some(x) = args.buffers { builder.add_buffers(x); } @@ -228,6 +460,7 @@ impl<'a> RecordBatch<'a> { pub const VT_LENGTH: flatbuffers::VOffsetT = 4; pub const VT_NODES: flatbuffers::VOffsetT = 6; pub const VT_BUFFERS: flatbuffers::VOffsetT = 8; + pub const VT_COMPRESSION: flatbuffers::VOffsetT = 10; /// number of records / rows. 
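
As a usage sketch (assumed, not part of the generated file), the builder above composes in the usual flatbuffers way, and the offset it returns is what the new RecordBatch `compression` slot below expects:

fn example_body_compression<'a>(
    fbb: &mut flatbuffers::FlatBufferBuilder<'a>,
) -> flatbuffers::WIPOffset<BodyCompression<'a>> {
    // Both fields carry defaults (LZ4_FRAME / BUFFER), so callers only
    // override what differs from them.
    BodyCompression::create(
        fbb,
        &BodyCompressionArgs {
            codec: CompressionType::ZSTD,
            method: BodyCompressionMethod::BUFFER,
        },
    )
}
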
The arrays in the batch should all have this
    /// length
@@ -262,12 +495,22 @@ impl<'a> RecordBatch<'a> {
             )
             .map(|v| v.safe_slice())
     }
+    /// Optional compression of the message body
+    #[inline]
+    pub fn compression(&self) -> Option<BodyCompression<'a>> {
+        self._tab
+            .get::<flatbuffers::ForwardsUOffset<BodyCompression<'a>>>(
+                RecordBatch::VT_COMPRESSION,
+                None,
+            )
+    }
 }

 pub struct RecordBatchArgs<'a> {
     pub length: i64,
     pub nodes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, FieldNode>>>,
     pub buffers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a, Buffer>>>,
+    pub compression: Option<flatbuffers::WIPOffset<BodyCompression<'a>>>,
 }
 impl<'a> Default for RecordBatchArgs<'a> {
     #[inline]
     fn default() -> Self {
@@ -276,6 +519,7 @@ impl<'a> Default for RecordBatchArgs<'a> {
             length: 0,
             nodes: None,
             buffers: None,
+            compression: None,
         }
     }
 }
@@ -308,6 +552,17 @@ impl<'a: 'b, 'b> RecordBatchBuilder<'a, 'b> {
         );
     }
     #[inline]
+    pub fn add_compression(
+        &mut self,
+        compression: flatbuffers::WIPOffset<BodyCompression<'b>>,
+    ) {
+        self.fbb_
+            .push_slot_always::<flatbuffers::WIPOffset<BodyCompression>>(
+                RecordBatch::VT_COMPRESSION,
+                compression,
+            );
+    }
+    #[inline]
     pub fn new(
         _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,
     ) -> RecordBatchBuilder<'a, 'b> {
diff --git a/rust/arrow/src/ipc/gen/Schema.rs b/rust/arrow/src/ipc/gen/Schema.rs
index 24136adef6a..1e868949fd6 100644
--- a/rust/arrow/src/ipc/gen/Schema.rs
+++ b/rust/arrow/src/ipc/gen/Schema.rs
@@ -26,18 +26,24 @@ use std::{cmp::Ordering, mem};
 #[repr(i16)]
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
 pub enum MetadataVersion {
-    /// 0.1.0
+    /// 0.1.0 (October 2016).
     V1 = 0,
-    /// 0.2.0
+    /// 0.2.0 (February 2017). Non-backwards compatible with V1.
     V2 = 1,
-    /// 0.3.0 -> 0.7.1
+    /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2.
     V3 = 2,
-    /// >= 0.8.0
+    /// >= 0.8.0 (December 2017). Non-backwards compatible with V3.
     V4 = 3,
+    /// >= 1.0.0 (July 2020). Backwards compatible with V4 (V5 readers can read V4
+    /// metadata and IPC messages). Implementations are recommended to provide a
+    /// V4 compatibility mode with V5 format changes disabled.
+    ///
+    /// TODO: Add list of non-forward compatible changes.
+    V5 = 4,
 }

 const ENUM_MIN_METADATA_VERSION: i16 = 0;
-const ENUM_MAX_METADATA_VERSION: i16 = 3;
+const ENUM_MAX_METADATA_VERSION: i16 = 4;

 impl<'a> flatbuffers::Follow<'a> for MetadataVersion {
     type Inner = Self;
@@ -71,21 +77,104 @@ impl flatbuffers::Push for MetadataVersion {
 }

 #[allow(non_camel_case_types)]
-const ENUM_VALUES_METADATA_VERSION: [MetadataVersion; 4] = [
+const ENUM_VALUES_METADATA_VERSION: [MetadataVersion; 5] = [
     MetadataVersion::V1,
     MetadataVersion::V2,
     MetadataVersion::V3,
     MetadataVersion::V4,
+    MetadataVersion::V5,
 ];

 #[allow(non_camel_case_types)]
-const ENUM_NAMES_METADATA_VERSION: [&'static str; 4] = ["V1", "V2", "V3", "V4"];
+const ENUM_NAMES_METADATA_VERSION: [&'static str; 5] = ["V1", "V2", "V3", "V4", "V5"];

 pub fn enum_name_metadata_version(e: MetadataVersion) -> &'static str {
     let index = e as i16;
     ENUM_NAMES_METADATA_VERSION[index as usize]
 }

+/// Represents Arrow Features that might not have full support
+/// within implementations. This is intended to be used in
+/// two scenarios:
+/// 1. A mechanism for readers of Arrow Streams
+///    and files to understand that the stream or file makes
+///    use of a feature that isn't supported or is unknown to
+///    the implementation (and therefore can meet the Arrow
+///    forward compatibility guarantees).
+/// 2. A means of negotiating between a client and server
+///    what features a stream is allowed to use. The enum
+///    values here are intended to represent higher-level
+///    features; additional details may be negotiated
+///    with key-value pairs specific to the protocol.
+/// +/// Enums added to this list should be assigned power-of-two values +/// to facilitate exchanging and comparing bitmaps for supported +/// features. +#[allow(non_camel_case_types)] +#[repr(i64)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum Feature { + /// Needed to make flatbuffers happy. + UNUSED = 0, + /// The stream makes use of multiple full dictionaries with the + /// same ID and assumes clients implement dictionary replacement + /// correctly. + DICTIONARY_REPLACEMENT = 1, + /// The stream makes use of compressed bodies as described + /// in Message.fbs. + COMPRESSED_BODY = 2, +} + +const ENUM_MIN_FEATURE: i64 = 0; +const ENUM_MAX_FEATURE: i64 = 2; + +impl<'a> flatbuffers::Follow<'a> for Feature { + type Inner = Self; + #[inline] + fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + flatbuffers::read_scalar_at::(buf, loc) + } +} + +impl flatbuffers::EndianScalar for Feature { + #[inline] + fn to_little_endian(self) -> Self { + let n = i64::to_le(self as i64); + let p = &n as *const i64 as *const Feature; + unsafe { *p } + } + #[inline] + fn from_little_endian(self) -> Self { + let n = i64::from_le(self as i64); + let p = &n as *const i64 as *const Feature; + unsafe { *p } + } +} + +impl flatbuffers::Push for Feature { + type Output = Feature; + #[inline] + fn push(&self, dst: &mut [u8], _rest: &[u8]) { + flatbuffers::emplace_scalar::(dst, *self); + } +} + +#[allow(non_camel_case_types)] +const ENUM_VALUES_FEATURE: [Feature; 3] = [ + Feature::UNUSED, + Feature::DICTIONARY_REPLACEMENT, + Feature::COMPRESSED_BODY, +]; + +#[allow(non_camel_case_types)] +const ENUM_NAMES_FEATURE: [&'static str; 3] = + ["UNUSED", "DICTIONARY_REPLACEMENT", "COMPRESSED_BODY"]; + +pub fn enum_name_feature(e: Feature) -> &'static str { + let index = e as i64; + ENUM_NAMES_FEATURE[index as usize] +} + #[allow(non_camel_case_types)] #[repr(i16)] #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] @@ -1757,6 +1846,10 @@ impl<'a: 'b, 'b> BoolBuilder<'a, 'b> { pub enum DecimalOffset {} #[derive(Copy, Clone, Debug, PartialEq)] +/// Exact decimal value represented as an integer value in two's +/// complement. Currently only 128-bit (16-byte) integers are used but this may +/// be expanded in the future. The representation uses the endianness indicated +/// in the Schema. pub struct Decimal<'a> { pub _tab: flatbuffers::Table<'a>, } @@ -1782,6 +1875,7 @@ impl<'a> Decimal<'a> { args: &'args DecimalArgs, ) -> flatbuffers::WIPOffset> { let mut builder = DecimalBuilder::new(_fbb); + builder.add_bitWidth(args.bitWidth); builder.add_scale(args.scale); builder.add_precision(args.precision); builder.finish() @@ -1789,6 +1883,7 @@ impl<'a> Decimal<'a> { pub const VT_PRECISION: flatbuffers::VOffsetT = 4; pub const VT_SCALE: flatbuffers::VOffsetT = 6; + pub const VT_BITWIDTH: flatbuffers::VOffsetT = 8; /// Total number of decimal digits #[inline] @@ -1802,11 +1897,22 @@ impl<'a> Decimal<'a> { pub fn scale(&self) -> i32 { self._tab.get::(Decimal::VT_SCALE, Some(0)).unwrap() } + /// Number of bits per value. The only accepted width right now is 128 but + /// this field exists for forward compatibility so that other bit widths may + /// be supported in future format versions. We use bitWidth for consistency + /// with Int::bitWidth. 
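
Because flatbuffers elides slots equal to their default, a conventional 128-bit decimal serializes no bitWidth at all while readers still observe 128; a small assumed-usage sketch:

fn example_decimal(fbb: &mut flatbuffers::FlatBufferBuilder<'_>) {
    // 128 equals the vtable default, so add_bitWidth writes nothing and
    // Decimal::bitWidth() still returns 128 on the read side.
    let _decimal = Decimal::create(
        fbb,
        &DecimalArgs {
            precision: 38,
            scale: 10,
            bitWidth: 128,
        },
    );
}
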
+ #[inline] + pub fn bitWidth(&self) -> i32 { + self._tab + .get::(Decimal::VT_BITWIDTH, Some(128)) + .unwrap() + } } pub struct DecimalArgs { pub precision: i32, pub scale: i32, + pub bitWidth: i32, } impl<'a> Default for DecimalArgs { #[inline] @@ -1814,6 +1920,7 @@ impl<'a> Default for DecimalArgs { DecimalArgs { precision: 0, scale: 0, + bitWidth: 128, } } } @@ -1832,6 +1939,11 @@ impl<'a: 'b, 'b> DecimalBuilder<'a, 'b> { self.fbb_.push_slot::(Decimal::VT_SCALE, scale, 0); } #[inline] + pub fn add_bitWidth(&mut self, bitWidth: i32) { + self.fbb_ + .push_slot::(Decimal::VT_BITWIDTH, bitWidth, 128); + } + #[inline] pub fn new( _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, ) -> DecimalBuilder<'a, 'b> { @@ -2463,8 +2575,11 @@ impl<'a> DictionaryEncoding<'a> { .get::(DictionaryEncoding::VT_ID, Some(0)) .unwrap() } - /// The dictionary indices are constrained to be positive integers. If this - /// field is null, the indices must be signed int32 + /// The dictionary indices are constrained to be non-negative integers. If + /// this field is null, the indices must be signed int32. To maximize + /// cross-language compatibility and performance, implementations are + /// recommended to prefer signed integer types over unsigned integer types + /// and to avoid uint64 indices unless they are required by an application. #[inline] pub fn indexType(&self) -> Option> { self._tab.get::>>( @@ -3019,6 +3134,9 @@ impl<'a> Schema<'a> { args: &'args SchemaArgs<'args>, ) -> flatbuffers::WIPOffset> { let mut builder = SchemaBuilder::new(_fbb); + if let Some(x) = args.features { + builder.add_features(x); + } if let Some(x) = args.custom_metadata { builder.add_custom_metadata(x); } @@ -3032,6 +3150,7 @@ impl<'a> Schema<'a> { pub const VT_ENDIANNESS: flatbuffers::VOffsetT = 4; pub const VT_FIELDS: flatbuffers::VOffsetT = 6; pub const VT_CUSTOM_METADATA: flatbuffers::VOffsetT = 8; + pub const VT_FEATURES: flatbuffers::VOffsetT = 10; /// endianness of the buffer /// it is Little Endian by default @@ -3058,6 +3177,15 @@ impl<'a> Schema<'a> { flatbuffers::Vector>>, >>(Schema::VT_CUSTOM_METADATA, None) } + /// Features used in the stream/file. 
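
Because the Feature variants carry power-of-two values, a reader can fold a stream's declared features into a single bitmap and compare it with what it implements; a hypothetical helper (not generated code):

// True when every feature the stream declares is one this reader supports.
fn features_supported(declared: &[Feature], supported: i64) -> bool {
    let declared_mask = declared.iter().fold(0i64, |m, f| m | *f as i64);
    declared_mask & !supported == 0
}

// A reader that only implements compressed bodies:
// features_supported(&[Feature::COMPRESSED_BODY], Feature::COMPRESSED_BODY as i64) -> true
// features_supported(&[Feature::DICTIONARY_REPLACEMENT], Feature::COMPRESSED_BODY as i64) -> false
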
+ #[inline] + pub fn features(&self) -> Option> { + self._tab + .get::>>( + Schema::VT_FEATURES, + None, + ) + } } pub struct SchemaArgs<'a> { @@ -3072,6 +3200,7 @@ pub struct SchemaArgs<'a> { flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>>, >, >, + pub features: Option>>, } impl<'a> Default for SchemaArgs<'a> { #[inline] @@ -3080,6 +3209,7 @@ impl<'a> Default for SchemaArgs<'a> { endianness: Endianness::Little, fields: None, custom_metadata: None, + features: None, } } } @@ -3119,6 +3249,14 @@ impl<'a: 'b, 'b> SchemaBuilder<'a, 'b> { ); } #[inline] + pub fn add_features( + &mut self, + features: flatbuffers::WIPOffset>, + ) { + self.fbb_ + .push_slot_always::>(Schema::VT_FEATURES, features); + } + #[inline] pub fn new( _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, ) -> SchemaBuilder<'a, 'b> { diff --git a/rust/arrow/src/ipc/gen/SparseTensor.rs b/rust/arrow/src/ipc/gen/SparseTensor.rs index c7168973ebf..a620a38878e 100644 --- a/rust/arrow/src/ipc/gen/SparseTensor.rs +++ b/rust/arrow/src/ipc/gen/SparseTensor.rs @@ -89,10 +89,11 @@ pub enum SparseTensorIndex { NONE = 0, SparseTensorIndexCOO = 1, SparseMatrixIndexCSX = 2, + SparseTensorIndexCSF = 3, } const ENUM_MIN_SPARSE_TENSOR_INDEX: u8 = 0; -const ENUM_MAX_SPARSE_TENSOR_INDEX: u8 = 2; +const ENUM_MAX_SPARSE_TENSOR_INDEX: u8 = 3; impl<'a> flatbuffers::Follow<'a> for SparseTensorIndex { type Inner = Self; @@ -126,15 +127,20 @@ impl flatbuffers::Push for SparseTensorIndex { } #[allow(non_camel_case_types)] -const ENUM_VALUES_SPARSE_TENSOR_INDEX: [SparseTensorIndex; 3] = [ +const ENUM_VALUES_SPARSE_TENSOR_INDEX: [SparseTensorIndex; 4] = [ SparseTensorIndex::NONE, SparseTensorIndex::SparseTensorIndexCOO, SparseTensorIndex::SparseMatrixIndexCSX, + SparseTensorIndex::SparseTensorIndexCSF, ]; #[allow(non_camel_case_types)] -const ENUM_NAMES_SPARSE_TENSOR_INDEX: [&'static str; 3] = - ["NONE", "SparseTensorIndexCOO", "SparseMatrixIndexCSX"]; +const ENUM_NAMES_SPARSE_TENSOR_INDEX: [&'static str; 4] = [ + "NONE", + "SparseTensorIndexCOO", + "SparseMatrixIndexCSX", + "SparseTensorIndexCSF", +]; pub fn enum_name_sparse_tensor_index(e: SparseTensorIndex) -> &'static str { let index = e as u8; @@ -218,13 +224,16 @@ impl<'a> SparseTensorIndexCOO<'a> { /// The type of values in indicesBuffer #[inline] - pub fn indicesType(&self) -> Option> { - self._tab.get::>>( - SparseTensorIndexCOO::VT_INDICESTYPE, - None, - ) + pub fn indicesType(&self) -> Int<'a> { + self._tab + .get::>>( + SparseTensorIndexCOO::VT_INDICESTYPE, + None, + ) + .unwrap() } /// Non-negative byte offsets to advance one value cell along each dimension + /// If omitted, default to row-major order (C-like). 
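
The row-major default mentioned above is straightforward to materialize on the reading side; an std-only sketch:

// Byte strides for a C-order (row-major) layout: the last dimension is
// contiguous, and each earlier stride is the product of all later
// dimension sizes times the element width.
fn row_major_strides(shape: &[usize], elem_size: usize) -> Vec<usize> {
    let mut strides = vec![0; shape.len()];
    let mut acc = elem_size;
    for (i, &dim) in shape.iter().enumerate().rev() {
        strides[i] = acc;
        acc *= dim;
    }
    strides
}

// row_major_strides(&[2, 3, 4], 8) == vec![96, 32, 8]
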
#[inline] pub fn indicesStrides(&self) -> Option> { self._tab @@ -235,9 +244,10 @@ impl<'a> SparseTensorIndexCOO<'a> { } /// The location and size of the indices matrix's data #[inline] - pub fn indicesBuffer(&self) -> Option<&'a Buffer> { + pub fn indicesBuffer(&self) -> &'a Buffer { self._tab .get::(SparseTensorIndexCOO::VT_INDICESBUFFER, None) + .unwrap() } } @@ -250,9 +260,9 @@ impl<'a> Default for SparseTensorIndexCOOArgs<'a> { #[inline] fn default() -> Self { SparseTensorIndexCOOArgs { - indicesType: None, + indicesType: None, // required field indicesStrides: None, - indicesBuffer: None, + indicesBuffer: None, // required field } } } @@ -298,6 +308,10 @@ impl<'a: 'b, 'b> SparseTensorIndexCOOBuilder<'a, 'b> { #[inline] pub fn finish(self) -> flatbuffers::WIPOffset> { let o = self.fbb_.end_table(self.start_); + self.fbb_ + .required(o, SparseTensorIndexCOO::VT_INDICESTYPE, "indices_type"); + self.fbb_ + .required(o, SparseTensorIndexCOO::VT_INDICESBUFFER, "indices_buffer"); flatbuffers::WIPOffset::new(o.value()) } } @@ -365,11 +379,13 @@ impl<'a> SparseMatrixIndexCSX<'a> { } /// The type of values in indptrBuffer #[inline] - pub fn indptrType(&self) -> Option> { - self._tab.get::>>( - SparseMatrixIndexCSX::VT_INDPTRTYPE, - None, - ) + pub fn indptrType(&self) -> Int<'a> { + self._tab + .get::>>( + SparseMatrixIndexCSX::VT_INDPTRTYPE, + None, + ) + .unwrap() } /// indptrBuffer stores the location and size of indptr array that /// represents the range of the rows. @@ -394,17 +410,20 @@ impl<'a> SparseMatrixIndexCSX<'a> { /// /// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. #[inline] - pub fn indptrBuffer(&self) -> Option<&'a Buffer> { + pub fn indptrBuffer(&self) -> &'a Buffer { self._tab .get::(SparseMatrixIndexCSX::VT_INDPTRBUFFER, None) + .unwrap() } /// The type of values in indicesBuffer #[inline] - pub fn indicesType(&self) -> Option> { - self._tab.get::>>( - SparseMatrixIndexCSX::VT_INDICESTYPE, - None, - ) + pub fn indicesType(&self) -> Int<'a> { + self._tab + .get::>>( + SparseMatrixIndexCSX::VT_INDICESTYPE, + None, + ) + .unwrap() } /// indicesBuffer stores the location and size of the array that /// contains the column indices of the corresponding non-zero values. @@ -416,9 +435,10 @@ impl<'a> SparseMatrixIndexCSX<'a> { /// /// Note that the indices are sorted in lexicographical order for each row. 
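
To make the indptr layout concrete, here is a sketch over plain slices rather than the flatbuffers accessors (for compressedAxis == Column, read "row" as "column"):

// Visit (row, col) for every stored value of a CSR-style index.
fn for_each_nonzero(indptr: &[i64], indices: &[i64], mut visit: impl FnMut(usize, i64)) {
    for row in 0..indptr.len() - 1 {
        for k in indptr[row] as usize..indptr[row + 1] as usize {
            visit(row, indices[k]);
        }
    }
}

// With indptr = [0, 2, 3, 5, 5, 8, 10], the fourth row (i = 3) spans
// indptr[3]..indptr[4] = 5..5 and is therefore empty.
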
    #[inline]
-    pub fn indicesBuffer(&self) -> Option<&'a Buffer> {
+    pub fn indicesBuffer(&self) -> &'a Buffer {
         self._tab
             .get::<Buffer>(SparseMatrixIndexCSX::VT_INDICESBUFFER, None)
+            .unwrap()
     }
 }

@@ -434,10 +454,10 @@ impl<'a> Default for SparseMatrixIndexCSXArgs<'a> {
     fn default() -> Self {
         SparseMatrixIndexCSXArgs {
             compressedAxis: SparseMatrixCompressedAxis::Row,
-            indptrType: None,
-            indptrBuffer: None,
-            indicesType: None,
-            indicesBuffer: None,
+            indptrType: None,    // required field
+            indptrBuffer: None,  // required field
+            indicesType: None,   // required field
+            indicesBuffer: None, // required field
         }
     }
 }
@@ -495,6 +515,276 @@ impl<'a: 'b, 'b> SparseMatrixIndexCSXBuilder<'a, 'b> {
     #[inline]
     pub fn finish(self) -> flatbuffers::WIPOffset<SparseMatrixIndexCSX<'a>> {
         let o = self.fbb_.end_table(self.start_);
+        self.fbb_
+            .required(o, SparseMatrixIndexCSX::VT_INDPTRTYPE, "indptr_type");
+        self.fbb_
+            .required(o, SparseMatrixIndexCSX::VT_INDPTRBUFFER, "indptr_buffer");
+        self.fbb_
+            .required(o, SparseMatrixIndexCSX::VT_INDICESTYPE, "indices_type");
+        self.fbb_
+            .required(o, SparseMatrixIndexCSX::VT_INDICESBUFFER, "indices_buffer");
         flatbuffers::WIPOffset::new(o.value())
     }
 }
+
+pub enum SparseTensorIndexCSFOffset {}
+#[derive(Copy, Clone, Debug, PartialEq)]
+
+/// Compressed Sparse Fiber (CSF) sparse tensor index.
+pub struct SparseTensorIndexCSF<'a> {
+    pub _tab: flatbuffers::Table<'a>,
+}
+
+impl<'a> flatbuffers::Follow<'a> for SparseTensorIndexCSF<'a> {
+    type Inner = SparseTensorIndexCSF<'a>;
+    #[inline]
+    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+        Self {
+            _tab: flatbuffers::Table { buf: buf, loc: loc },
+        }
+    }
+}
+
+impl<'a> SparseTensorIndexCSF<'a> {
+    #[inline]
+    pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
+        SparseTensorIndexCSF { _tab: table }
+    }
+    #[allow(unused_mut)]
+    pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
+        _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
+        args: &'args SparseTensorIndexCSFArgs<'args>,
+    ) -> flatbuffers::WIPOffset<SparseTensorIndexCSF<'bldr>> {
+        let mut builder = SparseTensorIndexCSFBuilder::new(_fbb);
+        if let Some(x) = args.axisOrder {
+            builder.add_axisOrder(x);
+        }
+        if let Some(x) = args.indicesBuffers {
+            builder.add_indicesBuffers(x);
+        }
+        if let Some(x) = args.indicesType {
+            builder.add_indicesType(x);
+        }
+        if let Some(x) = args.indptrBuffers {
+            builder.add_indptrBuffers(x);
+        }
+        if let Some(x) = args.indptrType {
+            builder.add_indptrType(x);
+        }
+        builder.finish()
+    }
+
+    pub const VT_INDPTRTYPE: flatbuffers::VOffsetT = 4;
+    pub const VT_INDPTRBUFFERS: flatbuffers::VOffsetT = 6;
+    pub const VT_INDICESTYPE: flatbuffers::VOffsetT = 8;
+    pub const VT_INDICESBUFFERS: flatbuffers::VOffsetT = 10;
+    pub const VT_AXISORDER: flatbuffers::VOffsetT = 12;
+
+    /// CSF is a generalization of compressed sparse row (CSR) index.
+    /// See [smith2017knl]: http://shaden.io/pub-files/smith2017knl.pdf
+    ///
+    /// CSF index recursively compresses each dimension of a tensor into a set
+    /// of prefix trees. Each path from a root to leaf forms one tensor
+    /// non-zero index. CSF is implemented with two arrays of buffers and one
+    /// array of integers.
+ /// + /// For example, let X be a 2x3x4x5 tensor and let it have the following + /// 8 non-zero values: + /// + /// X[0, 0, 0, 1] := 1 + /// X[0, 0, 0, 2] := 2 + /// X[0, 1, 0, 0] := 3 + /// X[0, 1, 0, 2] := 4 + /// X[0, 1, 1, 0] := 5 + /// X[1, 1, 1, 0] := 6 + /// X[1, 1, 1, 1] := 7 + /// X[1, 1, 1, 2] := 8 + /// + /// As a prefix tree this would be represented as: + /// + /// // 0 1 + /// // / \ | + /// // 0 1 1 + /// // / / \ | + /// // 0 0 1 1 + /// // /| /| | /| | + /// // 1 2 0 2 0 0 1 2 + /// The type of values in indptrBuffers + #[inline] + pub fn indptrType(&self) -> Int<'a> { + self._tab + .get::>>( + SparseTensorIndexCSF::VT_INDPTRTYPE, + None, + ) + .unwrap() + } + /// indptrBuffers stores the sparsity structure. + /// Each two consecutive dimensions in a tensor correspond to a buffer in + /// indptrBuffers. A pair of consecutive values at indptrBuffers[dim][i] + /// and indptrBuffers[dim][i + 1] signify a range of nodes in + /// indicesBuffers[dim + 1] who are children of indicesBuffers[dim][i] node. + /// + /// For example, the indptrBuffers for the above X is: + /// + /// indptrBuffer(X) = [ + /// [0, 2, 3], + /// [0, 1, 3, 4], + /// [0, 2, 4, 5, 8] + /// ]. + /// + #[inline] + pub fn indptrBuffers(&self) -> &'a [Buffer] { + self._tab + .get::>>( + SparseTensorIndexCSF::VT_INDPTRBUFFERS, + None, + ) + .map(|v| v.safe_slice()) + .unwrap() + } + /// The type of values in indicesBuffers + #[inline] + pub fn indicesType(&self) -> Int<'a> { + self._tab + .get::>>( + SparseTensorIndexCSF::VT_INDICESTYPE, + None, + ) + .unwrap() + } + /// indicesBuffers stores values of nodes. + /// Each tensor dimension corresponds to a buffer in indicesBuffers. + /// For example, the indicesBuffers for the above X is: + /// + /// indicesBuffer(X) = [ + /// [0, 1], + /// [0, 1, 1], + /// [0, 0, 1, 1], + /// [1, 2, 0, 2, 0, 0, 1, 2] + /// ]. + /// + #[inline] + pub fn indicesBuffers(&self) -> &'a [Buffer] { + self._tab + .get::>>( + SparseTensorIndexCSF::VT_INDICESBUFFERS, + None, + ) + .map(|v| v.safe_slice()) + .unwrap() + } + /// axisOrder stores the sequence in which dimensions were traversed to + /// produce the prefix tree. + /// For example, the axisOrder for the above X is: + /// + /// axisOrder(X) = [0, 1, 2, 3]. 
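
Expanding the prefix trees recovers exactly the eight coordinates listed above; a recursive std-only sketch over plain vectors (the generated accessors return typed buffers, not these Vecs):

// Depth-first walk of the CSF prefix trees: `indptr` holds one Vec per
// pair of consecutive dimensions, `indices` one Vec per dimension.
fn expand_csf(
    indptr: &[Vec<usize>],
    indices: &[Vec<i64>],
    dim: usize,
    node: usize,
    prefix: &mut Vec<i64>,
    out: &mut Vec<Vec<i64>>,
) {
    prefix.push(indices[dim][node]);
    if dim + 1 == indices.len() {
        out.push(prefix.clone()); // reached a leaf: one complete non-zero index
    } else {
        for child in indptr[dim][node]..indptr[dim][node + 1] {
            expand_csf(indptr, indices, dim + 1, child, prefix, out);
        }
    }
    prefix.pop();
}

// Seeding the walk with the two roots of dimension 0 yields
// [0,0,0,1], [0,0,0,2], [0,1,0,0], [0,1,0,2], [0,1,1,0],
// [1,1,1,0], [1,1,1,1], [1,1,1,2] for the example X.
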
+ /// + #[inline] + pub fn axisOrder(&self) -> flatbuffers::Vector<'a, i32> { + self._tab + .get::>>( + SparseTensorIndexCSF::VT_AXISORDER, + None, + ) + .unwrap() + } +} + +pub struct SparseTensorIndexCSFArgs<'a> { + pub indptrType: Option>>, + pub indptrBuffers: Option>>, + pub indicesType: Option>>, + pub indicesBuffers: Option>>, + pub axisOrder: Option>>, +} +impl<'a> Default for SparseTensorIndexCSFArgs<'a> { + #[inline] + fn default() -> Self { + SparseTensorIndexCSFArgs { + indptrType: None, // required field + indptrBuffers: None, // required field + indicesType: None, // required field + indicesBuffers: None, // required field + axisOrder: None, // required field + } + } +} +pub struct SparseTensorIndexCSFBuilder<'a: 'b, 'b> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, + start_: flatbuffers::WIPOffset, +} +impl<'a: 'b, 'b> SparseTensorIndexCSFBuilder<'a, 'b> { + #[inline] + pub fn add_indptrType(&mut self, indptrType: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + SparseTensorIndexCSF::VT_INDPTRTYPE, + indptrType, + ); + } + #[inline] + pub fn add_indptrBuffers( + &mut self, + indptrBuffers: flatbuffers::WIPOffset>, + ) { + self.fbb_.push_slot_always::>( + SparseTensorIndexCSF::VT_INDPTRBUFFERS, + indptrBuffers, + ); + } + #[inline] + pub fn add_indicesType(&mut self, indicesType: flatbuffers::WIPOffset>) { + self.fbb_.push_slot_always::>( + SparseTensorIndexCSF::VT_INDICESTYPE, + indicesType, + ); + } + #[inline] + pub fn add_indicesBuffers( + &mut self, + indicesBuffers: flatbuffers::WIPOffset>, + ) { + self.fbb_.push_slot_always::>( + SparseTensorIndexCSF::VT_INDICESBUFFERS, + indicesBuffers, + ); + } + #[inline] + pub fn add_axisOrder( + &mut self, + axisOrder: flatbuffers::WIPOffset>, + ) { + self.fbb_.push_slot_always::>( + SparseTensorIndexCSF::VT_AXISORDER, + axisOrder, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, + ) -> SparseTensorIndexCSFBuilder<'a, 'b> { + let start = _fbb.start_table(); + SparseTensorIndexCSFBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + self.fbb_ + .required(o, SparseTensorIndexCSF::VT_INDPTRTYPE, "indptr_type"); + self.fbb_ + .required(o, SparseTensorIndexCSF::VT_INDPTRBUFFERS, "indptr_buffers"); + self.fbb_ + .required(o, SparseTensorIndexCSF::VT_INDICESTYPE, "indices_type"); + self.fbb_.required( + o, + SparseTensorIndexCSF::VT_INDICESBUFFERS, + "indices_buffers", + ); + self.fbb_ + .required(o, SparseTensorIndexCSF::VT_AXISORDER, "axis_order"); flatbuffers::WIPOffset::new(o.value()) } } @@ -563,22 +853,24 @@ impl<'a> SparseTensor<'a> { /// Currently only fixed-width value types are supported, /// no strings or nested types. #[inline] - pub fn type_(&self) -> Option> { + pub fn type_(&self) -> flatbuffers::Table<'a> { self._tab .get::>>( SparseTensor::VT_TYPE_, None, ) + .unwrap() } /// The dimensions of the tensor, optionally named. #[inline] pub fn shape( &self, - ) -> Option>>> - { - self._tab.get::>>, - >>(SparseTensor::VT_SHAPE, None) + ) -> flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>> { + self._tab + .get::>>, + >>(SparseTensor::VT_SHAPE, None) + .unwrap() } /// The number of non-zero values in a sparse tensor. 
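
The `required` calls above are the "errors on required fields" the commit message mentions: `finish()` now asserts that each required slot was written, panicking at build time instead of emitting an unreadable table, which is also why the Args defaults are annotated and the accessors can unwrap. A construction sketch, where `indptr_type`, `indptr_buffers`, `indices_type`, `indices_buffers`, and `axis_order` are hypothetical offsets already serialized into `fbb`:

// Leaving any of the five fields None would make finish() panic inside
// create() rather than produce a table that fails on read.
let _csf = SparseTensorIndexCSF::create(
    &mut fbb,
    &SparseTensorIndexCSFArgs {
        indptrType: Some(indptr_type),
        indptrBuffers: Some(indptr_buffers),
        indicesType: Some(indices_type),
        indicesBuffers: Some(indices_buffers),
        axisOrder: Some(axis_order),
    },
);
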
#[inline] @@ -598,23 +890,26 @@ impl<'a> SparseTensor<'a> { } /// Sparse tensor index #[inline] - pub fn sparseIndex(&self) -> Option> { + pub fn sparseIndex(&self) -> flatbuffers::Table<'a> { self._tab .get::>>( SparseTensor::VT_SPARSEINDEX, None, ) + .unwrap() } /// The location and size of the tensor's data #[inline] - pub fn data(&self) -> Option<&'a Buffer> { - self._tab.get::(SparseTensor::VT_DATA, None) + pub fn data(&self) -> &'a Buffer { + self._tab + .get::(SparseTensor::VT_DATA, None) + .unwrap() } #[inline] #[allow(non_snake_case)] pub fn type_as_null(&self) -> Option> { if self.type_type() == Type::Null { - self.type_().map(|u| Null::init_from_table(u)) + Some(Null::init_from_table(self.type_())) } else { None } @@ -624,7 +919,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_int(&self) -> Option> { if self.type_type() == Type::Int { - self.type_().map(|u| Int::init_from_table(u)) + Some(Int::init_from_table(self.type_())) } else { None } @@ -634,7 +929,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_floating_point(&self) -> Option> { if self.type_type() == Type::FloatingPoint { - self.type_().map(|u| FloatingPoint::init_from_table(u)) + Some(FloatingPoint::init_from_table(self.type_())) } else { None } @@ -644,7 +939,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_binary(&self) -> Option> { if self.type_type() == Type::Binary { - self.type_().map(|u| Binary::init_from_table(u)) + Some(Binary::init_from_table(self.type_())) } else { None } @@ -654,7 +949,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_utf_8(&self) -> Option> { if self.type_type() == Type::Utf8 { - self.type_().map(|u| Utf8::init_from_table(u)) + Some(Utf8::init_from_table(self.type_())) } else { None } @@ -664,7 +959,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_bool(&self) -> Option> { if self.type_type() == Type::Bool { - self.type_().map(|u| Bool::init_from_table(u)) + Some(Bool::init_from_table(self.type_())) } else { None } @@ -674,7 +969,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_decimal(&self) -> Option> { if self.type_type() == Type::Decimal { - self.type_().map(|u| Decimal::init_from_table(u)) + Some(Decimal::init_from_table(self.type_())) } else { None } @@ -684,7 +979,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_date(&self) -> Option> { if self.type_type() == Type::Date { - self.type_().map(|u| Date::init_from_table(u)) + Some(Date::init_from_table(self.type_())) } else { None } @@ -694,7 +989,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_time(&self) -> Option> { if self.type_type() == Type::Time { - self.type_().map(|u| Time::init_from_table(u)) + Some(Time::init_from_table(self.type_())) } else { None } @@ -704,7 +999,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_timestamp(&self) -> Option> { if self.type_type() == Type::Timestamp { - self.type_().map(|u| Timestamp::init_from_table(u)) + Some(Timestamp::init_from_table(self.type_())) } else { None } @@ -714,7 +1009,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_interval(&self) -> Option> { if self.type_type() == Type::Interval { - self.type_().map(|u| Interval::init_from_table(u)) + Some(Interval::init_from_table(self.type_())) } else { None } @@ -724,7 +1019,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_list(&self) -> Option> { if self.type_type() == Type::List 
{ - self.type_().map(|u| List::init_from_table(u)) + Some(List::init_from_table(self.type_())) } else { None } @@ -734,7 +1029,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_struct_(&self) -> Option> { if self.type_type() == Type::Struct_ { - self.type_().map(|u| Struct_::init_from_table(u)) + Some(Struct_::init_from_table(self.type_())) } else { None } @@ -744,7 +1039,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_union(&self) -> Option> { if self.type_type() == Type::Union { - self.type_().map(|u| Union::init_from_table(u)) + Some(Union::init_from_table(self.type_())) } else { None } @@ -754,7 +1049,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_fixed_size_binary(&self) -> Option> { if self.type_type() == Type::FixedSizeBinary { - self.type_().map(|u| FixedSizeBinary::init_from_table(u)) + Some(FixedSizeBinary::init_from_table(self.type_())) } else { None } @@ -764,7 +1059,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_fixed_size_list(&self) -> Option> { if self.type_type() == Type::FixedSizeList { - self.type_().map(|u| FixedSizeList::init_from_table(u)) + Some(FixedSizeList::init_from_table(self.type_())) } else { None } @@ -774,7 +1069,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_map(&self) -> Option> { if self.type_type() == Type::Map { - self.type_().map(|u| Map::init_from_table(u)) + Some(Map::init_from_table(self.type_())) } else { None } @@ -784,7 +1079,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_duration(&self) -> Option> { if self.type_type() == Type::Duration { - self.type_().map(|u| Duration::init_from_table(u)) + Some(Duration::init_from_table(self.type_())) } else { None } @@ -794,7 +1089,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_binary(&self) -> Option> { if self.type_type() == Type::LargeBinary { - self.type_().map(|u| LargeBinary::init_from_table(u)) + Some(LargeBinary::init_from_table(self.type_())) } else { None } @@ -804,7 +1099,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_utf_8(&self) -> Option> { if self.type_type() == Type::LargeUtf8 { - self.type_().map(|u| LargeUtf8::init_from_table(u)) + Some(LargeUtf8::init_from_table(self.type_())) } else { None } @@ -814,7 +1109,7 @@ impl<'a> SparseTensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_list(&self) -> Option> { if self.type_type() == Type::LargeList { - self.type_().map(|u| LargeList::init_from_table(u)) + Some(LargeList::init_from_table(self.type_())) } else { None } @@ -826,8 +1121,7 @@ impl<'a> SparseTensor<'a> { &self, ) -> Option> { if self.sparseIndex_type() == SparseTensorIndex::SparseTensorIndexCOO { - self.sparseIndex() - .map(|u| SparseTensorIndexCOO::init_from_table(u)) + Some(SparseTensorIndexCOO::init_from_table(self.sparseIndex())) } else { None } @@ -839,8 +1133,19 @@ impl<'a> SparseTensor<'a> { &self, ) -> Option> { if self.sparseIndex_type() == SparseTensorIndex::SparseMatrixIndexCSX { - self.sparseIndex() - .map(|u| SparseMatrixIndexCSX::init_from_table(u)) + Some(SparseMatrixIndexCSX::init_from_table(self.sparseIndex())) + } else { + None + } + } + + #[inline] + #[allow(non_snake_case)] + pub fn sparseIndex_as_sparse_tensor_index_csf( + &self, + ) -> Option> { + if self.sparseIndex_type() == SparseTensorIndex::SparseTensorIndexCSF { + Some(SparseTensorIndexCSF::init_from_table(self.sparseIndex())) } else { None } @@ -865,12 +1170,12 @@ impl<'a> Default for 
SparseTensorArgs<'a> { fn default() -> Self { SparseTensorArgs { type_type: Type::NONE, - type_: None, - shape: None, + type_: None, // required field + shape: None, // required field non_zero_length: 0, sparseIndex_type: SparseTensorIndex::NONE, - sparseIndex: None, - data: None, + sparseIndex: None, // required field + data: None, // required field } } } @@ -943,6 +1248,11 @@ impl<'a: 'b, 'b> SparseTensorBuilder<'a, 'b> { #[inline] pub fn finish(self) -> flatbuffers::WIPOffset> { let o = self.fbb_.end_table(self.start_); + self.fbb_.required(o, SparseTensor::VT_TYPE_, "type_"); + self.fbb_.required(o, SparseTensor::VT_SHAPE, "shape"); + self.fbb_ + .required(o, SparseTensor::VT_SPARSEINDEX, "sparse_index"); + self.fbb_.required(o, SparseTensor::VT_DATA, "data"); flatbuffers::WIPOffset::new(o.value()) } } diff --git a/rust/arrow/src/ipc/gen/Tensor.rs b/rust/arrow/src/ipc/gen/Tensor.rs index 9cc2885e338..8532cee4770 100644 --- a/rust/arrow/src/ipc/gen/Tensor.rs +++ b/rust/arrow/src/ipc/gen/Tensor.rs @@ -180,24 +180,27 @@ impl<'a> Tensor<'a> { /// The type of data contained in a value cell. Currently only fixed-width /// value types are supported, no strings or nested types #[inline] - pub fn type_(&self) -> Option> { + pub fn type_(&self) -> flatbuffers::Table<'a> { self._tab .get::>>( Tensor::VT_TYPE_, None, ) + .unwrap() } /// The dimensions of the tensor, optionally named #[inline] pub fn shape( &self, - ) -> Option>>> - { - self._tab.get::>>, - >>(Tensor::VT_SHAPE, None) + ) -> flatbuffers::Vector<'a, flatbuffers::ForwardsUOffset>> { + self._tab + .get::>>, + >>(Tensor::VT_SHAPE, None) + .unwrap() } /// Non-negative byte offsets to advance one value cell along each dimension + /// If omitted, default to row-major order (C-like). #[inline] pub fn strides(&self) -> Option> { self._tab @@ -208,14 +211,14 @@ impl<'a> Tensor<'a> { } /// The location and size of the tensor's data #[inline] - pub fn data(&self) -> Option<&'a Buffer> { - self._tab.get::(Tensor::VT_DATA, None) + pub fn data(&self) -> &'a Buffer { + self._tab.get::(Tensor::VT_DATA, None).unwrap() } #[inline] #[allow(non_snake_case)] pub fn type_as_null(&self) -> Option> { if self.type_type() == Type::Null { - self.type_().map(|u| Null::init_from_table(u)) + Some(Null::init_from_table(self.type_())) } else { None } @@ -225,7 +228,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_int(&self) -> Option> { if self.type_type() == Type::Int { - self.type_().map(|u| Int::init_from_table(u)) + Some(Int::init_from_table(self.type_())) } else { None } @@ -235,7 +238,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_floating_point(&self) -> Option> { if self.type_type() == Type::FloatingPoint { - self.type_().map(|u| FloatingPoint::init_from_table(u)) + Some(FloatingPoint::init_from_table(self.type_())) } else { None } @@ -245,7 +248,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_binary(&self) -> Option> { if self.type_type() == Type::Binary { - self.type_().map(|u| Binary::init_from_table(u)) + Some(Binary::init_from_table(self.type_())) } else { None } @@ -255,7 +258,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_utf_8(&self) -> Option> { if self.type_type() == Type::Utf8 { - self.type_().map(|u| Utf8::init_from_table(u)) + Some(Utf8::init_from_table(self.type_())) } else { None } @@ -265,7 +268,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_bool(&self) -> Option> { if self.type_type() == Type::Bool { - self.type_().map(|u| 
Bool::init_from_table(u)) + Some(Bool::init_from_table(self.type_())) } else { None } @@ -275,7 +278,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_decimal(&self) -> Option> { if self.type_type() == Type::Decimal { - self.type_().map(|u| Decimal::init_from_table(u)) + Some(Decimal::init_from_table(self.type_())) } else { None } @@ -285,7 +288,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_date(&self) -> Option> { if self.type_type() == Type::Date { - self.type_().map(|u| Date::init_from_table(u)) + Some(Date::init_from_table(self.type_())) } else { None } @@ -295,7 +298,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_time(&self) -> Option> { if self.type_type() == Type::Time { - self.type_().map(|u| Time::init_from_table(u)) + Some(Time::init_from_table(self.type_())) } else { None } @@ -305,7 +308,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_timestamp(&self) -> Option> { if self.type_type() == Type::Timestamp { - self.type_().map(|u| Timestamp::init_from_table(u)) + Some(Timestamp::init_from_table(self.type_())) } else { None } @@ -315,7 +318,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_interval(&self) -> Option> { if self.type_type() == Type::Interval { - self.type_().map(|u| Interval::init_from_table(u)) + Some(Interval::init_from_table(self.type_())) } else { None } @@ -325,7 +328,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_list(&self) -> Option> { if self.type_type() == Type::List { - self.type_().map(|u| List::init_from_table(u)) + Some(List::init_from_table(self.type_())) } else { None } @@ -335,7 +338,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_struct_(&self) -> Option> { if self.type_type() == Type::Struct_ { - self.type_().map(|u| Struct_::init_from_table(u)) + Some(Struct_::init_from_table(self.type_())) } else { None } @@ -345,7 +348,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_union(&self) -> Option> { if self.type_type() == Type::Union { - self.type_().map(|u| Union::init_from_table(u)) + Some(Union::init_from_table(self.type_())) } else { None } @@ -355,7 +358,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_fixed_size_binary(&self) -> Option> { if self.type_type() == Type::FixedSizeBinary { - self.type_().map(|u| FixedSizeBinary::init_from_table(u)) + Some(FixedSizeBinary::init_from_table(self.type_())) } else { None } @@ -365,7 +368,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_fixed_size_list(&self) -> Option> { if self.type_type() == Type::FixedSizeList { - self.type_().map(|u| FixedSizeList::init_from_table(u)) + Some(FixedSizeList::init_from_table(self.type_())) } else { None } @@ -375,7 +378,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_map(&self) -> Option> { if self.type_type() == Type::Map { - self.type_().map(|u| Map::init_from_table(u)) + Some(Map::init_from_table(self.type_())) } else { None } @@ -385,7 +388,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_duration(&self) -> Option> { if self.type_type() == Type::Duration { - self.type_().map(|u| Duration::init_from_table(u)) + Some(Duration::init_from_table(self.type_())) } else { None } @@ -395,7 +398,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_binary(&self) -> Option> { if self.type_type() == Type::LargeBinary { - self.type_().map(|u| LargeBinary::init_from_table(u)) + Some(LargeBinary::init_from_table(self.type_())) } else { None } @@ 
-405,7 +408,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_utf_8(&self) -> Option> { if self.type_type() == Type::LargeUtf8 { - self.type_().map(|u| LargeUtf8::init_from_table(u)) + Some(LargeUtf8::init_from_table(self.type_())) } else { None } @@ -415,7 +418,7 @@ impl<'a> Tensor<'a> { #[allow(non_snake_case)] pub fn type_as_large_list(&self) -> Option> { if self.type_type() == Type::LargeList { - self.type_().map(|u| LargeList::init_from_table(u)) + Some(LargeList::init_from_table(self.type_())) } else { None } @@ -438,10 +441,10 @@ impl<'a> Default for TensorArgs<'a> { fn default() -> Self { TensorArgs { type_type: Type::NONE, - type_: None, - shape: None, + type_: None, // required field + shape: None, // required field strides: None, - data: None, + data: None, // required field } } } @@ -498,6 +501,9 @@ impl<'a: 'b, 'b> TensorBuilder<'a, 'b> { #[inline] pub fn finish(self) -> flatbuffers::WIPOffset> { let o = self.fbb_.end_table(self.start_); + self.fbb_.required(o, Tensor::VT_TYPE_, "type_"); + self.fbb_.required(o, Tensor::VT_SHAPE, "shape"); + self.fbb_.required(o, Tensor::VT_DATA, "data"); flatbuffers::WIPOffset::new(o.value()) } }
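
Net effect of the required-field changes on the read side, as an assumed-usage sketch: accessors for required slots now return values directly, so only genuinely optional fields keep their Option wrappers:

fn describe_tensor(tensor: &Tensor) {
    // shape() and data() no longer return Option; strides() still does,
    // because strides are optional (row-major when omitted).
    let ndim = tensor.shape().len();
    let nbytes = tensor.data().length();
    let custom_strides = tensor.strides().is_some();
    println!("{} dims, {} body bytes, custom strides: {}", ndim, nbytes, custom_strides);
}
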