public class ParquetSplitReaderUtil extends Object
Util for generating ParquetColumnarRowSplitReader.
NOTE: reference from Flink release 1.11.2 ParquetSplitReaderUtil, modify to support INT64
based TIMESTAMP_MILLIS as ConvertedType, should remove when Flink supports that.
| Constructor and Description |
|---|
ParquetSplitReaderUtil() |
| Modifier and Type | Method and Description |
|---|---|
static org.apache.flink.formats.parquet.vector.reader.ColumnReader |
createColumnReader(boolean utcTimestamp,
org.apache.flink.table.types.logical.LogicalType fieldType,
org.apache.parquet.schema.Type physicalType,
List<org.apache.parquet.column.ColumnDescriptor> descriptors,
org.apache.parquet.column.page.PageReadStore pages) |
static org.apache.flink.table.data.columnar.vector.writable.WritableColumnVector |
createWritableColumnVector(int batchSize,
org.apache.flink.table.types.logical.LogicalType fieldType,
org.apache.parquet.schema.Type physicalType,
List<org.apache.parquet.column.ColumnDescriptor> descriptors) |
static ParquetColumnarRowSplitReader |
genPartColumnarRowReader(boolean utcTimestamp,
boolean caseSensitive,
org.apache.hadoop.conf.Configuration conf,
String[] fullFieldNames,
org.apache.flink.table.types.DataType[] fullFieldTypes,
Map<String,Object> partitionSpec,
int[] selectedFields,
int batchSize,
org.apache.flink.core.fs.Path path,
long splitStart,
long splitLength,
org.apache.parquet.filter2.predicate.FilterPredicate filterPredicate,
org.apache.parquet.filter.UnboundRecordFilter recordFilter)
Util for generating partitioned
ParquetColumnarRowSplitReader. |
public static ParquetColumnarRowSplitReader genPartColumnarRowReader(boolean utcTimestamp, boolean caseSensitive, org.apache.hadoop.conf.Configuration conf, String[] fullFieldNames, org.apache.flink.table.types.DataType[] fullFieldTypes, Map<String,Object> partitionSpec, int[] selectedFields, int batchSize, org.apache.flink.core.fs.Path path, long splitStart, long splitLength, org.apache.parquet.filter2.predicate.FilterPredicate filterPredicate, org.apache.parquet.filter.UnboundRecordFilter recordFilter) throws IOException
Util for generating partitioned ParquetColumnarRowSplitReader.
Throws:
IOException
public static org.apache.flink.formats.parquet.vector.reader.ColumnReader createColumnReader(boolean utcTimestamp,
org.apache.flink.table.types.logical.LogicalType fieldType,
org.apache.parquet.schema.Type physicalType,
List<org.apache.parquet.column.ColumnDescriptor> descriptors,
org.apache.parquet.column.page.PageReadStore pages)
throws IOException
Throws:
IOException
public static org.apache.flink.table.data.columnar.vector.writable.WritableColumnVector createWritableColumnVector(int batchSize,
org.apache.flink.table.types.logical.LogicalType fieldType,
org.apache.parquet.schema.Type physicalType,
List<org.apache.parquet.column.ColumnDescriptor> descriptors)
Copyright © 2023 The Apache Software Foundation. All rights reserved.