public abstract class ParquetRecordReaderBase extends Object
| Modifier and Type | Field and Description |
|---|---|
protected org.apache.hadoop.fs.Path |
filePath |
protected org.apache.hadoop.mapred.FileSplit |
fileSplit |
protected List<org.apache.parquet.hadoop.metadata.BlockMetaData> |
filteredBlocks |
protected org.apache.hadoop.mapred.JobConf |
jobConf |
protected Boolean |
legacyConversionEnabled |
static org.slf4j.Logger |
LOG |
protected org.apache.parquet.hadoop.ParquetInputSplit |
parquetInputSplit |
protected org.apache.parquet.hadoop.metadata.ParquetMetadata |
parquetMetadata |
protected ProjectionPusher |
projectionPusher |
protected org.apache.parquet.hadoop.ParquetFileReader |
reader |
protected int |
schemaSize |
protected SerDeStats |
serDeStats |
protected Boolean |
skipProlepticConversion |
protected boolean |
skipTimestampConversion |
| Modifier | Constructor and Description |
|---|---|
protected |
ParquetRecordReaderBase(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.mapred.InputSplit oldSplit) |
| Modifier and Type | Method and Description |
|---|---|
List<org.apache.parquet.hadoop.metadata.BlockMetaData> |
getFilteredBlocks() |
protected org.apache.parquet.hadoop.metadata.ParquetMetadata |
getParquetMetadata(org.apache.hadoop.fs.Path path,
org.apache.hadoop.mapred.JobConf conf) |
protected org.apache.parquet.hadoop.ParquetInputSplit |
getSplit(org.apache.hadoop.mapred.JobConf conf)
Gets a ParquetInputSplit corresponding to a split given by Hive.
|
SerDeStats |
getStats() |
org.apache.parquet.filter2.compat.FilterCompat.Filter |
setFilter(org.apache.hadoop.mapred.JobConf conf,
org.apache.parquet.schema.MessageType schema) |
protected void |
setupMetadataAndParquetSplit(org.apache.hadoop.mapred.JobConf conf,
org.apache.parquet.hadoop.metadata.ParquetMetadata metadata) |
public static final org.slf4j.Logger LOG
protected final org.apache.hadoop.mapred.FileSplit fileSplit
protected org.apache.hadoop.fs.Path filePath
protected org.apache.parquet.hadoop.ParquetInputSplit parquetInputSplit
protected org.apache.parquet.hadoop.metadata.ParquetMetadata parquetMetadata
protected ProjectionPusher projectionPusher
protected boolean skipTimestampConversion
protected Boolean skipProlepticConversion
protected Boolean legacyConversionEnabled
protected SerDeStats serDeStats
protected final org.apache.hadoop.mapred.JobConf jobConf
protected int schemaSize
protected List<org.apache.parquet.hadoop.metadata.BlockMetaData> filteredBlocks
protected org.apache.parquet.hadoop.ParquetFileReader reader
protected ParquetRecordReaderBase(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.mapred.InputSplit oldSplit)
throws IOException
Throws: IOException

protected void setupMetadataAndParquetSplit(org.apache.hadoop.mapred.JobConf conf,
org.apache.parquet.hadoop.metadata.ParquetMetadata metadata)
throws IOException
Throws: IOException

protected org.apache.parquet.hadoop.ParquetInputSplit getSplit(org.apache.hadoop.mapred.JobConf conf)
throws IOException
Parameters: conf - The JobConf of the Hive job
Throws: IOException - if the config cannot be enhanced or if the footer cannot be read from the file

protected org.apache.parquet.hadoop.metadata.ParquetMetadata getParquetMetadata(org.apache.hadoop.fs.Path path,
org.apache.hadoop.mapred.JobConf conf)
throws IOException
Throws: IOException

public org.apache.parquet.filter2.compat.FilterCompat.Filter setFilter(org.apache.hadoop.mapred.JobConf conf,
org.apache.parquet.schema.MessageType schema)
public List<org.apache.parquet.hadoop.metadata.BlockMetaData> getFilteredBlocks()
public SerDeStats getStats()
Copyright © 2024 The Apache Software Foundation. All rights reserved.