public static class HoodieCombineHiveInputFormat.HoodieCombineFileInputFormatShim<K,V>
extends org.apache.hadoop.mapred.lib.CombineFileInputFormat<K,V>
implements org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim<K,V>
| Constructor and Description |
|---|
HoodieCombineFileInputFormatShim() |
| Modifier and Type | Method and Description |
|---|---|
protected HoodieParquetInputFormat |
createParquetInputFormat() |
protected HoodieParquetRealtimeInputFormat |
createParquetRealtimeInputFormat() |
void |
createPool(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.fs.PathFilter... filters) |
org.apache.hadoop.fs.Path[] |
getInputPathsShim(org.apache.hadoop.mapred.JobConf conf) |
org.apache.hadoop.hive.shims.HadoopShimsSecure.InputSplitShim |
getInputSplitShim() |
org.apache.hadoop.mapred.RecordReader<K,V> |
getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.Reporter reporter) |
org.apache.hadoop.mapred.RecordReader |
getRecordReader(org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.lib.CombineFileSplit split,
org.apache.hadoop.mapred.Reporter reporter,
Class<org.apache.hadoop.mapred.RecordReader<K,V>> rrClass) |
org.apache.hadoop.mapred.lib.CombineFileSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf job,
int numSplits) |
protected List<org.apache.hadoop.fs.FileStatus> |
listStatus(org.apache.hadoop.mapreduce.JobContext job) |
void |
setHoodieFilter(boolean hoodieFilter) |
void |
setRealTime(boolean realTime) |
Methods inherited from class org.apache.hadoop.mapred.lib.CombineFileInputFormat: createPool, createRecordReader, isSplitable, isSplitable, listStatus

Methods inherited from class org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat: createPool, createPool, getFileBlockLocations, getSplits, setMaxSplitSize, setMinSplitSizeNode, setMinSplitSizeRack

Methods inherited from class org.apache.hadoop.mapreduce.lib.input.FileInputFormat: addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize

protected HoodieParquetInputFormat createParquetInputFormat()
protected HoodieParquetRealtimeInputFormat createParquetRealtimeInputFormat()
public org.apache.hadoop.fs.Path[] getInputPathsShim(org.apache.hadoop.mapred.JobConf conf)
public void createPool(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.fs.PathFilter... filters)
public org.apache.hadoop.mapred.RecordReader<K,V> getRecordReader(org.apache.hadoop.mapred.InputSplit split, org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.mapred.Reporter reporter) throws IOException
Specified by: getRecordReader in interface org.apache.hadoop.mapred.InputFormat<K,V>
Overrides: getRecordReader in class org.apache.hadoop.mapred.lib.CombineFileInputFormat<K,V>
Throws: IOException

protected List<org.apache.hadoop.fs.FileStatus> listStatus(org.apache.hadoop.mapreduce.JobContext job) throws IOException
Overrides: listStatus in class org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K,V>
Throws: IOException

public org.apache.hadoop.mapred.lib.CombineFileSplit[] getSplits(org.apache.hadoop.mapred.JobConf job,
int numSplits)
throws IOException
public org.apache.hadoop.hive.shims.HadoopShimsSecure.InputSplitShim getInputSplitShim()
public org.apache.hadoop.mapred.RecordReader getRecordReader(org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.lib.CombineFileSplit split,
org.apache.hadoop.mapred.Reporter reporter,
Class<org.apache.hadoop.mapred.RecordReader<K,V>> rrClass)
throws IOException
Specified by: getRecordReader in interface org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim<K,V>
Throws: IOException

public void setHoodieFilter(boolean hoodieFilter)
public void setRealTime(boolean realTime)
Copyright © 2022 The Apache Software Foundation. All rights reserved.