| Modifier and Type | Method and Description |
|---|---|
TableDesc |
AlterTableConcatenateDesc.getTableDesc() |
| Constructor and Description |
|---|
AlterTableConcatenateDesc(TableName tableName,
Map<String,String> partitionSpec,
ListBucketingCtx lbCtx,
org.apache.hadoop.fs.Path inputDir,
org.apache.hadoop.fs.Path outputDir,
Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormatClass,
TableDesc tableDesc) |
| Modifier and Type | Field and Description |
|---|---|
static TableDesc |
Utilities.defaultTd |
protected TableDesc[] |
CommonJoinOperator.spillTableDesc |
TableDesc |
Utilities.MissingBucketsContext.tableInfo |
| Modifier and Type | Method and Description |
|---|---|
static TableDesc |
JoinUtil.getSpillTableDesc(Byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
static TableDesc |
Utilities.getTableDesc(String cols,
String colTypes) |
static TableDesc |
Utilities.getTableDesc(Table tbl) |
TableDesc |
TableScanOperator.getTableDescSkewJoin() |
TableDesc |
FetchTask.getTblDesc()
Return the tableDesc of the fetchWork.
|
static TableDesc[] |
JoinUtil.initSpillTables(JoinDesc conf,
boolean noFilter) |
| Modifier and Type | Method and Description |
|---|---|
static void |
Utilities.copyJobSecretToTableProperties(TableDesc tbl)
Copies job credentials to table properties.
|
static void |
Utilities.copyTableJobPropertiesToConf(TableDesc tbl,
org.apache.hadoop.mapred.JobConf job)
Copies the storage handler properties configured for a table descriptor to a runtime job
configuration.
|
static void |
Utilities.copyTablePropertiesToConf(TableDesc tbl,
org.apache.hadoop.mapred.JobConf job)
Copies the storage handler properties configured for a table descriptor to a runtime job
configuration.
|
static int |
Utilities.getFooterCount(TableDesc table,
org.apache.hadoop.mapred.JobConf job)
Get footer line count for a table.
|
static int |
Utilities.getHeaderCount(TableDesc table)
Get header line count for a table.
|
static PartitionDesc |
Utilities.getPartitionDesc(Partition part,
TableDesc tableDesc) |
static PartitionDesc |
Utilities.getPartitionDescFromTableDesc(TableDesc tblDesc,
Partition part,
boolean usePartSchemaProperties) |
static RowContainer<List<Object>> |
JoinUtil.getRowContainer(org.apache.hadoop.conf.Configuration hconf,
List<ObjectInspector> structFieldObjectInspectors,
Byte alias,
int containerSize,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter,
org.apache.hadoop.mapred.Reporter reporter) |
static AbstractSerDe |
JoinUtil.getSpillSerDe(byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
static TableDesc |
JoinUtil.getSpillTableDesc(Byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
void |
TableScanOperator.setTableDescSkewJoin(TableDesc tableDesc) |
| Constructor and Description |
|---|
MissingBucketsContext(TableDesc tableInfo,
int numBuckets,
boolean isCompressed) |
| Modifier and Type | Method and Description |
|---|---|
static TableDesc |
PTFRowContainer.createTableDesc(StructObjectInspector oI) |
| Modifier and Type | Method and Description |
|---|---|
void |
RowContainer.setTableDesc(TableDesc tblDesc) |
| Modifier and Type | Method and Description |
|---|---|
TableDesc |
ReduceRecordSource.getKeyTableDesc() |
| Modifier and Type | Method and Description |
|---|---|
protected org.apache.hadoop.hive.ql.exec.tez.DynamicPartitionPruner.SourceInfo |
DynamicPartitionPruner.createSourceInfo(TableDesc t,
ExprNodeDesc partKeyExpr,
ExprNodeDesc predicate,
String columnName,
String columnType,
org.apache.hadoop.mapred.JobConf jobConf) |
| Constructor and Description |
|---|
VectorMapJoinFastLongHashMap(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastLongHashMapContainer(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
VectorMapJoinFastLongHashMultiSet(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastLongHashMultiSetContainer(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
VectorMapJoinFastLongHashSet(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastLongHashSetContainer(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
VectorMapJoinFastLongHashTable(boolean isFullOuter,
boolean minMaxEnabled,
VectorMapJoinDesc.HashTableKeyType hashTableKeyType,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastStringCommon(TableDesc tableDesc) |
VectorMapJoinFastStringHashMap(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastStringHashMapContainer(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
VectorMapJoinFastStringHashMultiSet(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastStringHashMultiSetContainer(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
VectorMapJoinFastStringHashSet(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc) |
VectorMapJoinFastStringHashSetContainer(boolean isFullOuter,
int initialCapacity,
float loadFactor,
int writeBuffersSize,
long estimatedKeyCount,
TableDesc tableDesc,
int numHTs) |
| Modifier and Type | Method and Description |
|---|---|
TableDesc |
VectorMapJoinOptimizedStringCommon.getTableDesc() |
TableDesc |
VectorMapJoinOptimizedLongCommon.getTableDesc() |
| Modifier and Type | Method and Description |
|---|---|
static RecordUpdater |
HiveFileFormatUtils.getAcidRecordUpdater(org.apache.hadoop.mapred.JobConf jc,
TableDesc tableInfo,
int bucket,
FileSinkDesc conf,
org.apache.hadoop.fs.Path outPath,
ObjectInspector inspector,
org.apache.hadoop.mapred.Reporter reporter,
int rowIdColNum) |
static RecordUpdater |
HiveFileFormatUtils.getAcidRecordUpdater(org.apache.hadoop.mapred.JobConf jc,
TableDesc tableInfo,
int bucket,
FileSinkDesc conf,
org.apache.hadoop.fs.Path outPath,
ObjectInspector inspector,
org.apache.hadoop.mapred.Reporter reporter,
int rowIdColNum,
Integer attemptId) |
static HiveOutputFormat<?,?> |
HiveFileFormatUtils.getHiveOutputFormat(org.apache.hadoop.conf.Configuration conf,
TableDesc tableDesc) |
static FileSinkOperator.RecordWriter |
HiveFileFormatUtils.getHiveRecordWriter(org.apache.hadoop.mapred.JobConf jc,
TableDesc tableInfo,
Class<? extends org.apache.hadoop.io.Writable> outputClass,
FileSinkDesc conf,
org.apache.hadoop.fs.Path outPath,
org.apache.hadoop.mapred.Reporter reporter) |
protected ValidWriteIdList |
HiveInputFormat.getMmValidWriteIds(org.apache.hadoop.mapred.JobConf conf,
TableDesc table,
ValidWriteIdList validWriteIdList) |
| Modifier and Type | Method and Description |
|---|---|
void |
MergeFileWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
List<String> aliases,
PartitionDesc partDesc) |
| Constructor and Description |
|---|
MergeFileWork(List<org.apache.hadoop.fs.Path> inputPaths,
org.apache.hadoop.fs.Path outputDir,
boolean hasDynamicPartitions,
String srcTblInputFormat,
TableDesc tbl) |
MergeFileWork(List<org.apache.hadoop.fs.Path> inputPaths,
org.apache.hadoop.fs.Path outputDir,
String srcTblInputFormat,
TableDesc tbl) |
| Modifier and Type | Method and Description |
|---|---|
void |
SyslogStorageHandler.configureInputJobCredentials(TableDesc tableDesc,
Map<String,String> secrets) |
void |
SyslogStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
SyslogStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
SyslogStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
SyslogStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
| Modifier and Type | Method and Description |
|---|---|
void |
DefaultStorageHandler.configureInputJobCredentials(TableDesc tableDesc,
Map<String,String> secrets) |
void |
HiveStorageHandler.configureInputJobCredentials(TableDesc tableDesc,
Map<String,String> secrets)
This method is called to allow the StorageHandlers the chance to
populate secret keys into the job's credentials.
|
void |
DefaultStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HiveStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
This method is called to allow the StorageHandlers the chance
to populate the JobContext.getConfiguration() with properties that
may be needed by the handler's bundled artifacts (i.e. InputFormat, SerDe, etc.).
|
void |
DefaultStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
HiveStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf)
Called just before submitting MapReduce job.
|
void |
DefaultStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HiveStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
This method is called to allow the StorageHandlers the chance
to populate the JobContext.getConfiguration() with properties that
may be needed by the handler's bundled artifacts (i.e. InputFormat, SerDe, etc.).
|
void |
DefaultStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HiveStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
Deprecated.
|
| Modifier and Type | Method and Description |
|---|---|
List<TableDesc> |
GenMRProcContext.GenMRUnionCtx.getTTDesc() |
| Modifier and Type | Method and Description |
|---|---|
void |
GenMRProcContext.GenMRUnionCtx.addTTDesc(TableDesc tt_desc) |
static TableScanOperator |
GenMapRedUtils.createTemporaryFile(Operator<? extends OperatorDesc> parent,
Operator<? extends OperatorDesc> child,
org.apache.hadoop.fs.Path taskTmpDir,
TableDesc tt_desc,
ParseContext parseCtx)
Break the pipeline between parent and child, and then
output data generated by parent to a temporary file stored in taskTmpDir.
|
static void |
GenMapRedUtils.setTaskPlan(org.apache.hadoop.fs.Path path,
String alias,
Operator<? extends OperatorDesc> topOp,
MapWork plan,
boolean local,
TableDesc ttDesc)
set the current task in the mapredWork.
|
| Modifier and Type | Method and Description |
|---|---|
static void |
GenMapRedUtils.finalMapWorkChores(List<Task<?>> tasks,
org.apache.hadoop.conf.Configuration conf,
com.google.common.collect.Interner<TableDesc> interner)
Called at the end of TaskCompiler::compile
This currently does the following for each map work
1.
|
| Modifier and Type | Method and Description |
|---|---|
TableDesc |
ParseContext.getFetchTableDesc() |
TableDesc |
RuntimeValuesInfo.getTableDesc() |
| Modifier and Type | Method and Description |
|---|---|
void |
ParseContext.setFetchTabledesc(TableDesc fetchTableDesc) |
void |
RuntimeValuesInfo.setTableDesc(TableDesc tableDesc) |
| Modifier and Type | Method and Description |
|---|---|
static TableDesc |
PlanUtils.getDefaultQueryOutputTableDesc(String cols,
String colTypes,
String fileFormat,
Class<? extends Deserializer> serdeClass) |
static TableDesc |
PlanUtils.getDefaultTableDesc(CreateTableDesc directoryDesc,
String cols,
String colTypes) |
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string).
|
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string), and whether the
last column should take the rest of the line.
|
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns,
String columnTypes,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string), and whether the
last column should take the rest of the line.
|
static TableDesc |
PlanUtils.getIntermediateFileTableDesc(List<org.apache.hadoop.hive.metastore.api.FieldSchema> fieldSchemas)
Generate the table descriptor for intermediate files.
|
TableDesc |
ReduceWork.getKeyDesc() |
TableDesc |
ReduceSinkDesc.getKeySerializeInfo() |
TableDesc |
JoinDesc.getKeyTableDesc() |
TableDesc |
HashTableSinkDesc.getKeyTableDesc() |
TableDesc |
MapJoinDesc.getKeyTblDesc() |
TableDesc |
HashTableSinkDesc.getKeyTblDesc() |
static TableDesc |
PlanUtils.getMapJoinKeyTableDesc(org.apache.hadoop.conf.Configuration conf,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> fieldSchemas)
Generate the table descriptor for Map-side join key.
|
static TableDesc |
PlanUtils.getMapJoinValueTableDesc(List<org.apache.hadoop.hive.metastore.api.FieldSchema> fieldSchemas)
Generate the table descriptor for Map-side join value.
|
static TableDesc |
PlanUtils.getReduceKeyTableDesc(List<org.apache.hadoop.hive.metastore.api.FieldSchema> fieldSchemas,
String order,
String nullOrder)
Generate the table descriptor for reduce key.
|
static TableDesc |
PlanUtils.getReduceValueTableDesc(List<org.apache.hadoop.hive.metastore.api.FieldSchema> fieldSchemas)
Generate the table descriptor for intermediate files.
|
TableDesc |
ScriptDesc.getScriptErrInfo() |
TableDesc |
ScriptDesc.getScriptInputInfo() |
TableDesc |
ScriptDesc.getScriptOutputInfo() |
TableDesc |
LoadTableDesc.getTable() |
TableDesc |
AppMasterEventDesc.getTable() |
TableDesc |
PartitionDesc.getTableDesc() |
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns)
Generate the table descriptor of given serde with the separatorCode and
column names (comma separated string).
|
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of the serde specified with the separatorCode
and column names (comma separated string), and whether the last column
should take the rest of the line.
|
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
String columnTypes,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> partCols,
boolean lastColumnTakesRestOfTheLine) |
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
String columnTypes,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> partCols,
boolean lastColumnTakesRestOfTheLine,
String fileFormat) |
static TableDesc |
PlanUtils.getTableDesc(CreateMaterializedViewDesc crtViewDesc,
String cols,
String colTypes)
Generate a table descriptor from a createViewDesc.
|
static TableDesc |
PlanUtils.getTableDesc(CreateTableDesc crtTblDesc,
String cols,
String colTypes)
Generate a table descriptor from a createTableDesc.
|
static TableDesc |
PartitionDesc.getTableDesc(Table table) |
TableDesc |
FileSinkDesc.getTableInfo() |
TableDesc |
HashTableDummyDesc.getTbl() |
TableDesc |
FetchWork.getTblDesc() |
TableDesc |
ReduceSinkDesc.getValueSerializeInfo() |
| Modifier and Type | Method and Description |
|---|---|
Map<String,List<TableDesc>> |
MapWork.getEventSourceTableDescMap() |
List<TableDesc> |
DemuxDesc.getKeysSerializeInfos() |
Map<Byte,TableDesc> |
JoinDesc.getSkewKeysValuesTables() |
Map<Byte,TableDesc> |
HashTableSinkDesc.getSkewKeysValuesTables() |
List<TableDesc> |
ReduceWork.getTagToValueDesc() |
List<TableDesc> |
MapJoinDesc.getValueFilteredTblDescs() |
List<TableDesc> |
DemuxDesc.getValuesSerializeInfos() |
List<TableDesc> |
MapJoinDesc.getValueTblDescs() |
List<TableDesc> |
HashTableSinkDesc.getValueTblDescs() |
List<TableDesc> |
HashTableSinkDesc.getValueTblFilteredDescs() |
| Modifier and Type | Method and Description |
|---|---|
static void |
PlanUtils.configureInputJobPropertiesForStorageHandler(TableDesc tableDesc)
Loads the storage handler (if one exists) for the given table
and invokes
HiveStorageHandler.configureInputJobProperties(TableDesc, java.util.Map). |
static void |
PlanUtils.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
static void |
PlanUtils.configureOutputJobPropertiesForStorageHandler(TableDesc tableDesc)
Loads the storage handler (if one exists) for the given table
and invokes
HiveStorageHandler.configureOutputJobProperties(TableDesc, java.util.Map). |
void |
MapWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
List<String> aliases,
PartitionDesc partDesc) |
void |
ReduceWork.setKeyDesc(TableDesc keyDesc)
If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
to keySerializeInfo of the ReduceSink
|
void |
ReduceSinkDesc.setKeySerializeInfo(TableDesc keySerializeInfo) |
void |
JoinDesc.setKeyTableDesc(TableDesc keyTblDesc) |
void |
HashTableSinkDesc.setKeyTableDesc(TableDesc keyTableDesc) |
void |
MapJoinDesc.setKeyTblDesc(TableDesc keyTblDesc) |
void |
HashTableSinkDesc.setKeyTblDesc(TableDesc keyTblDesc) |
void |
ScriptDesc.setScriptErrInfo(TableDesc scriptErrInfo) |
void |
ScriptDesc.setScriptInputInfo(TableDesc scriptInputInfo) |
void |
ScriptDesc.setScriptOutputInfo(TableDesc scriptOutputInfo) |
void |
LoadTableDesc.setTable(TableDesc table) |
void |
AppMasterEventDesc.setTable(TableDesc table) |
void |
PartitionDesc.setTableDesc(TableDesc tableDesc) |
void |
FileSinkDesc.setTableInfo(TableDesc tableInfo) |
void |
HashTableDummyDesc.setTbl(TableDesc tbl) |
void |
FetchWork.setTblDesc(TableDesc tblDesc) |
void |
ReduceSinkDesc.setValueSerializeInfo(TableDesc valueSerializeInfo) |
| Modifier and Type | Method and Description |
|---|---|
void |
PartitionDesc.intern(com.google.common.collect.Interner<TableDesc> interner) |
void |
MapWork.internTable(com.google.common.collect.Interner<TableDesc> interner) |
void |
MapWork.setEventSourceTableDescMap(Map<String,List<TableDesc>> map) |
void |
DemuxDesc.setKeysSerializeInfos(List<TableDesc> keysSerializeInfos) |
void |
JoinDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables) |
void |
HashTableSinkDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables) |
void |
ReduceWork.setTagToValueDesc(List<TableDesc> tagToValueDesc) |
void |
MapJoinDesc.setValueFilteredTblDescs(List<TableDesc> valueFilteredTblDescs) |
void |
DemuxDesc.setValuesSerializeInfos(List<TableDesc> valuesSerializeInfos) |
void |
MapJoinDesc.setValueTblDescs(List<TableDesc> valueTblDescs) |
void |
HashTableSinkDesc.setValueTblDescs(List<TableDesc> valueTblDescs) |
void |
HashTableSinkDesc.setValueTblFilteredDescs(List<TableDesc> valueTblFilteredDescs) |
| Constructor and Description |
|---|
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc) |
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc,
int limit) |
FetchWork(org.apache.hadoop.fs.Path tblDir,
TableDesc tblDesc) |
FetchWork(org.apache.hadoop.fs.Path tblDir,
TableDesc tblDesc,
int limit) |
FileSinkDesc(org.apache.hadoop.fs.Path dirName,
TableDesc tableInfo,
boolean compressed) |
FileSinkDesc(org.apache.hadoop.fs.Path dirName,
TableDesc tableInfo,
boolean compressed,
int destTableId,
boolean multiFileSpray,
boolean canBeMerged,
int numFiles,
int totalFiles,
List<ExprNodeDesc> partitionCols,
DynamicPartitionCtx dpCtx,
org.apache.hadoop.fs.Path destPath,
Long mmWriteId,
boolean isMmCtas,
boolean isInsertOverwrite,
boolean isQuery,
boolean isCTASorCM,
boolean isDirectInsert,
AcidUtils.Operation acidOperation,
boolean deleteOfSplitUpdate) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
DynamicPartitionCtx dpCtx,
AcidUtils.Operation writeType,
boolean isReplace,
Long writeId) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec)
For DDL operations that are not ACID compliant.
|
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
AcidUtils.Operation writeType,
Long currentWriteId) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
LoadTableDesc.LoadFileType loadFileType,
AcidUtils.Operation writeType,
Long currentWriteId) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
LoadTableDesc.LoadFileType loadFileType,
Long writeId)
For use with non-ACID-compliant operations, such as LOAD.
|
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix,
MemoryMonitorInfo memoryMonitorInfo,
long inMemoryDataSize) |
PartitionDesc(Partition part,
TableDesc tableDesc) |
PartitionDesc(Partition part,
TableDesc tblDesc,
boolean usePartSchemaProperties) |
PartitionDesc(TableDesc table,
LinkedHashMap<String,String> partSpec) |
ReduceSinkDesc(List<ExprNodeDesc> keyCols,
int numDistributionKeys,
List<ExprNodeDesc> valueCols,
List<String> outputKeyColumnNames,
List<List<Integer>> distinctColumnIndices,
List<String> outputValueColumnNames,
int tag,
List<ExprNodeDesc> partitionCols,
int numReducers,
TableDesc keySerializeInfo,
TableDesc valueSerializeInfo,
AcidUtils.Operation writeType) |
ScriptDesc(String scriptCmd,
TableDesc scriptInputInfo,
Class<? extends RecordWriter> inRecordWriterClass,
TableDesc scriptOutputInfo,
Class<? extends RecordReader> outRecordReaderClass,
Class<? extends RecordReader> errRecordReaderClass,
TableDesc scriptErrInfo) |
| Constructor and Description |
|---|
DemuxDesc(Map<Integer,Integer> newTagToOldTag,
Map<Integer,Integer> newTagToChildIndex,
Map<Integer,Integer> childIndexToOriginalNumParents,
List<TableDesc> keysSerializeInfos,
List<TableDesc> valuesSerializeInfos) |
DemuxDesc(Map<Integer,Integer> newTagToOldTag,
Map<Integer,Integer> newTagToChildIndex,
Map<Integer,Integer> childIndexToOriginalNumParents,
List<TableDesc> keysSerializeInfos,
List<TableDesc> valuesSerializeInfos) |
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix,
MemoryMonitorInfo memoryMonitorInfo,
long inMemoryDataSize) |
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix,
MemoryMonitorInfo memoryMonitorInfo,
long inMemoryDataSize) |
Copyright © 2022 The Apache Software Foundation. All rights reserved.