| Modifier and Type | Method and Description |
|---|---|
Table |
Context.getMaterializedTable(String cteName) |
Table |
Context.getTempTableForLoad() |
| Modifier and Type | Method and Description |
|---|---|
void |
Context.addMaterializedTable(String cteName,
Table table) |
void |
Context.setTempTableForLoad(Table tempTableForLoad) |
| Modifier and Type | Method and Description |
|---|---|
Table |
InsertCommitHookDesc.getTable() |
| Constructor and Description |
|---|
InsertCommitHookDesc(Table table,
boolean overwrite) |
| Modifier and Type | Method and Description |
|---|---|
protected abstract void |
AbstractAlterTableOperation.doAlteration(Table table,
Partition partition) |
protected org.apache.hadoop.hive.metastore.api.StorageDescriptor |
AbstractAlterTableOperation.getStorageDescriptor(Table tbl,
Partition part) |
static boolean |
AlterTableUtils.isFullPartitionSpec(Table table,
Map<String,String> partitionSpec) |
static boolean |
AlterTableUtils.isSchemaEvolutionEnabled(Table table,
org.apache.hadoop.conf.Configuration conf) |
protected void |
AbstractAlterTableAnalyzer.setAcidDdlDesc(Table table,
DDLDesc.DDLDescWithWriteId desc) |
protected void |
AbstractBaseAlterTableAnalyzer.validateAlterTableType(Table table,
AlterTableType op,
boolean expectView) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableAddColumnsOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableChangeColumnOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableReplaceColumnsOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableUpdateColumnsOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
Table |
CreateTableDesc.toTable(HiveConf conf) |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
CreateTableOperation.doesTableNeedLocation(Table tbl) |
static void |
CreateTableOperation.makeLocationQualified(Table table,
HiveConf conf) |
| Modifier and Type | Method and Description |
|---|---|
abstract void |
DescTableFormatter.describeTable(HiveConf conf,
DataOutputStream out,
String columnPath,
String tableName,
Table table,
Partition partition,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> columns,
boolean isFormatted,
boolean isExtended,
boolean isOutputPadded,
List<org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj> columnStats) |
void |
JsonDescTableFormatter.describeTable(HiveConf conf,
DataOutputStream out,
String columnPath,
String tableName,
Table table,
Partition partition,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> columns,
boolean isFormatted,
boolean isExtended,
boolean isOutputPadded,
List<org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj> columnStats) |
| Modifier and Type | Method and Description |
|---|---|
void |
JsonShowTableStatusFormatter.showTableStatus(DataOutputStream out,
Hive db,
HiveConf conf,
List<Table> tables,
Partition partition) |
void |
TextShowTableStatusFormatter.showTableStatus(DataOutputStream out,
Hive db,
HiveConf conf,
List<Table> tables,
Partition partition) |
abstract void |
ShowTableStatusFormatter.showTableStatus(DataOutputStream out,
Hive db,
HiveConf conf,
List<Table> tables,
Partition par) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableSetOwnerOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
Table |
PreInsertTableDesc.getTable() |
| Constructor and Description |
|---|
PreInsertTableDesc(Table table,
boolean overwrite) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableUnsetPropertiesOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableSetPropertiesOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableRenameOperation.doAlteration(Table table,
Partition partition) |
| Constructor and Description |
|---|
TruncateTableDesc(TableName tableName,
Map<String,String> partSpec,
ReplicationSpec replicationSpec,
Table table) |
TruncateTableDesc(TableName tableName,
Map<String,String> partSpec,
ReplicationSpec replicationSpec,
Table table,
List<Integer> columnIndexes,
org.apache.hadoop.fs.Path inputDir,
org.apache.hadoop.fs.Path outputDir,
ListBucketingCtx lbCtx) |
| Modifier and Type | Method and Description |
|---|---|
static void |
PartitionUtils.addTablePartsOutputs(Hive db,
Set<WriteEntity> outputs,
Table table,
List<Map<String,String>> partitionSpecs,
boolean allowMany,
WriteEntity.WriteType writeType)
Add the table partitions to be modified in the output, so that it is available for the pre-execution hook.
|
static Partition |
PartitionUtils.getPartition(Hive db,
Table table,
Map<String,String> partitionSpec,
boolean throwException) |
static List<Partition> |
PartitionUtils.getPartitions(Hive db,
Table table,
Map<String,String> partitionSpec,
boolean throwException) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableAddPartitionAnalyzer.postProcess(TableName tableName,
Table table,
AlterTableAddPartitionDesc desc,
Task<DDLWork> ddlTask)
Add partition for Transactional tables needs to add (copy/rename) the data so that it lands
in a delta_x_x/ folder in the partition dir.
|
protected void |
AlterViewAddPartitionAnalyzer.postProcess(TableName tableName,
Table table,
AlterTableAddPartitionDesc desc,
Task<DDLWork> ddlTask) |
| Constructor and Description |
|---|
AlterTableDropPartitionDesc(TableName tableName,
Map<Integer,List<ExprNodeGenericFuncDesc>> partSpecs,
boolean ifPurge,
ReplicationSpec replicationSpec,
boolean deleteData,
Table table) |
| Modifier and Type | Method and Description |
|---|---|
Table |
AlterTableExchangePartitionsDesc.getDestinationTable() |
Table |
AlterTableExchangePartitionsDesc.getSourceTable() |
| Constructor and Description |
|---|
AlterTableExchangePartitionsDesc(Table sourceTable,
Table destinationTable,
Map<String,String> partitionSpecs) |
| Constructor and Description |
|---|
AlterTableRenamePartitionDesc(TableName tableName,
Map<String,String> oldPartSpec,
Map<String,String> newPartSpec,
ReplicationSpec replicationSpec,
Table table) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableSetPartitionSpecOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableNotClusteredOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableIntoBucketsOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableClusteredByOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableNotSortedOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableSetSerdeOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableUnsetSerdePropsOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableSetSerdePropsOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableSetFileFormatOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableSetLocationOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AlterTableNotSkewedOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableSetSkewedLocationOperation.doAlteration(Table table,
Partition partition) |
protected void |
AlterTableSkewedByOperation.doAlteration(Table table,
Partition partition) |
| Modifier and Type | Method and Description |
|---|---|
Table |
CreateMaterializedViewDesc.toTable(HiveConf conf) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
AbstractCreateViewAnalyzer.validateReplaceWithPartitions(String viewName,
Table oldView,
List<org.apache.hadoop.hive.metastore.api.FieldSchema> partitionColumns) |
| Modifier and Type | Method and Description |
|---|---|
void |
ExplainTask.addConstraints(Table tbl,
List<String> constraints,
Set<String> allTableNames,
DDLPlanUtils ddlPlanUtils) |
void |
ExplainTask.addCreateTableStatement(Table table,
List<String> tableCreateStmt,
DDLPlanUtils ddlPlanUtils) |
void |
ExplainTask.addPKandBasicStats(Table tbl,
List<String> basicDef,
DDLPlanUtils ddlPlanUtils) |
static void |
Utilities.addSchemaEvolutionToTableScanOperator(Table table,
TableScanOperator tableScanOp) |
void |
ExplainTask.addStats(Table table,
List<String> alterTableStmt,
Map<String,List<Partition>> tablePartitionsMap,
DDLPlanUtils ddlPlanUtils) |
static String |
ArchiveUtils.conflictingArchiveNameOrNull(Hive db,
Table tbl,
LinkedHashMap<String,String> partSpec)
Determines if one can insert into partition(s), or there's a conflict with
archive.
|
static ArchiveUtils.PartSpecInfo |
ArchiveUtils.PartSpecInfo.create(Table tbl,
Map<String,String> partSpec)
Extract partial prefix specification from table and key-value map
|
org.apache.hadoop.fs.Path |
ArchiveUtils.PartSpecInfo.createPath(Table tbl)
Creates path where partitions matching prefix should lie in filesystem
|
void |
DDLPlanUtils.getAlterTableStmtDefaultConstraint(DefaultConstraint dc,
Table tb,
List<String> constraints) |
void |
DDLPlanUtils.getAlterTableStmtNotNullConstraint(NotNullConstraint nc,
Table tb,
List<String> constraints) |
String |
DDLPlanUtils.getAlterTableStmtTableStatsBasic(Table tbl)
Parses the basic table statistics for the given table.
|
List<String> |
DDLPlanUtils.getAlterTableStmtTableStatsColsAll(Table tbl)
Parses the ColumnStatistics for all the columns in a given table and adds the alter table update
statistics command for each column.
|
String |
DDLPlanUtils.getCreateTableCommand(Table table,
boolean isRelative) |
String |
DDLPlanUtils.getCreateViewCommand(Table table,
boolean isRelative) |
String |
DDLPlanUtils.getCreateViewStmt(Table table)
Returns the create view statement for the given view.
|
List<String> |
DDLPlanUtils.getDDLPlanForPartitionWithStats(Table table,
Map<String,List<Partition>> tableToPartitionList) |
List<String> |
DDLPlanUtils.getTableColumnNames(Table tbl) |
Map<String,String> |
DDLPlanUtils.getTableColumnsToType(Table tbl) |
static TableDesc |
Utilities.getTableDesc(Table tbl) |
List<String> |
DDLPlanUtils.populateConstraints(Table tb,
Set<String> allTableNames)
Add all the constraints for the given table.
|
| Modifier and Type | Method and Description |
|---|---|
static Map<Integer,List<ExprNodeGenericFuncDesc>> |
ReplUtils.genPartSpecs(Table table,
List<Map<String,String>> partitions) |
| Modifier and Type | Method and Description |
|---|---|
Table |
Entity.getT() |
Table |
Entity.getTable()
Get the table associated with the entity.
|
| Modifier and Type | Method and Description |
|---|---|
static WriteEntity.WriteType |
WriteEntity.determineAlterTableWriteType(AlterTableType op,
Table table,
HiveConf conf)
Determine the type of lock to request for a given alter table type.
|
void |
Entity.setT(Table t) |
| Constructor and Description |
|---|
Entity(Table t,
boolean complete)
Constructor for a table.
|
ReadEntity(Table t)
Constructor.
|
ReadEntity(Table t,
ReadEntity parent) |
ReadEntity(Table t,
ReadEntity parent,
boolean isDirect) |
WriteEntity(Table t,
WriteEntity.WriteType type)
Constructor for a table.
|
WriteEntity(Table t,
WriteEntity.WriteType type,
boolean complete) |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
AcidUtils.acidTableWithoutTransactions(Table table) |
static List<org.apache.hadoop.fs.FileStatus> |
AcidUtils.getAcidFilesForStats(Table table,
org.apache.hadoop.fs.Path dir,
org.apache.hadoop.conf.Configuration jc,
org.apache.hadoop.fs.FileSystem fs) |
static AcidUtils.AcidOperationalProperties |
AcidUtils.getAcidOperationalProperties(Table table)
Returns the acidOperationalProperties for a given table.
|
static List<VirtualColumn> |
AcidUtils.getAcidVirtualColumns(Table table)
Returns the virtual columns needed for update queries.
|
static AcidUtils.TableSnapshot |
AcidUtils.getTableSnapshot(org.apache.hadoop.conf.Configuration conf,
Table tbl) |
static AcidUtils.TableSnapshot |
AcidUtils.getTableSnapshot(org.apache.hadoop.conf.Configuration conf,
Table tbl,
boolean isStatsUpdater)
Note: this is generally called in Hive.java; so, the callers of Hive.java make sure
to set up the acid state during compile, and Hive.java retrieves it if needed.
|
static AcidUtils.TableSnapshot |
AcidUtils.getTableSnapshot(org.apache.hadoop.conf.Configuration conf,
Table tbl,
String dbName,
String tblName,
boolean isStatsUpdater)
Note: this is generally called in Hive.java; so, the callers of Hive.java make sure
to set up the acid state during compile, and Hive.java retrieves it if needed.
|
static boolean |
AcidUtils.isFullAcidTable(Table table)
Should produce the same result as
TxnUtils.isAcidTable(org.apache.hadoop.hive.metastore.api.Table) |
static boolean |
AcidUtils.isInsertOnlyTable(Table table) |
static boolean |
AcidUtils.isLocklessReadsEnabled(Table table,
HiveConf conf) |
static boolean |
AcidUtils.isNonNativeAcidTable(Table table) |
static boolean |
AcidUtils.isTableSoftDeleteEnabled(Table table,
HiveConf conf) |
static Boolean |
AcidUtils.isToFullAcid(Table table,
Map<String,String> props) |
static Boolean |
AcidUtils.isToInsertOnlyTable(Table tbl,
Map<String,String> props)
The method for altering table props; may set the table to MM, non-MM, or not affect MM.
|
static boolean |
AcidUtils.isTransactionalTable(Table table) |
static void |
AcidUtils.validateAcidFiles(Table table,
org.apache.hadoop.fs.FileStatus[] srcs,
org.apache.hadoop.fs.FileSystem fs)
Safety check to make sure a file taken from one acid table is not added into another acid table
since the ROW__IDs embedded as part of a write to one table won't make sense in a different
table/cluster.
|
| Modifier and Type | Method and Description |
|---|---|
static boolean |
ParquetHiveSerDe.isParquetTable(Table table) |
| Constructor and Description |
|---|
HiveLockObject(Table tbl,
HiveLockObject.HiveLockObjectData lockData) |
| Modifier and Type | Method and Description |
|---|---|
Table |
Table.copy() |
Table |
Partition.getTable() |
Table |
Hive.getTable(String tableName)
Returns metadata for the table named tableName
|
Table |
Hive.getTable(String tableName,
boolean throwException)
Returns metadata for the table named tableName
|
Table |
Hive.getTable(String dbName,
String tableName)
Returns metadata of the table
|
Table |
Hive.getTable(String dbName,
String tableName,
boolean throwException)
Returns metadata of the table
|
Table |
Hive.getTable(String dbName,
String tableName,
boolean throwException,
boolean checkTransactional)
Returns metadata of the table.
|
Table |
Hive.getTable(String dbName,
String tableName,
String metaTableName,
boolean throwException)
Returns metadata of the table
|
Table |
Hive.getTable(String dbName,
String tableName,
String metaTableName,
boolean throwException,
boolean checkTransactional)
Returns metadata of the table.
|
Table |
Hive.getTable(String dbName,
String tableName,
String metaTableName,
boolean throwException,
boolean checkTransactional,
boolean getColumnStats)
Returns metadata of the table.
|
Table |
Hive.getTable(TableName tableName)
Returns metadata of the table
|
Table |
Hive.getTranslateTableDryrun(org.apache.hadoop.hive.metastore.api.Table tbl)
Dry run that translates table
|
Table |
Table.makeCopy()
This API is used by getMetaData, which requires a deep copy of metastore.api.table
and a copy of its constraints
|
Table |
Hive.newTable(String tableName) |
| Modifier and Type | Method and Description |
|---|---|
List<Table> |
Hive.getAllMaterializedViewObjects(String dbName)
Get all materialized views for the specified database.
|
List<Table> |
Hive.getAllMaterializedViewObjectsForRewriting() |
List<Table> |
Hive.getAllTableObjects(String dbName)
Get all tables for the specified database.
|
List<Table> |
Hive.getMaterializedViewObjectsByPattern(String dbName,
String pattern)
Get materialized views for the specified database that match the provided regex pattern.
|
List<Table> |
Hive.getTableObjects(String dbName,
String pattern,
TableType tableType) |
static Map<String,Table> |
SessionHiveMetaStoreClient.getTempTablesForDatabase(String dbName,
String tblName) |
| Modifier and Type | Method and Description |
|---|---|
default List<org.apache.hadoop.hive.metastore.api.FieldSchema> |
HiveStorageHandler.acidSelectColumns(Table table,
Context.Operation operation)
UpdateDeleteSemanticAnalyzer rewrites DELETE/UPDATE queries into INSERT
queries. |
default List<org.apache.hadoop.hive.metastore.api.FieldSchema> |
HiveStorageHandler.acidSortColumns(Table table,
Context.Operation operation)
UpdateDeleteSemanticAnalyzer rewrites DELETE/UPDATE queries into INSERT
queries. |
default boolean |
HiveStorageHandler.addDynamicSplitPruningEdge(Table table,
ExprNodeDesc syntheticFilterPredicate)
Test if the storage handler allows the push-down of join filter predicate to prune further the splits.
|
static void |
Hive.addWriteNotificationLog(HiveConf conf,
Table tbl,
List<String> partitionVals,
Long txnId,
Long writeId,
List<org.apache.hadoop.fs.FileStatus> newFiles,
List<org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest> requestList) |
void |
Hive.addWriteNotificationLog(Table tbl,
Map<String,String> partitionSpec,
List<org.apache.hadoop.fs.FileStatus> newFiles,
Long writeId,
List<org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest> requestList) |
void |
Hive.alterTable(String catName,
String dbName,
String tblName,
Table newTbl,
boolean cascade,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional) |
void |
Hive.alterTable(String catName,
String dbName,
String tblName,
Table newTbl,
boolean cascade,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional,
long replWriteId) |
void |
Hive.alterTable(String fullyQlfdTblName,
Table newTbl,
boolean cascade,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional) |
void |
Hive.alterTable(String fullyQlfdTblName,
Table newTbl,
boolean cascade,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional,
long writeId) |
void |
Hive.alterTable(String fullyQlfdTblName,
Table newTbl,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional)
Updates the existing table metadata with the new metadata.
|
void |
Hive.alterTable(Table newTbl,
boolean cascade,
org.apache.hadoop.hive.metastore.api.EnvironmentContext environmentContext,
boolean transactional) |
void |
Hive.alterTableExecuteOperation(Table table,
AlterTableExecuteSpec executeSpec) |
void |
Table.copyConstraints(Table tbl)
This shouldn't use get apis because those api call metastore
to fetch constraints.
|
default DynamicPartitionCtx |
HiveStorageHandler.createDPContext(HiveConf conf,
Table table,
Context.Operation writeOperation)
Creates a DynamicPartitionCtx instance that will be set up by the storage handler itself.
|
HiveRelOptMaterialization |
HiveMaterializedViewsRegistry.createMaterialization(HiveConf conf,
Table materializedViewTable)
Parses and creates a materialization.
|
void |
HiveMaterializedViewsRegistry.createMaterializedView(HiveConf conf,
Table materializedViewTable)
Adds a newly created materialized view to the cache.
|
static org.apache.hadoop.hive.metastore.api.Partition |
Partition.createMetaPartitionObject(Table tbl,
Map<String,String> partSpec,
org.apache.hadoop.fs.Path location) |
Partition |
Hive.createPartition(Table tbl,
Map<String,String> partSpec)
Creates a partition.
|
void |
Hive.createTable(Table tbl)
Creates the table with the given objects
|
void |
Hive.createTable(Table tbl,
boolean ifNotExists) |
void |
Hive.createTable(Table tbl,
boolean ifNotExists,
List<org.apache.hadoop.hive.metastore.api.SQLPrimaryKey> primaryKeys,
List<org.apache.hadoop.hive.metastore.api.SQLForeignKey> foreignKeys,
List<org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint> uniqueConstraints,
List<org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint> notNullConstraints,
List<org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint> defaultConstraints,
List<org.apache.hadoop.hive.metastore.api.SQLCheckConstraint> checkConstraints)
Creates the table with the given objects.
|
void |
HiveMaterializedViewsRegistry.dropMaterializedView(Table materializedViewTable)
Removes the materialized view from the cache (based on table object equality), if exists.
|
void |
Hive.dropTable(Table table,
boolean ifPurge) |
boolean |
Table.equalsWithIgnoreWriteId(Table tbl)
This method ignores the write Id, while comparing two tables.
|
default void |
HiveStorageHandler.executeOperation(Table table,
AlterTableExecuteSpec executeSpec)
Execute an operation on storage handler level
|
Set<Partition> |
Hive.getAllPartitionsOf(Table tbl)
Get all the partitions; unlike
Hive.getPartitions(Table), does not include auth. |
default SnapshotContext |
HiveStorageHandler.getCurrentSnapshotContext(Table table)
Query the most recent unique snapshot's context of the passed table.
|
static List<org.apache.hadoop.hive.metastore.api.FieldSchema> |
Hive.getFieldsFromDeserializerForMsStorage(Table tbl,
Deserializer deserializer,
org.apache.hadoop.conf.Configuration conf) |
int |
Hive.getNumPartitionsByFilter(Table tbl,
String filter)
Get a number of Partitions by filter.
|
Partition |
Hive.getPartition(Table tbl,
Map<String,String> partSpec,
boolean forceCreate) |
Partition |
Hive.getPartition(Table tbl,
Map<String,String> partSpec,
boolean forceCreate,
String partPath,
boolean inheritTableSpecs)
Returns partition metadata
|
org.apache.hadoop.hive.metastore.api.Partition |
Hive.getPartition(Table t,
String dbName,
String tableName,
List<String> params) |
List<String> |
Hive.getPartitionNames(Table tbl,
ExprNodeGenericFuncDesc expr,
String order,
short maxParts) |
List<Partition> |
Hive.getPartitions(Table tbl)
get all the partitions that the table has
|
List<Partition> |
Hive.getPartitions(Table tbl,
Map<String,String> partialPartSpec)
get all the partitions of the table that matches the given partial
specification.
|
List<Partition> |
Hive.getPartitions(Table tbl,
Map<String,String> partialPartSpec,
short limit)
get all the partitions of the table that matches the given partial
specification.
|
boolean |
Hive.getPartitionsByExpr(Table tbl,
ExprNodeDesc expr,
HiveConf conf,
List<Partition> partitions)
Get a list of Partitions by expr.
|
List<Partition> |
Hive.getPartitionsByFilter(Table tbl,
String filter)
Get a list of Partitions by filter.
|
List<org.apache.hadoop.hive.metastore.api.Partition> |
Hive.getPartitionsByNames(org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest req,
Table table) |
List<org.apache.hadoop.hive.metastore.api.Partition> |
Hive.getPartitionsByNames(String dbName,
String tableName,
List<String> partitionNames,
Table t) |
List<Partition> |
Hive.getPartitionsByNames(Table tbl,
List<String> partNames)
Get all partitions of the table that matches the list of given partition names.
|
List<Partition> |
Hive.getPartitionsByNames(Table tbl,
List<String> partNames,
boolean getColStats)
Get all partitions of the table that matches the list of given partition names.
|
List<Partition> |
Hive.getPartitionsByNames(Table tbl,
Map<String,String> partialPartSpec)
get all the partitions of the table that matches the given partial
specification.
|
default List<TransformSpec> |
HiveStorageHandler.getPartitionTransformSpec(Table table)
Return a list of partition transform specifications.
|
StorageHandlerInfo |
Hive.getStorageHandlerInfo(Table table) |
protected void |
Partition.initialize(Table table,
org.apache.hadoop.hive.metastore.api.Partition tPartition)
Initializes this object with the given variables
|
Boolean |
Hive.isOutdatedMaterializedView(Table materializedViewTable,
Set<TableName> tablesUsed,
boolean forceMVContentsUpToDate,
HiveTxnManager txnMgr)
Utility method that returns whether a materialized view is outdated (true), not outdated
(false), or it cannot be determined (null).
|
Partition |
Hive.loadPartition(org.apache.hadoop.fs.Path loadPath,
Table tbl,
Map<String,String> partSpec,
LoadTableDesc.LoadFileType loadFileType,
boolean inheritTableSpecs,
boolean inheritLocation,
boolean isSkewedStoreAsSubdir,
boolean isSrcLocal,
boolean isAcidIUDoperation,
boolean resetStatistics,
Long writeId,
int stmtId,
boolean isInsertOverwrite,
boolean isDirectInsert)
Load a directory into a Hive Table Partition - Alters existing content of
the partition with the contents of loadPath.
|
void |
MaterializedViewsCache.putIfAbsent(Table materializedViewTable,
HiveRelOptMaterialization materialization) |
void |
MaterializedViewsCache.refresh(Table oldMaterializedViewTable,
Table materializedViewTable,
HiveRelOptMaterialization newMaterialization) |
void |
HiveMaterializedViewsRegistry.refreshMaterializedView(HiveConf conf,
Table materializedViewTable)
Update the materialized view in the registry (if materialized view exists).
|
void |
HiveMaterializedViewsRegistry.refreshMaterializedView(HiveConf conf,
Table oldMaterializedViewTable,
Table materializedViewTable)
Update the materialized view in the registry (if existing materialized view matches).
|
void |
MaterializedViewsCache.remove(Table materializedViewTable) |
void |
Hive.renamePartition(Table tbl,
Map<String,String> oldPartSpec,
Partition newPart,
long replWriteId)
Rename an old partition to a new partition
|
void |
Partition.setTable(Table table)
Should be only used by serialization.
|
default List<org.apache.hadoop.hive.metastore.api.FieldSchema> |
HiveStorageHandler.sortColumns(Table table)
Collect the columns that are used to sort the content of the data files
|
default HiveStorageHandler.AcidSupportType |
HiveStorageHandler.supportsAcidOperations(Table table)
Specifies whether the table supports ACID operations or not (DELETE, UPDATE and MERGE statements).
|
| Modifier and Type | Method and Description |
|---|---|
boolean |
Hive.validateMaterializedViewsFromRegistry(List<Table> cachedMaterializedViewTables,
Set<TableName> tablesUsed,
HiveTxnManager txnMgr)
Validate that the materialized views retrieved from registry are still up-to-date.
|
| Constructor and Description |
|---|
DummyPartition(Table tbl,
String name) |
DummyPartition(Table tbl,
String name,
Map<String,String> partSpec) |
Partition(Table tbl)
create an empty partition.
|
Partition(Table tbl,
Map<String,String> partSpec,
org.apache.hadoop.fs.Path location)
Create partition object with the given info.
|
Partition(Table tbl,
org.apache.hadoop.hive.metastore.api.Partition tp) |
PartitionIterable(Hive db,
Table table,
Map<String,String> partialPartitionSpec,
int batchSize)
Primary constructor that fetches all partitions in a given table, given
a Hive object and a table object, and a partial partition spec.
|
PartitionIterable(Hive db,
Table table,
Map<String,String> partialPartitionSpec,
int batchSize,
boolean getColStats)
Primary constructor that fetches all partitions in a given table, given
a Hive object and a table object, and a partial partition spec.
|
| Modifier and Type | Method and Description |
|---|---|
protected long |
SizeBasedBigTableSelectorForAutoSMJ.getSize(HiveConf conf,
Table table) |
| Modifier and Type | Method and Description |
|---|---|
Table |
RelOptHiveTable.getHiveTableMD() |
| Constructor and Description |
|---|
RelOptHiveTable(org.apache.calcite.plan.RelOptSchema calciteSchema,
org.apache.calcite.rel.type.RelDataTypeFactory typeFactory,
List<String> qualifiedTblName,
org.apache.calcite.rel.type.RelDataType rowType,
Table hiveTblMetadata,
List<ColumnInfo> hiveNonPartitionCols,
List<ColumnInfo> hivePartitionCols,
List<VirtualColumn> hiveVirtualCols,
HiveConf hconf,
Hive db,
ParsedQueryTables tabNameToTabObject,
Map<String,PrunedPartitionList> partitionCache,
Map<String,ColumnStatsList> colStatsCache,
AtomicInteger noColsMissingStats) |
| Modifier and Type | Method and Description |
|---|---|
org.apache.calcite.rel.RelNode |
HiveRelFieldTrimmer.trim(org.apache.calcite.tools.RelBuilder relBuilder,
org.apache.calcite.rel.RelNode root,
ColumnAccessInfo columnAccessInfo,
Map<HiveProject,Table> viewToTableSchema) |
| Modifier and Type | Method and Description |
|---|---|
static Table |
HiveMaterializedViewUtils.extractTable(org.apache.calcite.plan.RelOptMaterialization materialization) |
| Modifier and Type | Method and Description |
|---|---|
static Boolean |
HiveMaterializedViewUtils.isOutdatedMaterializedView(String validTxnsList,
HiveTxnManager txnMgr,
Hive db,
Set<TableName> tablesUsed,
Table materializedViewTable)
Utility method that returns whether a materialized view is outdated (true), not outdated
(false), or it cannot be determined (null).
|
| Modifier and Type | Method and Description |
|---|---|
static boolean |
HiveMaterializedViewUtils.checkPrivilegeForMaterializedViews(List<Table> cachedMVTableList)
Validate if given materialized view has SELECT privileges for current user
|
| Modifier and Type | Method and Description |
|---|---|
Map<String,org.apache.commons.lang3.tuple.Pair<SelectOperator,Table>> |
LineageCtx.Index.getFinalSelectOps() |
| Modifier and Type | Method and Description |
|---|---|
boolean |
OpTraitsRulesProcFactory.TableScanRule.checkBucketedTable(Table tbl,
ParseContext pGraphContext,
PrunedPartitionList prunedParts) |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
PartitionPruner.onlyContainsPartnCols(Table tab,
ExprNodeDesc expr)
Find out whether the condition only contains partitioned columns.
|
static PrunedPartitionList |
PartitionPruner.prune(Table tab,
ExprNodeDesc prunerExpr,
HiveConf conf,
String alias,
Map<String,PrunedPartitionList> prunedPartitionsMap)
Get the partition list for the table that satisfies the partition pruner
condition.
|
| Modifier and Type | Field and Description |
|---|---|
protected Table |
RewriteSemanticAnalyzer.ColumnAppender.table |
Table |
BaseSemanticAnalyzer.TableSpec.tableHandle |
| Modifier and Type | Method and Description |
|---|---|
static Table |
ImportSemanticAnalyzer.createNewTableMetadataObject(ImportTableDesc tblDesc,
boolean isRepl) |
Table |
QueryTables.get(String name) |
Table |
QBMetaData.getDestTableForAlias(String alias) |
Table |
SemanticAnalyzer.getDummyTable() |
Table |
ParsedQueryTables.getParsedTable(String name) |
Table |
QueryTables.getParsedTable(String name) |
Table |
PrunedPartitionList.getSourceTable() |
Table |
QBMetaData.getSrcForAlias(String alias) |
static Table |
AnalyzeCommandUtils.getTable(ASTNode tree,
BaseSemanticAnalyzer sa) |
protected static Table |
RewriteSemanticAnalyzer.getTable(ASTNode tabRef,
Hive db,
boolean throwException) |
protected Table |
BaseSemanticAnalyzer.getTable(String tblName) |
protected Table |
BaseSemanticAnalyzer.getTable(String tblName,
boolean throwException) |
protected Table |
BaseSemanticAnalyzer.getTable(String database,
String tblName,
boolean throwException) |
protected Table |
BaseSemanticAnalyzer.getTable(String database,
String tblName,
String metaTableName,
boolean throwException) |
protected Table |
BaseSemanticAnalyzer.getTable(TableName tn) |
protected Table |
BaseSemanticAnalyzer.getTable(TableName tn,
boolean throwException) |
Table |
QBMetaData.getTableForAlias(String alias) |
Table |
SemanticAnalyzer.getTableObjectByName(String tableName) |
protected Table |
SemanticAnalyzer.getTableObjectByName(String tableName,
boolean throwException) |
protected Table |
CalcitePlanner.getTableObjectByName(String tabName,
boolean throwException) |
protected Table |
SemanticAnalyzer.getTargetTable(QB qb,
String dest) |
static Table |
HiveTableName.setFrom(String dbTable,
Table table)
Set a Table object's table and db names based on the provided string. |
static Table |
ImportSemanticAnalyzer.tableIfExists(ImportTableDesc tblDesc,
Hive db)
Utility method that returns a table if one corresponding to the destination
tblDesc is found.
|
| Modifier and Type | Method and Description |
|---|---|
HashMap<String,Table> |
QBMetaData.getAliasToTable() |
protected List<Table> |
CalcitePlanner.CalcitePlannerAction.getMaterializedViewsUsed(org.apache.calcite.rel.RelNode plan) |
Map<String,Table> |
QBMetaData.getNameToDestTable() |
Map<SelectOperator,Table> |
ParseContext.getViewProjectToTableSchema() |
HashMap<String,Table> |
QB.getViewToTabSchema() |
| Modifier and Type | Method and Description |
|---|---|
protected void |
SplitUpdateSemanticAnalyzer.analyze(ASTNode tree,
Table table,
ASTNode tabNameNode) |
protected abstract void |
RewriteSemanticAnalyzer.analyze(ASTNode tree,
Table table,
ASTNode tableName) |
void |
MergeSemanticAnalyzer.analyze(ASTNode tree,
Table targetTable,
ASTNode tableNameNode) |
protected void |
AcidExportSemanticAnalyzer.analyze(ASTNode tree,
Table table,
ASTNode tableNameNode) |
protected void |
UpdateDeleteSemanticAnalyzer.analyze(ASTNode tree,
Table table,
ASTNode tabNameNode) |
void |
SplitMergeSemanticAnalyzer.analyze(ASTNode tree,
Table targetTable,
ASTNode tableNameNode) |
protected void |
MergeSemanticAnalyzer.analyzeMerge(ASTNode tree,
Table targetTable,
ASTNode targetNameNode)
Here we take a Merge statement AST and generate a semantically equivalent multi-insert
statement to execute.
|
protected void |
RewriteSemanticAnalyzer.checkValidSetClauseTarget(ASTNode colName,
Table targetTable)
Assert that we are not asked to update a bucketing column or partition column.
|
protected Map<String,ASTNode> |
RewriteSemanticAnalyzer.collectSetColumnsAndExpressions(ASTNode setClause,
Set<String> setRCols,
Table targetTable) |
static void |
EximUtil.createExportDump(org.apache.hadoop.fs.FileSystem fs,
org.apache.hadoop.fs.Path metadataPath,
Table tableHandle,
Iterable<Partition> partitions,
ReplicationSpec replicationSpec,
HiveConf hiveConf) |
protected static String |
ColumnStatsSemanticAnalyzer.genRewrittenQuery(Table tbl,
HiveConf conf,
Map<String,String> partSpec,
boolean isPartitionStats,
boolean useTableValues)
Generates a SQL statement that will compute the stats for all columns
included in the input table.
|
protected Map<String,String> |
SemanticAnalyzer.getColNameToDefaultValueMap(Table tbl) |
protected static List<String> |
ColumnStatsSemanticAnalyzer.getColumnTypes(Table tbl,
List<String> colNames) |
protected List<String> |
SemanticAnalyzer.getDefaultConstraints(Table tbl,
List<String> targetSchema)
This method creates a list of default constraints which corresponds to
given schema (targetSchema) or target table's column schema (if targetSchema is null)
|
static Map<Integer,List<ExprNodeGenericFuncDesc>> |
ParseUtils.getFullPartitionSpecs(org.antlr.runtime.tree.CommonTree ast,
Table table,
org.apache.hadoop.conf.Configuration conf,
boolean canGroupExprs)
Get the partition specs from the tree.
|
List<Map<String,String>> |
BaseSemanticAnalyzer.getPartitionSpecs(Table tbl,
org.antlr.runtime.tree.CommonTree ast)
Get the partition specs from the tree.
|
static Map<String,String> |
AnalyzeCommandUtils.getPartKeyValuePairsFromAST(Table tbl,
ASTNode tree,
HiveConf hiveConf) |
static Map<String,String> |
BaseSemanticAnalyzer.getValidatedPartSpec(Table table,
ASTNode astNode,
HiveConf conf,
boolean shouldBeFull) |
boolean |
BaseSemanticAnalyzer.isValidPrefixSpec(Table tTable,
Map<String,String> spec)
Checks if given specification is proper specification for prefix of
partition cols, for table partitioned by ds, hr, min valid ones are
(ds='2008-04-08'), (ds='2008-04-08', hr='12'), (ds='2008-04-08', hr='12', min='30')
invalid one is for example (ds='2008-04-08', min='30')
|
static TableName |
HiveTableName.of(Table table)
|
void |
QueryTables.put(String name,
Table table) |
void |
QB.rewriteViewToSubq(String alias,
String viewName,
QBExpr qbexpr,
Table tab) |
void |
QBMetaData.setDestForAlias(String alias,
Table tab) |
static Table |
HiveTableName.setFrom(String dbTable,
Table table)
Set a Table object's table and db names based on the provided string. |
void |
QBMetaData.setSrcForAlias(String alias,
Table tab) |
protected void |
RewriteSemanticAnalyzer.setUpAccessControlInfoForUpdate(Table mTable,
Map<String,ASTNode> setCols)
For updates, we need to set the column access info so that it contains information on
the columns we are updating.
|
protected void |
RewriteSemanticAnalyzer.updateOutputs(Table targetTable)
SemanticAnalyzer will generate a WriteEntity for the target table since it doesn't know/check
if the read and write are of the same table in "insert ...
|
static void |
BaseSemanticAnalyzer.validatePartColumnType(Table tbl,
Map<String,String> partSpec,
ASTNode astNode,
HiveConf conf) |
static void |
BaseSemanticAnalyzer.validatePartSpec(Table tbl,
Map<String,String> partSpec,
ASTNode astNode,
HiveConf conf,
boolean shouldBeFull) |
protected void |
RewriteSemanticAnalyzer.validateTargetTable(Table mTable)
Assert it supports Acid write.
|
protected static void |
BaseSemanticAnalyzer.validateUnsupportedPartitionClause(Table tbl,
boolean partitionClausePresent)
Throws an UnsupportedOperationException in case the query has a partition clause but the table is not partitioned
on the HMS-level.
|
| Constructor and Description |
|---|
ColumnAppender(Table table,
HiveConf conf,
String subQueryAlias) |
ColumnStatsAutoGatherContext(SemanticAnalyzer sa,
HiveConf conf,
Operator<? extends OperatorDesc> op,
Table tbl,
Map<String,String> partSpec,
boolean isInsertInto,
Context ctx) |
NativeAcidColumnAppender(Table table,
HiveConf conf,
String subQueryAlias) |
NonNativeAcidColumnAppender(Table table,
HiveConf conf,
String subQueryAlias) |
PrunedPartitionList(Table source,
Set<Partition> partitions,
List<String> referred,
boolean hasUnknowns) |
PrunedPartitionList(Table source,
String key,
Set<Partition> partitions,
List<String> referred,
boolean hasUnknowns) |
TableSpec(Table table) |
TableSpec(Table tableHandle,
List<Partition> partitions) |
| Constructor and Description |
|---|
ParseContext(QueryState queryState,
Map<TableScanOperator,ExprNodeDesc> opToPartPruner,
Map<TableScanOperator,PrunedPartitionList> opToPartList,
Map<String,TableScanOperator> topOps,
Set<JoinOperator> joinOps,
Set<SMBMapJoinOperator> smbMapJoinOps,
List<LoadTableDesc> loadTableWork,
List<LoadFileDesc> loadFileWork,
List<ColumnStatsAutoGatherContext> columnStatsAutoGatherContexts,
Context ctx,
Map<String,String> idToTableNameMap,
int destTableId,
UnionProcContext uCtx,
List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoReducer,
Map<String,PrunedPartitionList> prunedPartitions,
QueryTables tabNameToTabObject,
Map<TableScanOperator,FilterDesc.SampleDesc> opToSamplePruner,
GlobalLimitCtx globalLimitCtx,
Map<String,SplitSample> nameToSplitSample,
Set<ReadEntity> semanticInputs,
List<Task<?>> rootTasks,
Map<TableScanOperator,Map<String,ExprNodeDesc>> opToPartToSkewedPruner,
Map<String,ReadEntity> viewAliasToInput,
List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting,
BaseSemanticAnalyzer.AnalyzeRewriteContext analyzeRewrite,
CreateTableDesc createTableDesc,
CreateMaterializedViewDesc createViewDesc,
MaterializedViewUpdateDesc materializedViewUpdateDesc,
QueryProperties queryProperties,
Map<SelectOperator,Table> viewProjectToTableSchema) |
| Modifier and Type | Method and Description |
|---|---|
HiveWrapper.Tuple<Table> |
HiveWrapper.table(String tableName,
HiveConf conf) |
HiveWrapper.Tuple<Table> |
HiveWrapper.table(Table tblObj) |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
Utils.shouldDumpMetaDataOnlyForExternalTables(Table table,
HiveConf conf) |
static boolean |
Utils.shouldReplicate(ReplicationSpec replicationSpec,
Table tableHandle,
boolean isEventDump,
Set<String> bootstrapTableList,
org.apache.hadoop.hive.common.repl.ReplScope oldReplScope,
HiveConf hiveConf)
validates if a table can be exported, similar to EximUtil.shouldExport with few replication
specific checks.
|
HiveWrapper.Tuple<Table> |
HiveWrapper.table(Table tblObj) |
| Constructor and Description |
|---|
TableSerializer(Table tableHandle,
Iterable<Partition> partitions,
HiveConf hiveConf) |
| Modifier and Type | Method and Description |
|---|---|
static org.apache.calcite.rex.RexNode |
RexNodeTypeCheck.genConstraintsExpr(HiveConf conf,
org.apache.calcite.rex.RexBuilder rexBuilder,
Table targetTable,
boolean updateStatement,
RowResolver inputRR) |
static ExprNodeDesc |
ExprNodeTypeCheck.genConstraintsExpr(HiveConf conf,
Table targetTable,
boolean updateStatement,
RowResolver inputRR) |
| Modifier and Type | Field and Description |
|---|---|
Table |
DeferredWorkContext.table |
| Modifier and Type | Method and Description |
|---|---|
Table |
LoadTableDesc.getMdTable() |
Table |
FileSinkDesc.getTable() |
Table |
StatsWork.getTable() |
Table |
TableScanDesc.getTableMetadata() |
Table |
ImportTableDesc.toTable(HiveConf conf) |
| Modifier and Type | Method and Description |
|---|---|
static ExportWork.MmContext |
ExportWork.MmContext.createIfNeeded(Table t) |
static TableDesc |
PartitionDesc.getTableDesc(Table table) |
void |
LoadTableDesc.setMdTable(Table mdTable) |
void |
FileSinkDesc.setTable(Table table) |
void |
TableScanDesc.setTableMetadata(Table tableMetadata) |
| Constructor and Description |
|---|
ImportTableDesc(String dbName,
Table table) |
StatsWork(Table table,
BasicStatsWork basicStatsWork,
HiveConf hconf) |
StatsWork(Table table,
HiveConf hconf) |
TableScanDesc(String alias,
List<VirtualColumn> vcs,
Table tblMetadata) |
TableScanDesc(String alias,
List<VirtualColumn> vcs,
Table tblMetadata,
TableScanOperator.ProbeDecodeContext probeDecodeContext) |
TableScanDesc(String alias,
Table tblMetadata) |
TableScanDesc(Table tblMetadata) |
| Modifier and Type | Class and Description |
|---|---|
static class |
AuthorizationPreEventListener.TableWrapper |
| Modifier and Type | Method and Description |
|---|---|
abstract void |
HiveMultiPartitionAuthorizationProviderBase.authorize(Table table,
Iterable<Partition> partitions,
Privilege[] requiredReadPrivileges,
Privilege[] requiredWritePrivileges)
Authorization method for partition sets.
|
void |
MetaStoreAuthzAPIAuthorizerEmbedOnly.authorize(Table table,
Partition part,
List<String> columns,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv) |
void |
HiveAuthorizationProvider.authorize(Table table,
Partition part,
List<String> columns,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv)
Authorization privileges against a list of columns.
|
void |
BitSetCheckedAuthorizationProvider.authorize(Table table,
Partition part,
List<String> columns,
Privilege[] inputRequiredPriv,
Privilege[] outputRequiredPriv) |
void |
StorageBasedAuthorizationProvider.authorize(Table table,
Partition part,
List<String> columns,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv) |
void |
MetaStoreAuthzAPIAuthorizerEmbedOnly.authorize(Table table,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv) |
void |
HiveAuthorizationProvider.authorize(Table table,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv)
Authorization privileges against a hive table object.
|
void |
BitSetCheckedAuthorizationProvider.authorize(Table table,
Privilege[] inputRequiredPriv,
Privilege[] outputRequiredPriv) |
void |
StorageBasedAuthorizationProvider.authorize(Table table,
Privilege[] readRequiredPriv,
Privilege[] writeRequiredPriv) |
| Constructor and Description |
|---|
PartitionWrapper(Table table,
org.apache.hadoop.hive.metastore.api.Partition mapiPart) |
| Modifier and Type | Method and Description |
|---|---|
Map<String,Map<String,Table>> |
SessionState.getTempTables() |
| Modifier and Type | Method and Description |
|---|---|
abstract Table |
Partish.getTable() |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
StatsUtils.areBasicStatsUptoDateForQueryAnswering(Table table,
Map<String,String> params)
Are the basic stats for the table up-to-date for query planning.
|
static boolean |
StatsUtils.areColumnStatsUptoDateForQueryAnswering(Table table,
Map<String,String> params,
String colName)
Are the column stats for the table up-to-date for query planning.
|
static Partish |
Partish.buildFor(Table table) |
static Partish |
Partish.buildFor(Table table,
Partition part) |
static boolean |
BasicStatsNoJobTask.canUseBasicStats(Table table,
Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormat) |
static boolean |
BasicStatsNoJobTask.canUseFooterScan(Table table,
Class<? extends org.apache.hadoop.mapred.InputFormat> inputFormat) |
static Statistics |
StatsUtils.collectStatistics(HiveConf conf,
PrunedPartitionList partList,
ColumnStatsList colStatsCache,
Table table,
TableScanOperator tableScanOperator)
Collect table, partition and column level statistics
|
static Statistics |
StatsUtils.collectStatistics(HiveConf conf,
PrunedPartitionList partList,
Table table,
List<ColumnInfo> schema,
List<String> neededColumns,
ColumnStatsList colStatsCache,
List<String> referencedColumns,
boolean needColStats) |
static String |
BasicStatsTask.getAggregationPrefix(Table table,
Partition partition) |
static List<Long> |
StatsUtils.getBasicStatForPartitions(Table table,
List<Partition> parts,
String statType)
Get basic stats of partitions
|
static long |
StatsUtils.getBasicStatForTable(Table table,
String statType)
Deprecated.
|
static long |
StatsUtils.getErasureCodedFiles(Table table)
Get number of Erasure Coded files for a table
|
static long |
StatsUtils.getFileSizeForTable(HiveConf conf,
Table table)
Find the bytes on disk occupied by a table
|
static long |
StatsUtils.getNumRows(HiveConf conf,
List<ColumnInfo> schema,
Table table,
PrunedPartitionList partitionList,
AtomicInteger noColsMissingStats)
Returns number of rows if it exists.
|
static long |
StatsUtils.getNumRows(Table table)
Deprecated.
|
static long |
StatsUtils.getRawDataSize(Table table)
Get raw data size of a given table
|
static List<ColStatistics> |
StatsUtils.getTableColumnStats(Table table,
List<ColumnInfo> schema,
List<String> neededColumns,
ColumnStatsList colStatsCache,
boolean fetchColStats)
Get table level column statistics from metastore for needed columns
|
static long |
StatsUtils.getTotalSize(Table table)
Get total size of a given table
|
int |
ColStatsProcessor.persistColumnStats(Hive db,
Table tbl) |
int |
ColStatsProcessor.process(Hive db,
Table tbl) |
int |
IStatsProcessor.process(Hive db,
Table tbl) |
int |
BasicStatsTask.process(Hive db,
Table tbl) |
int |
BasicStatsNoJobTask.process(Hive db,
Table tbl) |
Copyright © 2022 The Apache Software Foundation. All rights reserved.