public class SemiTransactionalHiveMetastore extends Object
| Modifier and Type | Class and Description |
|---|---|
static class |
SemiTransactionalHiveMetastore.Action<T> |
| Constructor and Description |
|---|
SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment,
HiveMetastoreClosure delegate,
Executor renameExecutor,
Executor dropExecutor,
boolean skipDeletionForAlter,
boolean skipTargetCleanupOnRollback,
Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval,
ScheduledExecutorService heartbeatService) |
| Modifier and Type | Method and Description |
|---|---|
void |
addColumn(HiveIdentity identity,
String databaseName,
String tableName,
String columnName,
HiveType columnType,
String columnComment) |
void |
addPartition(ConnectorSession session,
String databaseName,
String tableName,
Partition partition,
org.apache.hadoop.fs.Path currentLocation,
PartitionStatistics statistics) |
void |
beginQuery(ConnectorSession session) |
void |
cleanupQuery(ConnectorSession session) |
void |
commentTable(HiveIdentity identity,
String databaseName,
String tableName,
Optional<String> comment) |
void |
commit() |
void |
createDatabase(HiveIdentity identity,
Database database) |
void |
createRole(String role,
String grantor) |
void |
createTable(ConnectorSession session,
Table table,
PrincipalPrivileges principalPrivileges,
Optional<org.apache.hadoop.fs.Path> currentPath,
boolean ignoreExisting,
PartitionStatistics statistics)
currentLocation needs to be supplied if a writePath exists for the table. |
void |
declareIntentionToWrite(ConnectorSession session,
LocationHandle.WriteMode writeMode,
org.apache.hadoop.fs.Path stagingPathRoot,
SchemaTableName schemaTableName) |
void |
dropColumn(HiveIdentity identity,
String databaseName,
String tableName,
String columnName) |
void |
dropDatabase(HiveIdentity identity,
String schemaName) |
void |
dropPartition(ConnectorSession session,
String databaseName,
String tableName,
List<String> partitionValues,
boolean deleteData) |
void |
dropRole(String role) |
void |
dropTable(ConnectorSession session,
String databaseName,
String tableName) |
void |
finishInsertIntoExistingPartition(ConnectorSession session,
String databaseName,
String tableName,
List<String> partitionValues,
org.apache.hadoop.fs.Path currentLocation,
List<String> fileNames,
PartitionStatistics statisticsUpdate) |
void |
finishInsertIntoExistingTable(ConnectorSession session,
String databaseName,
String tableName,
org.apache.hadoop.fs.Path currentLocation,
List<String> fileNames,
PartitionStatistics statisticsUpdate) |
HivePageSinkMetadata |
generatePageSinkMetadata(HiveIdentity identity,
SchemaTableName schemaTableName) |
List<String> |
getAllDatabases() |
List<String> |
getAllTables(String databaseName) |
List<String> |
getAllViews(String databaseName) |
Optional<Database> |
getDatabase(String databaseName) |
Optional<List<String>> |
getPartitionNames(HiveIdentity identity,
String databaseName,
String tableName) |
Optional<List<String>> |
getPartitionNamesByParts(HiveIdentity identity,
String databaseName,
String tableName,
List<String> parts) |
Map<String,Optional<Partition>> |
getPartitionsByNames(HiveIdentity identity,
String databaseName,
String tableName,
List<String> partitionNames) |
Map<String,PartitionStatistics> |
getPartitionStatistics(HiveIdentity identity,
String databaseName,
String tableName,
Set<String> partitionNames) |
Set<ColumnStatisticType> |
getSupportedColumnStatistics(Type type) |
Optional<Table> |
getTable(HiveIdentity identity,
String databaseName,
String tableName) |
PartitionStatistics |
getTableStatistics(HiveIdentity identity,
String databaseName,
String tableName) |
Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> |
getValidWriteIds(ConnectorSession session,
HiveTableHandle tableHandle) |
void |
grantRoles(Set<String> roles,
Set<HivePrincipal> grantees,
boolean adminOption,
HivePrincipal grantor) |
void |
grantTablePrivileges(HiveIdentity identity,
String databaseName,
String tableName,
HivePrincipal grantee,
Set<HivePrivilegeInfo> privileges) |
Set<RoleGrant> |
listRoleGrants(HivePrincipal principal) |
Set<String> |
listRoles() |
Set<HivePrivilegeInfo> |
listTablePrivileges(HiveIdentity identity,
String databaseName,
String tableName,
Optional<HivePrincipal> principal) |
void |
renameColumn(HiveIdentity identity,
String databaseName,
String tableName,
String oldColumnName,
String newColumnName) |
void |
renameDatabase(HiveIdentity identity,
String source,
String target) |
void |
renameTable(HiveIdentity identity,
String databaseName,
String tableName,
String newDatabaseName,
String newTableName) |
void |
replaceTable(HiveIdentity identity,
String databaseName,
String tableName,
Table table,
PrincipalPrivileges principalPrivileges) |
void |
revokeRoles(Set<String> roles,
Set<HivePrincipal> grantees,
boolean adminOption,
HivePrincipal grantor) |
void |
revokeTablePrivileges(HiveIdentity identity,
String databaseName,
String tableName,
HivePrincipal grantee,
Set<HivePrivilegeInfo> privileges) |
void |
rollback() |
void |
setDatabaseOwner(HiveIdentity identity,
String source,
HivePrincipal principal) |
void |
setPartitionStatistics(HiveIdentity identity,
Table table,
Map<List<String>,PartitionStatistics> partitionStatisticsMap) |
void |
setTableStatistics(HiveIdentity identity,
Table table,
PartitionStatistics tableStatistics) |
void |
testOnlyCheckIsReadOnly() |
void |
testOnlyThrowOnCleanupFailures() |
void |
truncateUnpartitionedTable(ConnectorSession session,
String databaseName,
String tableName) |
public SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor renameExecutor, Executor dropExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService)
public Optional<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
public Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
public PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName)
public Map<String,PartitionStatistics> getPartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Set<String> partitionNames)
public HivePageSinkMetadata generatePageSinkMetadata(HiveIdentity identity, SchemaTableName schemaTableName)
public void createDatabase(HiveIdentity identity, Database database)
public void dropDatabase(HiveIdentity identity, String schemaName)
public void renameDatabase(HiveIdentity identity, String source, String target)
public void setDatabaseOwner(HiveIdentity identity, String source, HivePrincipal principal)
public void setTableStatistics(HiveIdentity identity, Table table, PartitionStatistics tableStatistics)
public void setPartitionStatistics(HiveIdentity identity, Table table, Map<List<String>,PartitionStatistics> partitionStatisticsMap)
public void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, boolean ignoreExisting, PartitionStatistics statistics)
currentLocation needs to be supplied if a writePath exists for the table.
public void dropTable(ConnectorSession session, String databaseName, String tableName)
public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName)
public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional<String> comment)
public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName)
public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName)
public void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
public void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
public Optional<List<String>> getPartitionNames(HiveIdentity identity, String databaseName, String tableName)
public Optional<List<String>> getPartitionNamesByParts(HiveIdentity identity, String databaseName, String tableName, List<String> parts)
public Map<String,Optional<Partition>> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List<String> partitionNames)
public void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation, PartitionStatistics statistics)
public void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
public void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
public void dropRole(String role)
public void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
public void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
public Set<RoleGrant> listRoleGrants(HivePrincipal principal)
public Set<HivePrivilegeInfo> listTablePrivileges(HiveIdentity identity, String databaseName, String tableName, Optional<HivePrincipal> principal)
public void grantTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
public void revokeTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
public void declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, SchemaTableName schemaTableName)
public void commit()
public void rollback()
public void beginQuery(ConnectorSession session)
public Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
public void cleanupQuery(ConnectorSession session)
public void testOnlyCheckIsReadOnly()
public void testOnlyThrowOnCleanupFailures()
Copyright © 2012–2020. All rights reserved.