Package io.trino.plugin.hive.metastore
Class SemiTransactionalHiveMetastore
java.lang.Object
io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore
- All Implemented Interfaces:
SqlStandardAccessControlMetadataMetastore
public class SemiTransactionalHiveMetastore
extends Object
implements SqlStandardAccessControlMetadataMetastore
-
Nested Class Summary
Nested Classes — Modifier and Type | Class | Description: a static class and a static final record (class names and descriptions were lost in extraction).
Constructor Summary
Constructors — Constructor | Description: SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor fileSystemExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, boolean deleteSchemaLocationsFallback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService, TableInvalidationCallback tableInvalidationCallback) -
Method Summary
(Note: this summary table was garbled in extraction; some return types and method names were lost. Methods visible only under Method Details — e.g. getAllDatabases, getTable, getSupportedColumnStatistics, getTableStatistics, commentColumn, dropRole, listRoles, isFinished, checkSupportsHiveAcidTransactions, testOnlyCheckIsReadOnly, testOnlyThrowOnCleanupFailures — are missing below.)
- void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
- void addDynamicPartitions(String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
- void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, Location currentLocation, Optional<List<String>> files, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit)
- void alterPartitions(String dbName, String tableName, List<Partition> partitions, long writeId)
- beginInsert(ConnectorSession session, Table table)
- beginMerge(ConnectorSession session, Table table)
- void beginQuery(ConnectorSession session)
- static void cleanExtraOutputFiles(HdfsEnvironment hdfsEnvironment, HdfsContext hdfsContext, String queryId, Location location, Set<String> filesToKeep)
- void cleanupQuery(ConnectorSession session)
- void commentTable(String databaseName, String tableName, Optional<String> comment)
- void commit()
- void commitTransaction(long transactionId)
- void createDatabase(ConnectorSession session, Database database)
- void createRole(String role, String grantor)
- void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, Optional<List<String>> files, boolean ignoreExisting, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit) — currentLocation needs to be supplied if a writePath exists for the table.
- declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, Location stagingPathRoot, SchemaTableName schemaTableName)
- void dropColumn(String databaseName, String tableName, String columnName)
- void dropDatabase(ConnectorSession session, String schemaName)
- void dropDeclaredIntentionToWrite(String declarationId)
- void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
- void dropTable(ConnectorSession session, String databaseName, String tableName)
- void finishChangingExistingTable(AcidOperation acidOperation, ConnectorSession session, String databaseName, String tableName, Location currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit)
- void finishInsertIntoExistingPartitions(ConnectorSession session, String databaseName, String tableName, List<SemiTransactionalHiveMetastore.PartitionUpdateInfo> partitionUpdateInfos, boolean cleanExtraOutputFilesOnCommit)
- void finishMerge(ConnectorSession session, String databaseName, String tableName, Location currentLocation, List<PartitionUpdateAndMergeResults> partitionUpdateAndMergeResults, List<Partition> partitions)
- generatePageSinkMetadata(SchemaTableName schemaTableName)
- getAllTables(String databaseName)
- getAllViews(String databaseName)
- getDatabase(String databaseName)
- getDatabaseOwner(String databaseName)
- getPartitionNames(String databaseName, String tableName)
- getPartitionNamesByFilter(String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
- getPartitionsByNames(String databaseName, String tableName, List<String> partitionNames)
- getPartitionStatistics(String databaseName, String tableName, Set<String> columns, Set<String> partitionNames)
- getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
- void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
- boolean isReadableWithinTransaction(String databaseName, String tableName)
- listGrantedPrincipals(String role)
- listRoleGrants(HivePrincipal principal)
- listTablePrivileges(String databaseName, String tableName, Optional<HivePrincipal> principal)
- void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
- void renameDatabase(String source, String target)
- void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
- void replaceTable(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
- void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
- void rollback()
- void setDatabaseOwner(String source, HivePrincipal principal)
- void setPartitionStatistics(Table table, Map<List<String>, PartitionStatistics> partitionStatisticsMap)
- void setTableOwner(String schema, String table, HivePrincipal principal)
- void setTableStatistics(Table table, PartitionStatistics tableStatistics)
- boolean shouldDeleteDatabaseData(ConnectorSession session, String schemaName)
- void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
- unsafeGetRawHiveMetastoreClosure() — Get the underlying metastore closure.
- void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
-
Constructor Details
-
SemiTransactionalHiveMetastore
public SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor fileSystemExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, boolean deleteSchemaLocationsFallback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService, TableInvalidationCallback tableInvalidationCallback)
-
-
Method Details
-
getAllDatabases
-
unsafeGetRawHiveMetastoreClosure
Get the underlying metastore closure. Use this method with caution as it bypasses the current transactional state, so modifications made in the transaction are not visible. -
getDatabase
-
getAllTables
-
getAllTables
-
getTable
-
isReadableWithinTransaction
-
getSupportedColumnStatistics
-
getTableStatistics
-
getPartitionStatistics
-
generatePageSinkMetadata
-
getAllViews
-
getAllViews
-
createDatabase
-
dropDatabase
-
shouldDeleteDatabaseData
-
renameDatabase
-
setDatabaseOwner
-
setTableStatistics
-
setPartitionStatistics
public void setPartitionStatistics(Table table, Map<List<String>, PartitionStatistics> partitionStatisticsMap) -
createTable
public void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, Optional<List<String>> files, boolean ignoreExisting, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit) — currentLocation needs to be supplied if a writePath exists for the table. -
dropTable
-
replaceTable
public void replaceTable(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) -
renameTable
-
commentTable
-
setTableOwner
-
commentColumn
-
addColumn
-
renameColumn
-
dropColumn
-
finishChangingExistingTable
public void finishChangingExistingTable(AcidOperation acidOperation, ConnectorSession session, String databaseName, String tableName, Location currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit) -
truncateUnpartitionedTable
public void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) -
finishMerge
public void finishMerge(ConnectorSession session, String databaseName, String tableName, Location currentLocation, List<PartitionUpdateAndMergeResults> partitionUpdateAndMergeResults, List<Partition> partitions) -
getPartitionNames
-
getPartitionNamesByFilter
-
getPartitionsByNames
-
addPartition
public void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, Location currentLocation, Optional<List<String>> files, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit) -
dropPartition
public void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData) -
finishInsertIntoExistingPartitions
public void finishInsertIntoExistingPartitions(ConnectorSession session, String databaseName, String tableName, List<SemiTransactionalHiveMetastore.PartitionUpdateInfo> partitionUpdateInfos, boolean cleanExtraOutputFilesOnCommit) -
createRole
- Specified by:
createRole in interface SqlStandardAccessControlMetadataMetastore
-
dropRole
- Specified by:
dropRole in interface SqlStandardAccessControlMetadataMetastore
-
listRoles
- Specified by:
listRoles in interface SqlStandardAccessControlMetadataMetastore
-
grantRoles
public void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor) - Specified by:
grantRoles in interface SqlStandardAccessControlMetadataMetastore
-
revokeRoles
public void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor) - Specified by:
revokeRoles in interface SqlStandardAccessControlMetadataMetastore
-
listGrantedPrincipals
- Specified by:
listGrantedPrincipals in interface SqlStandardAccessControlMetadataMetastore
-
listRoleGrants
- Specified by:
listRoleGrants in interface SqlStandardAccessControlMetadataMetastore
-
getDatabaseOwner
- Specified by:
getDatabaseOwner in interface SqlStandardAccessControlMetadataMetastore
-
listTablePrivileges
public Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<HivePrincipal> principal) - Specified by:
listTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
grantTablePrivileges
public void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption) - Specified by:
grantTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
revokeTablePrivileges
public void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption) - Specified by:
revokeTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
declareIntentionToWrite
public String declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, Location stagingPathRoot, SchemaTableName schemaTableName) -
dropDeclaredIntentionToWrite
-
isFinished
public boolean isFinished() -
commit
public void commit() -
rollback
public void rollback() -
checkSupportsHiveAcidTransactions
public void checkSupportsHiveAcidTransactions() -
beginQuery
-
beginInsert
-
beginMerge
-
getValidWriteIds
public Optional<ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle) -
cleanupQuery
-
testOnlyCheckIsReadOnly
public void testOnlyCheckIsReadOnly() -
testOnlyThrowOnCleanupFailures
public void testOnlyThrowOnCleanupFailures() -
updateTableWriteId
public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) -
alterPartitions
-
addDynamicPartitions
public void addDynamicPartitions(String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation) -
commitTransaction
public void commitTransaction(long transactionId) -
cleanExtraOutputFiles
public static void cleanExtraOutputFiles(HdfsEnvironment hdfsEnvironment, HdfsContext hdfsContext, String queryId, Location location, Set<String> filesToKeep)
-