Package io.trino.plugin.hive.metastore
Class SemiTransactionalHiveMetastore
- java.lang.Object
-
- io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore
-
- All Implemented Interfaces:
SqlStandardAccessControlMetadataMetastore
public class SemiTransactionalHiveMetastore extends Object implements SqlStandardAccessControlMetadataMetastore
-
-
Nested Class Summary
Nested Classes — Modifier and Type | Class | Description
static class SemiTransactionalHiveMetastore.Action<T>
-
Constructor Summary
Constructors Constructor Description SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor renameExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, boolean deleteSchemaLocationsFallback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService, TableInvalidationCallback tableInvalidationCallback)
-
Method Summary
All Methods · Static Methods · Instance Methods · Concrete Methods

void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
void addDynamicPartitions(String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation, Optional<List<String>> files, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit)
void alterPartitions(String dbName, String tableName, List<Partition> partitions, long writeId)
AcidTransaction beginDelete(ConnectorSession session, Table table)
AcidTransaction beginInsert(ConnectorSession session, Table table)
void beginQuery(ConnectorSession session)
AcidTransaction beginUpdate(ConnectorSession session, Table table, HiveUpdateProcessor updateProcessor)
static void cleanExtraOutputFiles(HdfsEnvironment hdfsEnvironment, HdfsEnvironment.HdfsContext hdfsContext, String queryId, org.apache.hadoop.fs.Path path, Set<String> filesToKeep)
void cleanupQuery(ConnectorSession session)
void commentColumn(String databaseName, String tableName, String columnName, Optional<String> comment)
void commentTable(String databaseName, String tableName, Optional<String> comment)
void commit()
void commitTransaction(long transactionId)
void createDatabase(Database database)
void createRole(String role, String grantor)
void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, Optional<List<String>> files, boolean ignoreExisting, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit) — currentLocation needs to be supplied if a writePath exists for the table.
String declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, SchemaTableName schemaTableName)
void dropColumn(String databaseName, String tableName, String columnName)
void dropDatabase(ConnectorSession session, String schemaName)
void dropDeclaredIntentionToWrite(String declarationId)
void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
void dropRole(String role)
void dropTable(ConnectorSession session, String databaseName, String tableName)
void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit)
void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit)
void finishRowLevelDelete(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
void finishUpdate(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName)
List<String> getAllDatabases()
List<String> getAllTables(String databaseName)
List<String> getAllViews(String databaseName)
Optional<Database> getDatabase(String databaseName)
Optional<List<String>> getPartitionNames(String databaseName, String tableName)
Optional<List<String>> getPartitionNamesByFilter(String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
Map<String,Optional<Partition>> getPartitionsByNames(String databaseName, String tableName, List<String> partitionNames)
Map<String,PartitionStatistics> getPartitionStatistics(String databaseName, String tableName, Set<String> partitionNames)
Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
Optional<Table> getTable(String databaseName, String tableName)
PartitionStatistics getTableStatistics(String databaseName, String tableName)
Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
boolean isFinished()
boolean isReadableWithinTransaction(String databaseName, String tableName)
Set<RoleGrant> listGrantedPrincipals(String role)
Set<RoleGrant> listRoleGrants(HivePrincipal principal)
Set<String> listRoles()
Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<HivePrincipal> principal)
void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
void renameDatabase(String source, String target)
void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
void replaceTable(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
void rollback()
void setDatabaseOwner(String source, HivePrincipal principal)
void setPartitionStatistics(Table table, Map<List<String>,PartitionStatistics> partitionStatisticsMap)
void setTableOwner(String schema, String table, HivePrincipal principal)
void setTableStatistics(Table table, PartitionStatistics tableStatistics)
void testOnlyCheckIsReadOnly()
void testOnlyThrowOnCleanupFailures()
void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
HiveMetastoreClosure unsafeGetRawHiveMetastoreClosure() — Get the underlying metastore closure.
void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
-
-
-
Constructor Detail
-
SemiTransactionalHiveMetastore
public SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor renameExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, boolean deleteSchemaLocationsFallback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService, TableInvalidationCallback tableInvalidationCallback)
-
-
Method Detail
-
unsafeGetRawHiveMetastoreClosure
public HiveMetastoreClosure unsafeGetRawHiveMetastoreClosure()
Get the underlying metastore closure. Use this method with caution as it bypasses the current transactional state, so modifications made in the transaction are not visible through it.
-
isReadableWithinTransaction
public boolean isReadableWithinTransaction(String databaseName, String tableName)
-
getSupportedColumnStatistics
public Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
-
getTableStatistics
public PartitionStatistics getTableStatistics(String databaseName, String tableName)
-
getPartitionStatistics
public Map<String,PartitionStatistics> getPartitionStatistics(String databaseName, String tableName, Set<String> partitionNames)
-
generatePageSinkMetadata
public HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName)
-
createDatabase
public void createDatabase(Database database)
-
dropDatabase
public void dropDatabase(ConnectorSession session, String schemaName)
-
setDatabaseOwner
public void setDatabaseOwner(String source, HivePrincipal principal)
-
setTableStatistics
public void setTableStatistics(Table table, PartitionStatistics tableStatistics)
-
setPartitionStatistics
public void setPartitionStatistics(Table table, Map<List<String>,PartitionStatistics> partitionStatisticsMap)
-
createTable
public void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, Optional<List<String>> files, boolean ignoreExisting, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit)
currentLocation needs to be supplied if a writePath exists for the table.
-
dropTable
public void dropTable(ConnectorSession session, String databaseName, String tableName)
-
replaceTable
public void replaceTable(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
-
renameTable
public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
-
commentTable
public void commentTable(String databaseName, String tableName, Optional<String> comment)
-
setTableOwner
public void setTableOwner(String schema, String table, HivePrincipal principal)
-
commentColumn
public void commentColumn(String databaseName, String tableName, String columnName, Optional<String> comment)
-
addColumn
public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
-
renameColumn
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
-
finishInsertIntoExistingTable
public void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit)
-
truncateUnpartitionedTable
public void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
-
finishRowLevelDelete
public void finishRowLevelDelete(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
-
finishUpdate
public void finishUpdate(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
-
getPartitionNames
public Optional<List<String>> getPartitionNames(String databaseName, String tableName)
-
getPartitionNamesByFilter
public Optional<List<String>> getPartitionNamesByFilter(String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
-
getPartitionsByNames
public Map<String,Optional<Partition>> getPartitionsByNames(String databaseName, String tableName, List<String> partitionNames)
-
addPartition
public void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation, Optional<List<String>> files, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit)
-
dropPartition
public void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
-
finishInsertIntoExistingPartition
public void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit)
-
createRole
public void createRole(String role, String grantor)
- Specified by:
createRole in interface SqlStandardAccessControlMetadataMetastore
-
dropRole
public void dropRole(String role)
- Specified by:
dropRole in interface SqlStandardAccessControlMetadataMetastore
-
listRoles
public Set<String> listRoles()
- Specified by:
listRoles in interface SqlStandardAccessControlMetadataMetastore
-
grantRoles
public void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- Specified by:
grantRoles in interface SqlStandardAccessControlMetadataMetastore
-
revokeRoles
public void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- Specified by:
revokeRoles in interface SqlStandardAccessControlMetadataMetastore
-
listGrantedPrincipals
public Set<RoleGrant> listGrantedPrincipals(String role)
- Specified by:
listGrantedPrincipals in interface SqlStandardAccessControlMetadataMetastore
-
listRoleGrants
public Set<RoleGrant> listRoleGrants(HivePrincipal principal)
- Specified by:
listRoleGrants in interface SqlStandardAccessControlMetadataMetastore
-
listTablePrivileges
public Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<HivePrincipal> principal)
- Specified by:
listTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
grantTablePrivileges
public void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
- Specified by:
grantTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
revokeTablePrivileges
public void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilegeInfo.HivePrivilege> privileges, boolean grantOption)
- Specified by:
revokeTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
declareIntentionToWrite
public String declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, SchemaTableName schemaTableName)
-
dropDeclaredIntentionToWrite
public void dropDeclaredIntentionToWrite(String declarationId)
-
isFinished
public boolean isFinished()
-
commit
public void commit()
-
rollback
public void rollback()
-
beginQuery
public void beginQuery(ConnectorSession session)
-
beginInsert
public AcidTransaction beginInsert(ConnectorSession session, Table table)
-
beginDelete
public AcidTransaction beginDelete(ConnectorSession session, Table table)
-
beginUpdate
public AcidTransaction beginUpdate(ConnectorSession session, Table table, HiveUpdateProcessor updateProcessor)
-
getValidWriteIds
public Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
-
cleanupQuery
public void cleanupQuery(ConnectorSession session)
-
testOnlyCheckIsReadOnly
public void testOnlyCheckIsReadOnly()
-
testOnlyThrowOnCleanupFailures
public void testOnlyThrowOnCleanupFailures()
-
updateTableWriteId
public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
-
alterPartitions
public void alterPartitions(String dbName, String tableName, List<Partition> partitions, long writeId)
-
addDynamicPartitions
public void addDynamicPartitions(String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
-
commitTransaction
public void commitTransaction(long transactionId)
-
cleanExtraOutputFiles
public static void cleanExtraOutputFiles(HdfsEnvironment hdfsEnvironment, HdfsEnvironment.HdfsContext hdfsContext, String queryId, org.apache.hadoop.fs.Path path, Set<String> filesToKeep)
-
-