Package io.trino.plugin.hive.metastore
Class SemiTransactionalHiveMetastore
- java.lang.Object
-
- io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore
-
- All Implemented Interfaces:
SqlStandardAccessControlMetadataMetastore
public class SemiTransactionalHiveMetastore extends Object implements SqlStandardAccessControlMetadataMetastore
-
-
Nested Class Summary
Nested Classes Modifier and Type Class Description static class SemiTransactionalHiveMetastore.Action<T>
-
Constructor Summary
Constructors Constructor Description SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor renameExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService)
-
Method Summary
All Methods Instance Methods Concrete Methods Modifier and Type Method Description
void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, org.apache.hadoop.hive.metastore.api.DataOperationType operation, boolean isPartitioned)
void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation, PartitionStatistics statistics)
long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId)
void alterPartitions(HiveIdentity identity, String dbName, String tableName, List<Partition> partitions, long writeId)
AcidTransaction beginDelete(ConnectorSession session, Table table)
AcidTransaction beginInsert(ConnectorSession session, Table table)
void beginQuery(ConnectorSession session)
AcidTransaction beginUpdate(ConnectorSession session, Table table, HiveUpdateProcessor updateProcessor)
void cleanupQuery(ConnectorSession session)
void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional<String> comment)
void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional<String> comment)
void commit()
void commitTransaction(HiveIdentity identity, long transactionId)
void createDatabase(HiveIdentity identity, Database database)
void createRole(String role, String grantor)
void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, boolean ignoreExisting, PartitionStatistics statistics) currentLocation needs to be supplied if a writePath exists for the table.
void declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, SchemaTableName schemaTableName)
void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName)
void dropDatabase(HiveIdentity identity, String schemaName)
void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
void dropRole(String role)
void dropTable(ConnectorSession session, String databaseName, String tableName)
void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
void finishRowLevelDelete(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
void finishUpdate(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
HivePageSinkMetadata generatePageSinkMetadata(HiveIdentity identity, SchemaTableName schemaTableName)
List<String> getAllDatabases()
List<String> getAllTables(String databaseName)
List<String> getAllViews(String databaseName)
Optional<Database> getDatabase(String databaseName)
Optional<List<String>> getPartitionNames(HiveIdentity identity, String databaseName, String tableName)
Optional<List<String>> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
Map<String,Optional<Partition>> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List<String> partitionNames)
Map<String,PartitionStatistics> getPartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Set<String> partitionNames)
Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
Optional<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
String getTableOwner(HiveIdentity identity, String databaseName, String tableName)
PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName)
Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
void grantTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
boolean isFinished()
Set<RoleGrant> listGrantedPrincipals(String role)
Set<RoleGrant> listRoleGrants(HivePrincipal principal)
Set<String> listRoles()
Set<HivePrivilegeInfo> listTablePrivileges(HiveIdentity identity, String databaseName, String tableName, Optional<HivePrincipal> principal)
void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName)
void renameDatabase(HiveIdentity identity, String source, String target)
void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName)
void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
void revokeTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
void rollback()
void setDatabaseOwner(HiveIdentity identity, String source, HivePrincipal principal)
void setPartitionStatistics(HiveIdentity identity, Table table, Map<List<String>,PartitionStatistics> partitionStatisticsMap)
void setTableOwner(HiveIdentity identity, String schema, String table, HivePrincipal principal)
void setTableStatistics(HiveIdentity identity, Table table, PartitionStatistics tableStatistics)
void testOnlyCheckIsReadOnly()
void testOnlyThrowOnCleanupFailures()
void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
-
-
-
Constructor Detail
-
SemiTransactionalHiveMetastore
public SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, HiveMetastoreClosure delegate, Executor renameExecutor, Executor dropExecutor, Executor updateExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback, Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval, ScheduledExecutorService heartbeatService)
-
-
Method Detail
-
getTable
public Optional<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
-
getSupportedColumnStatistics
public Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
-
getTableStatistics
public PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName)
-
getPartitionStatistics
public Map<String,PartitionStatistics> getPartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Set<String> partitionNames)
-
generatePageSinkMetadata
public HivePageSinkMetadata generatePageSinkMetadata(HiveIdentity identity, SchemaTableName schemaTableName)
-
createDatabase
public void createDatabase(HiveIdentity identity, Database database)
-
dropDatabase
public void dropDatabase(HiveIdentity identity, String schemaName)
-
renameDatabase
public void renameDatabase(HiveIdentity identity, String source, String target)
-
setDatabaseOwner
public void setDatabaseOwner(HiveIdentity identity, String source, HivePrincipal principal)
-
setTableStatistics
public void setTableStatistics(HiveIdentity identity, Table table, PartitionStatistics tableStatistics)
-
setPartitionStatistics
public void setPartitionStatistics(HiveIdentity identity, Table table, Map<List<String>,PartitionStatistics> partitionStatisticsMap)
-
createTable
public void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, boolean ignoreExisting, PartitionStatistics statistics)
currentLocation needs to be supplied if a writePath exists for the table.
-
dropTable
public void dropTable(ConnectorSession session, String databaseName, String tableName)
-
replaceTable
public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
-
renameTable
public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName)
-
commentTable
public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional<String> comment)
-
setTableOwner
public void setTableOwner(HiveIdentity identity, String schema, String table, HivePrincipal principal)
-
commentColumn
public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional<String> comment)
-
addColumn
public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
-
renameColumn
public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName)
-
dropColumn
public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName)
-
finishInsertIntoExistingTable
public void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
-
truncateUnpartitionedTable
public void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
-
finishRowLevelDelete
public void finishRowLevelDelete(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
-
finishUpdate
public void finishUpdate(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
-
getPartitionNames
public Optional<List<String>> getPartitionNames(HiveIdentity identity, String databaseName, String tableName)
-
getPartitionNamesByFilter
public Optional<List<String>> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
-
getPartitionsByNames
public Map<String,Optional<Partition>> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List<String> partitionNames)
-
addPartition
public void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation, PartitionStatistics statistics)
-
dropPartition
public void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
-
finishInsertIntoExistingPartition
public void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames, PartitionStatistics statisticsUpdate)
-
createRole
public void createRole(String role, String grantor)
- Specified by:
createRole in interface SqlStandardAccessControlMetadataMetastore
-
dropRole
public void dropRole(String role)
- Specified by:
dropRole in interface SqlStandardAccessControlMetadataMetastore
-
listRoles
public Set<String> listRoles()
- Specified by:
listRoles in interface SqlStandardAccessControlMetadataMetastore
-
grantRoles
public void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- Specified by:
grantRoles in interface SqlStandardAccessControlMetadataMetastore
-
revokeRoles
public void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
- Specified by:
revokeRoles in interface SqlStandardAccessControlMetadataMetastore
-
listGrantedPrincipals
public Set<RoleGrant> listGrantedPrincipals(String role)
- Specified by:
listGrantedPrincipals in interface SqlStandardAccessControlMetadataMetastore
-
listRoleGrants
public Set<RoleGrant> listRoleGrants(HivePrincipal principal)
- Specified by:
listRoleGrants in interface SqlStandardAccessControlMetadataMetastore
-
listTablePrivileges
public Set<HivePrivilegeInfo> listTablePrivileges(HiveIdentity identity, String databaseName, String tableName, Optional<HivePrincipal> principal)
- Specified by:
listTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
getTableOwner
public String getTableOwner(HiveIdentity identity, String databaseName, String tableName)
-
grantTablePrivileges
public void grantTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
- Specified by:
grantTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
revokeTablePrivileges
public void revokeTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, Set<HivePrivilegeInfo> privileges)
- Specified by:
revokeTablePrivileges in interface SqlStandardAccessControlMetadataMetastore
-
declareIntentionToWrite
public void declareIntentionToWrite(ConnectorSession session, LocationHandle.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, SchemaTableName schemaTableName)
-
isFinished
public boolean isFinished()
-
commit
public void commit()
-
rollback
public void rollback()
-
beginQuery
public void beginQuery(ConnectorSession session)
-
beginInsert
public AcidTransaction beginInsert(ConnectorSession session, Table table)
-
beginDelete
public AcidTransaction beginDelete(ConnectorSession session, Table table)
-
beginUpdate
public AcidTransaction beginUpdate(ConnectorSession session, Table table, HiveUpdateProcessor updateProcessor)
-
getValidWriteIds
public Optional<org.apache.hadoop.hive.common.ValidTxnWriteIdList> getValidWriteIds(ConnectorSession session, HiveTableHandle tableHandle)
-
cleanupQuery
public void cleanupQuery(ConnectorSession session)
-
testOnlyCheckIsReadOnly
public void testOnlyCheckIsReadOnly()
-
testOnlyThrowOnCleanupFailures
public void testOnlyThrowOnCleanupFailures()
-
allocateWriteId
public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId)
-
acquireTableWriteLock
public void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, org.apache.hadoop.hive.metastore.api.DataOperationType operation, boolean isPartitioned)
-
updateTableWriteId
public void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
-
alterPartitions
public void alterPartitions(HiveIdentity identity, String dbName, String tableName, List<Partition> partitions, long writeId)
-
addDynamicPartitions
public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
-
commitTransaction
public void commitTransaction(HiveIdentity identity, long transactionId)
-
-