public class SemiTransactionalHiveMetastore extends java.lang.Object
| Modifier and Type | Class and Description |
|---|---|
| static class | SemiTransactionalHiveMetastore.Action<T> |
| static class | SemiTransactionalHiveMetastore.PartitionAndMore |
| static class | SemiTransactionalHiveMetastore.WriteMode |
| Constructor and Description |
|---|
| SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, Executor renameExecutor, boolean skipDeletionForAlter) |
| Modifier and Type | Method and Description |
|---|---|
| void | addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) |
| void | addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation) |
| void | commit() |
| void | createDatabase(Database database) |
| void | createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, boolean ignoreExisting) — currentLocation needs to be supplied if a writePath exists for the table. |
| void | declareIntentionToWrite(ConnectorSession session, SemiTransactionalHiveMetastore.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, String filePrefix, SchemaTableName schemaTableName) |
| void | dropColumn(String databaseName, String tableName, String columnName) |
| void | dropDatabase(String schemaName) |
| void | dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues) |
| void | dropTable(ConnectorSession session, String databaseName, String tableName) |
| void | finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames) |
| void | finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames) |
| HivePageSinkMetadata | generatePageSinkMetadata(SchemaTableName schemaTableName) |
| List<String> | getAllDatabases() |
| Optional<List<String>> | getAllTables(String databaseName) |
| Optional<List<String>> | getAllViews(String databaseName) |
| Optional<Database> | getDatabase(String databaseName) |
| Set<HivePrivilegeInfo> | getDatabasePrivileges(String user, String databaseName) |
| Optional<Partition> | getPartition(String databaseName, String tableName, List<String> partitionValues) |
| Optional<Map<String,Map<String,HiveColumnStatistics>>> | getPartitionColumnStatistics(String databaseName, String tableName, Set<String> partitionNames, Set<String> columnNames) |
| Optional<List<String>> | getPartitionNames(String databaseName, String tableName) |
| Optional<List<String>> | getPartitionNamesByParts(String databaseName, String tableName, List<String> parts) |
| Map<String,Optional<Partition>> | getPartitionsByNames(String databaseName, String tableName, List<String> partitionNames) |
| Set<String> | getRoles(String user) |
| Optional<Table> | getTable(String databaseName, String tableName) |
| Optional<Map<String,HiveColumnStatistics>> | getTableColumnStatistics(String databaseName, String tableName, Set<String> columnNames) |
| Set<HivePrivilegeInfo> | getTablePrivileges(String user, String databaseName, String tableName) |
| void | grantTablePrivileges(String databaseName, String tableName, String grantee, Set<HivePrivilegeInfo> privileges) |
| void | renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) |
| void | renameDatabase(String source, String target) |
| void | renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) |
| void | replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) |
| void | revokeTablePrivileges(String databaseName, String tableName, String grantee, Set<HivePrivilegeInfo> privileges) |
| void | rollback() |
| void | testOnlyCheckIsReadOnly() |
| void | testOnlyThrowOnCleanupFailures() |
| void | truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) |
public SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, Executor renameExecutor, boolean skipDeletionForAlter)
public Optional<Map<String,HiveColumnStatistics>> getTableColumnStatistics(String databaseName, String tableName, Set<String> columnNames)
public Optional<Map<String,Map<String,HiveColumnStatistics>>> getPartitionColumnStatistics(String databaseName, String tableName, Set<String> partitionNames, Set<String> columnNames)
public HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName)
public void createDatabase(Database database)
public void dropDatabase(String schemaName)
public void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<org.apache.hadoop.fs.Path> currentPath, boolean ignoreExisting)
currentLocation needs to be supplied if a writePath exists for the table.
public void dropTable(ConnectorSession session, String databaseName, String tableName)
public void replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges)
public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
public void finishInsertIntoExistingTable(ConnectorSession session, String databaseName, String tableName, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames)
public void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
public Optional<List<String>> getPartitionNames(String databaseName, String tableName)
public Optional<List<String>> getPartitionNamesByParts(String databaseName, String tableName, List<String> parts)
public Optional<Partition> getPartition(String databaseName, String tableName, List<String> partitionValues)
public Map<String,Optional<Partition>> getPartitionsByNames(String databaseName, String tableName, List<String> partitionNames)
public void addPartition(ConnectorSession session, String databaseName, String tableName, Partition partition, org.apache.hadoop.fs.Path currentLocation)
public void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues)
public void finishInsertIntoExistingPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, org.apache.hadoop.fs.Path currentLocation, List<String> fileNames)
public Set<HivePrivilegeInfo> getDatabasePrivileges(String user, String databaseName)
public Set<HivePrivilegeInfo> getTablePrivileges(String user, String databaseName, String tableName)
public void grantTablePrivileges(String databaseName, String tableName, String grantee, Set<HivePrivilegeInfo> privileges)
public void revokeTablePrivileges(String databaseName, String tableName, String grantee, Set<HivePrivilegeInfo> privileges)
public void declareIntentionToWrite(ConnectorSession session, SemiTransactionalHiveMetastore.WriteMode writeMode, org.apache.hadoop.fs.Path stagingPathRoot, String filePrefix, SchemaTableName schemaTableName)
public void commit()
public void rollback()
public void testOnlyCheckIsReadOnly()
public void testOnlyThrowOnCleanupFailures()
Copyright © 2012-2017. All Rights Reserved.