| Constructor and Description |
|---|
BackgroundHiveSplitLoader(Table table,
Iterable<HivePartitionMetadata> partitions,
TupleDomain<? extends ColumnHandle> compactEffectivePredicate,
Optional<BackgroundHiveSplitLoader.BucketSplitInfo> tableBucketInfo,
ConnectorSession session,
HdfsEnvironment hdfsEnvironment,
NamenodeStats namenodeStats,
DirectoryLister directoryLister,
Executor executor,
int loaderConcurrency,
boolean recursiveDirWalkerEnabled,
boolean ignoreAbsentPartitions,
Optional<org.apache.hadoop.hive.common.ValidWriteIdList> validWriteIds) |
GenericHiveRecordCursorProvider(HdfsEnvironment hdfsEnvironment,
io.airlift.units.DataSize textMaxLineLength) |
GenericHiveRecordCursorProvider(HdfsEnvironment hdfsEnvironment,
HiveConfig config) |
HiveLocationService(HdfsEnvironment hdfsEnvironment) |
HiveMetadata(CatalogName catalogName,
SemiTransactionalHiveMetastore metastore,
HdfsEnvironment hdfsEnvironment,
HivePartitionManager partitionManager,
org.joda.time.DateTimeZone timeZone,
boolean allowCorruptWritesForTesting,
boolean writesToNonManagedTablesEnabled,
boolean createsOfNonManagedTablesEnabled,
boolean translateHiveViews,
TypeManager typeManager,
LocationService locationService,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
TypeTranslator typeTranslator,
String prestoVersion,
HiveStatisticsProvider hiveStatisticsProvider,
AccessControlMetadata accessControlMetadata) |
HiveMetadataFactory(CatalogName catalogName,
HiveConfig hiveConfig,
HiveMetastore metastore,
HdfsEnvironment hdfsEnvironment,
HivePartitionManager partitionManager,
ExecutorService executorService,
ScheduledExecutorService heartbeatService,
TypeManager typeManager,
LocationService locationService,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
TypeTranslator typeTranslator,
NodeVersion nodeVersion,
AccessControlMetadataFactory accessControlMetadataFactory) |
HiveMetadataFactory(CatalogName catalogName,
HiveMetastore metastore,
HdfsEnvironment hdfsEnvironment,
HivePartitionManager partitionManager,
org.joda.time.DateTimeZone timeZone,
int maxConcurrentFileRenames,
int maxConcurrentMetastoreDrops,
boolean allowCorruptWritesForTesting,
boolean skipDeletionForAlter,
boolean skipTargetCleanupOnRollback,
boolean writesToNonManagedTablesEnabled,
boolean createsOfNonManagedTablesEnabled,
boolean translateHiveViews,
long perTransactionCacheMaximumSize,
Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval,
TypeManager typeManager,
LocationService locationService,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
ExecutorService executorService,
ScheduledExecutorService heartbeatService,
TypeTranslator typeTranslator,
String prestoVersion,
AccessControlMetadataFactory accessControlMetadataFactory) |
HivePageSink(HiveWriterFactory writerFactory,
List<HiveColumnHandle> inputColumns,
Optional<HiveBucketProperty> bucketProperty,
PageIndexerFactory pageIndexerFactory,
HdfsEnvironment hdfsEnvironment,
int maxOpenWriters,
com.google.common.util.concurrent.ListeningExecutorService writeVerificationExecutor,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
ConnectorSession session) |
HivePageSinkProvider(Set<HiveFileWriterFactory> fileWriterFactories,
HdfsEnvironment hdfsEnvironment,
PageSorter pageSorter,
HiveMetastore metastore,
PageIndexerFactory pageIndexerFactory,
TypeManager typeManager,
HiveConfig config,
LocationService locationService,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
NodeManager nodeManager,
io.airlift.event.client.EventClient eventClient,
HiveSessionProperties hiveSessionProperties,
HiveWriterStats hiveWriterStats) |
HivePageSourceProvider(TypeManager typeManager,
HiveConfig hiveConfig,
HdfsEnvironment hdfsEnvironment,
Set<HivePageSourceFactory> pageSourceFactories,
Set<HiveRecordCursorProvider> cursorProviders,
GenericHiveRecordCursorProvider genericCursorProvider) |
HiveSplitManager(Function<HiveTransactionHandle,SemiTransactionalHiveMetastore> metastoreProvider,
HivePartitionManager partitionManager,
NamenodeStats namenodeStats,
HdfsEnvironment hdfsEnvironment,
DirectoryLister directoryLister,
Executor executor,
CoercionPolicy coercionPolicy,
io.airlift.stats.CounterStat highMemorySplitSourceCounter,
int maxOutstandingSplits,
io.airlift.units.DataSize maxOutstandingSplitsSize,
int minPartitionBatchSize,
int maxPartitionBatchSize,
int maxInitialSplits,
int splitLoaderConcurrency,
Integer maxSplitsPerSecond,
boolean recursiveDfsWalkerEnabled) |
HiveSplitManager(HiveConfig hiveConfig,
Function<HiveTransactionHandle,SemiTransactionalHiveMetastore> metastoreProvider,
HivePartitionManager partitionManager,
NamenodeStats namenodeStats,
HdfsEnvironment hdfsEnvironment,
DirectoryLister directoryLister,
ExecutorService executorService,
VersionEmbedder versionEmbedder,
CoercionPolicy coercionPolicy) |
HiveWriterFactory(Set<HiveFileWriterFactory> fileWriterFactories,
String schemaName,
String tableName,
boolean isCreateTable,
List<HiveColumnHandle> inputColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
Map<String,String> additionalTableParameters,
OptionalInt bucketCount,
List<SortingColumn> sortedBy,
LocationHandle locationHandle,
LocationService locationService,
String queryId,
HivePageSinkMetadataProvider pageSinkMetadataProvider,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
PageSorter pageSorter,
io.airlift.units.DataSize sortBufferSize,
int maxOpenSortFiles,
boolean immutablePartitions,
ConnectorSession session,
NodeManager nodeManager,
io.airlift.event.client.EventClient eventClient,
HiveSessionProperties hiveSessionProperties,
HiveWriterStats hiveWriterStats) |
RcFileFileWriterFactory(HdfsEnvironment hdfsEnvironment,
TypeManager typeManager,
NodeVersion nodeVersion,
org.joda.time.DateTimeZone hiveStorageTimeZone,
FileFormatDataSourceStats stats) |
RcFileFileWriterFactory(HdfsEnvironment hdfsEnvironment,
TypeManager typeManager,
NodeVersion nodeVersion,
HiveConfig hiveConfig,
FileFormatDataSourceStats stats) |
| Constructor and Description |
|---|
SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment,
HiveMetastoreClosure delegate,
Executor renameExecutor,
Executor dropExecutor,
boolean skipDeletionForAlter,
boolean skipTargetCleanupOnRollback,
Optional<io.airlift.units.Duration> hiveTransactionHeartbeatInterval,
ScheduledExecutorService heartbeatService) |
| Constructor and Description |
|---|
FileHiveMetastore(HdfsEnvironment hdfsEnvironment,
FileHiveMetastoreConfig config) |
FileHiveMetastore(HdfsEnvironment hdfsEnvironment,
String catalogDirectory,
String metastoreUser) |
| Constructor and Description |
|---|
GlueHiveMetastore(HdfsEnvironment hdfsEnvironment,
GlueHiveMetastoreConfig glueConfig,
GlueColumnStatisticsProvider columnStatisticsProvider,
Executor executor,
Optional<com.amazonaws.handlers.RequestHandler2> requestHandler) |
| Constructor and Description |
|---|
ThriftHiveMetastore(MetastoreLocator metastoreLocator,
HiveConfig hiveConfig,
ThriftMetastoreConfig thriftConfig,
HiveAuthenticationConfig authenticationConfig,
HdfsEnvironment hdfsEnvironment) |
| Constructor and Description |
|---|
OrcDeleteDeltaPageSource(org.apache.hadoop.fs.Path path,
long fileSize,
OrcReaderOptions options,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats) |
OrcDeleteDeltaPageSourceFactory(OrcReaderOptions options,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats) |
OrcDeletedRows(String sourceFileName,
OrcDeleteDeltaPageSourceFactory pageSourceFactory,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
HdfsEnvironment hdfsEnvironment,
DeleteDeltaLocations deleteDeltaLocations) |
OrcFileWriterFactory(HdfsEnvironment hdfsEnvironment,
TypeManager typeManager,
NodeVersion nodeVersion,
org.joda.time.DateTimeZone hiveStorageTimeZone,
boolean writeLegacyVersion,
FileFormatDataSourceStats readStats,
OrcWriterOptions orcWriterOptions) |
OrcFileWriterFactory(HdfsEnvironment hdfsEnvironment,
TypeManager typeManager,
NodeVersion nodeVersion,
HiveConfig hiveConfig,
OrcWriterConfig orcWriterConfig,
FileFormatDataSourceStats readStats,
OrcWriterConfig config) |
OrcPageSourceFactory(OrcReaderConfig config,
HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats) |
OrcPageSourceFactory(OrcReaderOptions orcReaderOptions,
HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats) |
| Modifier and Type | Method and Description |
|---|---|
static HivePageSourceFactory.ReaderPageSourceWithProjections |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
boolean useColumnNames,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.conf.Configuration configuration,
String user,
FileFormatDataSourceStats stats,
ParquetReaderOptions options)
This method is available for other callers to use directly. |
| Constructor and Description |
|---|
ParquetPageSourceFactory(HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats,
ParquetReaderConfig config) |
| Constructor and Description |
|---|
RegisterPartitionProcedure(HiveConfig hiveConfig,
TransactionalMetadataFactory hiveMetadataFactory,
HiveMetastore metastore,
HdfsEnvironment hdfsEnvironment) |
SyncPartitionMetadataProcedure(TransactionalMetadataFactory hiveMetadataFactory,
HdfsEnvironment hdfsEnvironment) |
| Constructor and Description |
|---|
RcFilePageSourceFactory(TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
FileFormatDataSourceStats stats) |
| Constructor and Description |
|---|
S3SelectRecordCursorProvider(HdfsEnvironment hdfsEnvironment,
PrestoS3ClientFactory s3ClientFactory) |
| Modifier and Type | Method and Description |
|---|---|
static void |
HiveWriteUtils.createDirectory(HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path path) |
static org.apache.hadoop.fs.Path |
HiveWriteUtils.createTemporaryPath(ConnectorSession session,
HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path targetPath) |
static org.apache.hadoop.fs.Path |
HiveWriteUtils.getTableDefaultLocation(Database database,
HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
String schemaName,
String tableName) |
static org.apache.hadoop.fs.Path |
HiveWriteUtils.getTableDefaultLocation(HdfsEnvironment.HdfsContext context,
SemiTransactionalHiveMetastore metastore,
HdfsEnvironment hdfsEnvironment,
String schemaName,
String tableName) |
static boolean |
HiveWriteUtils.isHdfsEncrypted(HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path path) |
static boolean |
HiveWriteUtils.isS3FileSystem(HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path path) |
static boolean |
HiveWriteUtils.isViewFileSystem(HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path path) |
static boolean |
HiveWriteUtils.pathExists(HdfsEnvironment.HdfsContext context,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.fs.Path path) |
Copyright © 2012–2020. All rights reserved.