| Modifier and Type | Method and Description |
|---|---|
| `static void` | `HiveWriteUtils.createDirectory(HdfsEnvironment.HdfsContext context, HdfsEnvironment hdfsEnvironment, org.apache.hadoop.fs.Path path)` |
| `static org.apache.hadoop.fs.Path` | `HiveWriteUtils.createTemporaryPath(HdfsEnvironment.HdfsContext context, HdfsEnvironment hdfsEnvironment, org.apache.hadoop.fs.Path targetPath)` |
| `static org.apache.hadoop.fs.Path` | `HiveWriteUtils.getTableDefaultLocation(HdfsEnvironment.HdfsContext context, SemiTransactionalHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, String schemaName, String tableName)` |
| `static boolean` | `HiveWriteUtils.isS3FileSystem(HdfsEnvironment.HdfsContext context, HdfsEnvironment hdfsEnvironment, org.apache.hadoop.fs.Path path)` |
| `static boolean` | `HiveWriteUtils.isViewFileSystem(HdfsEnvironment.HdfsContext context, HdfsEnvironment hdfsEnvironment, org.apache.hadoop.fs.Path path)` |
| `static boolean` | `HiveWriteUtils.pathExists(HdfsEnvironment.HdfsContext context, HdfsEnvironment hdfsEnvironment, org.apache.hadoop.fs.Path path)` |
| Constructor and Description |
|---|
| `BackgroundHiveSplitLoader(String connectorId, Table table, Iterable<HivePartitionMetadata> partitions, TupleDomain<? extends ColumnHandle> compactEffectivePredicate, Optional<HiveBucketHandle> bucketHandle, List<HiveBucketing.HiveBucket> buckets, ConnectorSession session, HdfsEnvironment hdfsEnvironment, NamenodeStats namenodeStats, DirectoryLister directoryLister, Executor executor, int maxPartitionBatchSize, int maxInitialSplits, boolean recursiveDirWalkerEnabled)` |
| `GenericHiveRecordCursorProvider(HdfsEnvironment hdfsEnvironment)` |
| `HiveLocationService(HdfsEnvironment hdfsEnvironment)` |
| `HiveMetadata(String connectorId, SemiTransactionalHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, HivePartitionManager partitionManager, org.joda.time.DateTimeZone timeZone, boolean allowCorruptWritesForTesting, boolean respectTableFormat, boolean bucketWritingEnabled, boolean writesToNonManagedTablesEnabled, HiveStorageFormat defaultStorageFormat, TypeManager typeManager, LocationService locationService, TableParameterCodec tableParameterCodec, io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec, TypeTranslator typeTranslator, String prestoVersion, HiveStatisticsProvider hiveStatisticsProvider)` |
| `HiveMetadataFactory(HiveConnectorId connectorId, ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, HivePartitionManager partitionManager, org.joda.time.DateTimeZone timeZone, int maxConcurrentFileRenames, boolean allowCorruptWritesForTesting, boolean respectTableFormat, boolean skipDeletionForAlter, boolean bucketWritingEnabled, boolean writesToNonManagedTablesEnabled, HiveStorageFormat defaultStorageFormat, long perTransactionCacheMaximumSize, TypeManager typeManager, LocationService locationService, TableParameterCodec tableParameterCodec, io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec, ExecutorService executorService, TypeTranslator typeTranslator, String prestoVersion)` |
| `HiveMetadataFactory(HiveConnectorId connectorId, HiveClientConfig hiveClientConfig, ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, HivePartitionManager partitionManager, ExecutorService executorService, TypeManager typeManager, LocationService locationService, TableParameterCodec tableParameterCodec, io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec, TypeTranslator typeTranslator, NodeVersion nodeVersion)` |
| `HivePageSink(HiveWriterFactory writerFactory, List<HiveColumnHandle> inputColumns, Optional<HiveBucketProperty> bucketProperty, PageIndexerFactory pageIndexerFactory, TypeManager typeManager, HdfsEnvironment hdfsEnvironment, int maxOpenWriters, com.google.common.util.concurrent.ListeningExecutorService writeVerificationExecutor, io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec, ConnectorSession session)` |
| `HivePageSinkProvider(Set<HiveFileWriterFactory> fileWriterFactories, HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore metastore, PageIndexerFactory pageIndexerFactory, TypeManager typeManager, HiveClientConfig config, LocationService locationService, io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec, NodeManager nodeManager, io.airlift.event.client.EventClient eventClient, HiveSessionProperties hiveSessionProperties)` |
| `HivePageSourceProvider(HiveClientConfig hiveClientConfig, HdfsEnvironment hdfsEnvironment, Set<HiveRecordCursorProvider> cursorProviders, Set<HivePageSourceFactory> pageSourceFactories, TypeManager typeManager)` |
| `HiveSplitManager(HiveConnectorId connectorId, java.util.function.Function<HiveTransactionHandle,SemiTransactionalHiveMetastore> metastoreProvider, NamenodeStats namenodeStats, HdfsEnvironment hdfsEnvironment, DirectoryLister directoryLister, Executor executor, CoercionPolicy coercionPolicy, io.airlift.stats.CounterStat highMemorySplitSourceCounter, int maxOutstandingSplits, int minPartitionBatchSize, int maxPartitionBatchSize, int maxInitialSplits, boolean recursiveDfsWalkerEnabled)` |
| `HiveSplitManager(HiveConnectorId connectorId, HiveClientConfig hiveClientConfig, java.util.function.Function<HiveTransactionHandle,SemiTransactionalHiveMetastore> metastoreProvider, NamenodeStats namenodeStats, HdfsEnvironment hdfsEnvironment, DirectoryLister directoryLister, ExecutorService executorService, CoercionPolicy coercionPolicy)` |
| `HiveWriterFactory(Set<HiveFileWriterFactory> fileWriterFactories, String schemaName, String tableName, boolean isCreateTable, List<HiveColumnHandle> inputColumns, HiveStorageFormat tableStorageFormat, HiveStorageFormat partitionStorageFormat, OptionalInt bucketCount, LocationHandle locationHandle, LocationService locationService, String filePrefix, HivePageSinkMetadataProvider pageSinkMetadataProvider, TypeManager typeManager, HdfsEnvironment hdfsEnvironment, boolean immutablePartitions, ConnectorSession session, NodeManager nodeManager, io.airlift.event.client.EventClient eventClient, HiveSessionProperties hiveSessionProperties)` |
| `OrcFileWriterFactory(HdfsEnvironment hdfsEnvironment, TypeManager typeManager, NodeVersion nodeVersion, org.joda.time.DateTimeZone hiveStorageTimeZone, FileFormatDataSourceStats stats)` |
| `OrcFileWriterFactory(HdfsEnvironment hdfsEnvironment, TypeManager typeManager, NodeVersion nodeVersion, HiveClientConfig hiveClientConfig, FileFormatDataSourceStats stats)` |
| `RcFileFileWriterFactory(HdfsEnvironment hdfsEnvironment, TypeManager typeManager, NodeVersion nodeVersion, org.joda.time.DateTimeZone hiveStorageTimeZone, FileFormatDataSourceStats stats)` |
| `RcFileFileWriterFactory(HdfsEnvironment hdfsEnvironment, TypeManager typeManager, NodeVersion nodeVersion, HiveClientConfig hiveClientConfig, FileFormatDataSourceStats stats)` |
| Constructor and Description |
|---|
| `SemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, Executor renameExecutor, boolean skipDeletionForAlter)` |
| Constructor and Description |
|---|
| `FileHiveMetastore(HdfsEnvironment hdfsEnvironment, FileHiveMetastoreConfig config)` |
| `FileHiveMetastore(HdfsEnvironment hdfsEnvironment, String catalogDirectory, String metastoreUser)` |
| Modifier and Type | Method and Description |
|---|---|
| `static OrcPageSource` | `OrcPageSourceFactory.createOrcPageSource(MetadataReader metadataReader, HdfsEnvironment hdfsEnvironment, String sessionUser, org.apache.hadoop.conf.Configuration configuration, org.apache.hadoop.fs.Path path, long start, long length, long fileSize, List<HiveColumnHandle> columns, boolean useOrcColumnNames, TupleDomain<HiveColumnHandle> effectivePredicate, org.joda.time.DateTimeZone hiveStorageTimeZone, TypeManager typeManager, io.airlift.units.DataSize maxMergeDistance, io.airlift.units.DataSize maxBufferSize, io.airlift.units.DataSize streamBufferSize, io.airlift.units.DataSize maxReadBlockSize, boolean lazyReadSmallRanges, boolean orcBloomFiltersEnabled, FileFormatDataSourceStats stats)` |
| Constructor and Description |
|---|
| `DwrfPageSourceFactory(TypeManager typeManager, HdfsEnvironment hdfsEnvironment, FileFormatDataSourceStats stats)` |
| `OrcPageSourceFactory(TypeManager typeManager, boolean useOrcColumnNames, HdfsEnvironment hdfsEnvironment, FileFormatDataSourceStats stats)` |
| `OrcPageSourceFactory(TypeManager typeManager, HiveClientConfig config, HdfsEnvironment hdfsEnvironment, FileFormatDataSourceStats stats)` |
| Modifier and Type | Method and Description |
|---|---|
| `static ParquetPageSource` | `ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment, String user, org.apache.hadoop.conf.Configuration configuration, org.apache.hadoop.fs.Path path, long start, long length, long fileSize, Properties schema, List<HiveColumnHandle> columns, boolean useParquetColumnNames, TypeManager typeManager, boolean predicatePushdownEnabled, TupleDomain<HiveColumnHandle> effectivePredicate)` |
| Constructor and Description |
|---|
| `ParquetHiveRecordCursor(HdfsEnvironment hdfsEnvironment, String sessionUser, org.apache.hadoop.conf.Configuration configuration, org.apache.hadoop.fs.Path path, long start, long length, long fileSize, Properties splitSchema, List<HiveColumnHandle> columns, boolean useParquetColumnNames, TypeManager typeManager, boolean predicatePushdownEnabled, TupleDomain<HiveColumnHandle> effectivePredicate)` |
| `ParquetPageSourceFactory(TypeManager typeManager, boolean useParquetColumnNames, HdfsEnvironment hdfsEnvironment)` |
| `ParquetPageSourceFactory(TypeManager typeManager, HiveClientConfig config, HdfsEnvironment hdfsEnvironment)` |
| `ParquetRecordCursorProvider(boolean useParquetColumnNames, HdfsEnvironment hdfsEnvironment)` |
| `ParquetRecordCursorProvider(HiveClientConfig hiveClientConfig, HdfsEnvironment hdfsEnvironment)` |
| Constructor and Description |
|---|
| `RcFilePageSourceFactory(TypeManager typeManager, HdfsEnvironment hdfsEnvironment, FileFormatDataSourceStats stats)` |
Copyright © 2012-2017. All Rights Reserved.