| Package | Description |
|---|---|
| com.facebook.presto.hive | |
| com.facebook.presto.hive.orc | |
| com.facebook.presto.hive.pagefile | |
| com.facebook.presto.hive.parquet | |
| com.facebook.presto.hive.rcfile | |
| Modifier and Type | Method and Description |
|---|---|
static HiveColumnHandle |
HiveColumnHandle.bucketColumnHandle()
The column indicating the bucket id.
|
static HiveColumnHandle |
HiveColumnHandle.fileModifiedTimeColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.fileSizeColumnHandle() |
HiveColumnHandle |
HivePageSourceProvider.ColumnMapping.getHiveColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.pathColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.updateRowIdHandle() |
| Modifier and Type | Method and Description |
|---|---|
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.aggregated(HiveColumnHandle hiveColumnHandle,
int index) |
static Optional<String> |
HiveUtil.getPrefilledColumnValue(HiveColumnHandle columnHandle,
HivePartitionKey partitionKey,
Path path,
OptionalInt bucketNumber,
long fileSize,
long fileModifiedTime) |
static Subfield |
HiveColumnHandle.getPushedDownSubfield(HiveColumnHandle column)
Return the pushed down subfield if the column represents one.
|
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.interim(HiveColumnHandle hiveColumnHandle,
int index) |
static boolean |
HiveColumnHandle.isBucketColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isFileModifiedTimeColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isFileSizeColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isPathColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isPushedDownSubfield(HiveColumnHandle column) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.prefilled(HiveColumnHandle hiveColumnHandle,
Optional<String> prefilledValue,
Optional<HiveType> coerceFrom) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.regular(HiveColumnHandle hiveColumnHandle,
int index,
Optional<HiveType> coerceFrom) |
| Modifier and Type | Method and Description |
|---|---|
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
TableToPartitionMapping tableToPartitionMapping,
Path path,
OptionalInt bucketNumber,
long fileSize,
long fileModifiedTime) |
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
TableToPartitionMapping tableToPartitionMapping,
Path path,
OptionalInt bucketNumber,
long fileSize,
long fileModifiedTime) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
OptionalInt tableBucketNumber,
long start,
long length,
long fileSize,
long fileModifiedTime,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation,
Map<String,String> customSplitInfo) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
OptionalInt tableBucketNumber,
long start,
long length,
long fileSize,
long fileModifiedTime,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation,
Map<String,String> customSplitInfo) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
OptionalInt tableBucketNumber,
long start,
long length,
long fileSize,
long fileModifiedTime,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation,
Map<String,String> customSplitInfo) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
OptionalInt tableBucketNumber,
long start,
long length,
long fileSize,
long fileModifiedTime,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation,
Map<String,String> customSplitInfo) |
Optional<? extends ConnectorPageSource> |
HiveSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
HiveBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
HiveBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled,
Map<String,String> customSplitInfo) |
static org.apache.hadoop.mapred.RecordReader<?,?> |
HiveUtil.createRecordReader(org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
Properties schema,
List<HiveColumnHandle> columns,
Map<String,String> customSplitInfo) |
static List<HiveColumnHandle> |
HiveUtil.getPhysicalHiveColumnHandles(List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
List<OrcType> types,
Path path) |
Optional<EncryptionInformation> |
HiveEncryptionInformationProvider.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns) |
Optional<EncryptionInformation> |
EncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns)
Return encryption information for an unpartitioned table
|
Optional<EncryptionInformation> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns) |
Optional<Map<String,EncryptionInformation>> |
HiveEncryptionInformationProvider.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions) |
Optional<Map<String,EncryptionInformation>> |
EncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions)
Return encryption information for a partitioned table.
|
Optional<Map<String,EncryptionInformation>> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions) |
protected abstract Map<String,EncryptionInformation> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformationInternal(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions,
Map<String,String> fieldToKeyReference,
DwrfTableEncryptionProperties encryptionProperties) |
protected abstract EncryptionInformation |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformationInternal(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,String> fieldToKeyReference,
DwrfTableEncryptionProperties encryptionProperties) |
static HivePartition |
HivePartitionManager.parsePartition(SchemaTableName tableName,
String partitionName,
List<HiveColumnHandle> partitionColumns,
List<Type> partitionColumnTypes,
org.joda.time.DateTimeZone timeZone) |
Map<Integer,io.airlift.slice.Slice> |
DwrfEncryptionMetadata.toKeyMap(List<OrcType> types,
List<HiveColumnHandle> physicalColumnHandles) |
| Constructor and Description |
|---|
BucketConversion(int tableBucketCount,
int partitionBucketCount,
List<HiveColumnHandle> bucketColumnHandles) |
FilteringPageSource(List<HivePageSourceProvider.ColumnMapping> columnMappings,
TupleDomain<HiveColumnHandle> domainPredicate,
RowExpression remainingPredicate,
TypeManager typeManager,
RowExpressionService rowExpressionService,
ConnectorSession session,
Set<Integer> originalIndices,
ConnectorPageSource delegate) |
HiveBucketHandle(List<HiveColumnHandle> columns,
int tableBucketCount,
int readBucketCount) |
HiveInsertTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
Optional<EncryptionInformation> encryptionInformation) |
HiveOutputTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
List<String> partitionedBy,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
String tableOwner,
Map<String,String> additionalTableParameters,
Optional<EncryptionInformation> encryptionInformation) |
HivePageSink(HiveWriterFactory writerFactory,
List<HiveColumnHandle> inputColumns,
Optional<HiveBucketProperty> bucketProperty,
String schemaName,
String tableName,
PageIndexerFactory pageIndexerFactory,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
int maxOpenWriters,
com.google.common.util.concurrent.ListeningExecutorService writeVerificationExecutor,
com.facebook.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
com.facebook.airlift.json.smile.SmileCodec<PartitionUpdate> partitionUpdateSmileCodec,
ConnectorSession session,
HiveMetadataUpdater hiveMetadataUpdater) |
HivePartitionResult(List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
List<HivePartition> partitions,
TupleDomain<? extends ColumnHandle> effectivePredicate,
TupleDomain<ColumnHandle> unenforcedConstraint,
TupleDomain<ColumnHandle> enforcedConstraint,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
List<HivePartition> partitions,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
List<HivePartition> partitions,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
List<HivePartition> partitions,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown) |
HiveWritableTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
Optional<EncryptionInformation> encryptionInformation) |
HiveWriterFactory(Set<HiveFileWriterFactory> fileWriterFactories,
String schemaName,
String tableName,
boolean isCreateTable,
List<HiveColumnHandle> inputColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveCompressionCodec compressionCodec,
Map<String,String> additionalTableParameters,
OptionalInt bucketCount,
List<SortingColumn> sortedBy,
LocationHandle locationHandle,
LocationService locationService,
String queryId,
HivePageSinkMetadataProvider pageSinkMetadataProvider,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
PageSorter pageSorter,
io.airlift.units.DataSize sortBufferSize,
int maxOpenSortFiles,
boolean immutablePartitions,
ConnectorSession session,
NodeManager nodeManager,
com.facebook.airlift.event.client.EventClient eventClient,
HiveSessionProperties hiveSessionProperties,
HiveWriterStats hiveWriterStats,
OrcFileWriterFactory orcFileWriterFactory,
boolean commitRequired,
Optional<EncryptionInformation> encryptionInformation) |
| Modifier and Type | Method and Description |
|---|---|
static ConnectorPageSource |
OrcSelectivePageSourceFactory.createOrcPageSource(ConnectorSession session,
OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
boolean useOrcColumnNames,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
RowExpressionService rowExpressionService,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
TupleDomainFilterCache tupleDomainFilterCache,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider) |
static ConnectorPageSource |
OrcBatchPageSourceFactory.createOrcPageSource(OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
OrcReaderOptions orcReaderOptions,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider) |
static ConnectorPageSource |
OrcBatchPageSourceFactory.createOrcPageSource(OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
OrcReaderOptions orcReaderOptions,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider) |
Optional<? extends ConnectorPageSource> |
OrcSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
DwrfSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
OrcBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
OrcBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
DwrfBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
DwrfBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
AggregatedOrcPageSource(List<HiveColumnHandle> columnHandles,
Footer footer,
TypeManager typeManager,
StandardFunctionResolution functionResolution) |
OrcBatchPageSource(OrcBatchRecordReader recordReader,
OrcDataSource orcDataSource,
List<HiveColumnHandle> columns,
TypeManager typeManager,
OrcAggregatedMemoryContext systemMemoryContext,
FileFormatDataSourceStats stats,
RuntimeStats runtimeStats) |
ProjectionBasedDwrfKeyProvider(Optional<EncryptionInformation> encryptionInformation,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
Path path) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
PageFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
PageFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
PageFilePageSource(FSDataInputStream inputStream,
long start,
long splitLength,
long fileSize,
BlockEncodingSerde blockEncodingSerde,
List<HiveColumnHandle> columns) |
| Modifier and Type | Method and Description |
|---|---|
static Optional<org.apache.parquet.schema.Type> |
ParquetPageSourceFactory.getColumnType(Type prestoType,
org.apache.parquet.schema.MessageType messageType,
boolean useParquetColumnNames,
HiveColumnHandle column,
SchemaTableName tableName,
Path path) |
static Optional<org.apache.parquet.schema.Type> |
ParquetPageSourceFactory.getParquetType(Type prestoType,
org.apache.parquet.schema.MessageType messageType,
boolean useParquetColumnNames,
HiveColumnHandle column,
SchemaTableName tableName,
Path path) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
ParquetSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
static ConnectorPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
String user,
org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
SchemaTableName tableName,
boolean useParquetColumnNames,
io.airlift.units.DataSize maxReadBlockSize,
boolean batchReaderEnabled,
boolean verificationEnabled,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats,
HiveFileContext hiveFileContext,
ParquetMetadataSource parquetMetadataSource,
boolean columnIndexFilterEnabled) |
static ConnectorPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
String user,
org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
SchemaTableName tableName,
boolean useParquetColumnNames,
io.airlift.units.DataSize maxReadBlockSize,
boolean batchReaderEnabled,
boolean verificationEnabled,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats,
HiveFileContext hiveFileContext,
ParquetMetadataSource parquetMetadataSource,
boolean columnIndexFilterEnabled) |
static TupleDomain<org.apache.parquet.column.ColumnDescriptor> |
ParquetPageSourceFactory.getParquetTupleDomain(Map<List<String>,RichColumnDescriptor> descriptorsByPath,
TupleDomain<HiveColumnHandle> effectivePredicate) |
| Constructor and Description |
|---|
AggregatedParquetPageSource(List<HiveColumnHandle> columnHandles,
org.apache.parquet.hadoop.metadata.ParquetMetadata parquetMetadata,
TypeManager typeManager,
StandardFunctionResolution functionResolution) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long fileSize,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
RcFilePageSource(RcFileReader rcFileReader,
List<HiveColumnHandle> columns,
TypeManager typeManager) |
Copyright © 2012–2022. All rights reserved.