| Modifier and Type | Method and Description |
|---|---|
static HiveColumnHandle |
HiveColumnHandle.bucketColumnHandle()
The column indicating the bucket id.
|
static HiveColumnHandle |
HiveColumnHandle.fileModifiedTimeColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.fileSizeColumnHandle() |
HiveColumnHandle |
HivePageSourceProvider.ColumnMapping.getHiveColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.pathColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.updateRowIdHandle() |
| Modifier and Type | Method and Description |
|---|---|
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.aggregated(HiveColumnHandle hiveColumnHandle,
int index) |
static Optional<String> |
HiveUtil.getPrefilledColumnValue(HiveColumnHandle columnHandle,
HivePartitionKey partitionKey,
HiveFileSplit fileSplit,
OptionalInt bucketNumber) |
static Subfield |
HiveColumnHandle.getPushedDownSubfield(HiveColumnHandle column)
Return the pushed down subfield if the column represents one.
|
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.interim(HiveColumnHandle hiveColumnHandle,
int index) |
static boolean |
HiveColumnHandle.isBucketColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isFileModifiedTimeColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isFileSizeColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isPathColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isPushedDownSubfield(HiveColumnHandle column) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.prefilled(HiveColumnHandle hiveColumnHandle,
Optional<String> prefilledValue,
Optional<HiveType> coerceFrom) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.regular(HiveColumnHandle hiveColumnHandle,
int index,
Optional<HiveType> coerceFrom) |
| Modifier and Type | Method and Description |
|---|---|
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
TableToPartitionMapping tableToPartitionMapping,
HiveFileSplit fileSplit,
OptionalInt bucketNumber) |
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
TableToPartitionMapping tableToPartitionMapping,
HiveFileSplit fileSplit,
OptionalInt bucketNumber) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
static TupleDomain<Subfield> |
HiveTableLayoutHandle.canonicalizeDomainPredicate(TupleDomain<Subfield> domainPredicate,
Map<String,HiveColumnHandle> predicateColumns,
PlanCanonicalizationStrategy strategy) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
OptionalInt tableBucketNumber,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
OptionalInt tableBucketNumber,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
OptionalInt tableBucketNumber,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
OptionalInt tableBucketNumber,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String,HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String,String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
HiveSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
Optional<? extends ConnectorPageSource> |
HiveBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
HiveBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
static org.apache.hadoop.mapred.RecordReader<?,?> |
HiveUtil.createRecordReader(org.apache.hadoop.conf.Configuration configuration,
Path path,
long start,
long length,
Properties schema,
List<HiveColumnHandle> columns,
Map<String,String> customSplitInfo) |
static List<HiveColumnHandle> |
HiveUtil.getPhysicalHiveColumnHandles(List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
List<OrcType> types,
Path path) |
Optional<EncryptionInformation> |
HiveEncryptionInformationProvider.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns) |
Optional<EncryptionInformation> |
EncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns)
Return encryption information for an unpartitioned table.
|
Optional<EncryptionInformation> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns) |
Optional<Map<String,EncryptionInformation>> |
HiveEncryptionInformationProvider.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions) |
Optional<Map<String,EncryptionInformation>> |
EncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions)
Return encryption information for a partitioned table.
|
Optional<Map<String,EncryptionInformation>> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformation(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions) |
protected abstract Map<String,EncryptionInformation> |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformationInternal(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,Partition> partitions,
Map<String,String> fieldToKeyReference,
DwrfTableEncryptionProperties encryptionProperties) |
protected abstract EncryptionInformation |
AbstractDwrfEncryptionInformationSource.getReadEncryptionInformationInternal(ConnectorSession session,
Table table,
Optional<Set<HiveColumnHandle>> requestedColumns,
Map<String,String> fieldToKeyReference,
DwrfTableEncryptionProperties encryptionProperties) |
static HivePartition |
HivePartitionManager.parsePartition(SchemaTableName tableName,
String partitionName,
List<HiveColumnHandle> partitionColumns,
List<Type> partitionColumnTypes,
org.joda.time.DateTimeZone timeZone) |
HiveTableLayoutHandle.Builder |
HiveTableLayoutHandle.Builder.setPartitionColumns(List<HiveColumnHandle> partitionColumns) |
HiveTableLayoutHandle.Builder |
HiveTableLayoutHandle.Builder.setPredicateColumns(Map<String,HiveColumnHandle> predicateColumns) |
HiveTableLayoutHandle.Builder |
HiveTableLayoutHandle.Builder.setRequestedColumns(Optional<Set<HiveColumnHandle>> requestedColumns) |
Map<Integer,io.airlift.slice.Slice> |
DwrfEncryptionMetadata.toKeyMap(List<OrcType> types,
List<HiveColumnHandle> physicalColumnHandles) |
| Constructor and Description |
|---|
BucketConversion(int tableBucketCount,
int partitionBucketCount,
List<HiveColumnHandle> bucketColumnHandles) |
FilteringPageSource(List<HivePageSourceProvider.ColumnMapping> columnMappings,
TupleDomain<HiveColumnHandle> domainPredicate,
RowExpression remainingPredicate,
TypeManager typeManager,
RowExpressionService rowExpressionService,
ConnectorSession session,
Set<Integer> originalIndices,
ConnectorPageSource delegate) |
GenericHiveRecordCursor(org.apache.hadoop.conf.Configuration configuration,
Path path,
org.apache.hadoop.mapred.RecordReader<K,V> recordReader,
long totalBytes,
Properties splitSchema,
List<HiveColumnHandle> columns,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager) |
GenericHiveRecordCursor(org.apache.hadoop.conf.Configuration configuration,
Path path,
org.apache.hadoop.mapred.RecordReader<K,V> recordReader,
long totalBytes,
Properties splitSchema,
List<HiveColumnHandle> columns,
ZoneId hiveStorageTimeZoneId,
TypeManager typeManager) |
HiveBucketHandle(List<HiveColumnHandle> columns,
int tableBucketCount,
int readBucketCount) |
HiveInsertTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
Optional<EncryptionInformation> encryptionInformation) |
HiveOutputTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
List<String> partitionedBy,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
String tableOwner,
Map<String,String> additionalTableParameters,
Optional<EncryptionInformation> encryptionInformation) |
HivePageSink(HiveWriterFactory writerFactory,
List<HiveColumnHandle> inputColumns,
Optional<HiveBucketProperty> bucketProperty,
String schemaName,
String tableName,
PageIndexerFactory pageIndexerFactory,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
int maxOpenWriters,
com.google.common.util.concurrent.ListeningExecutorService writeVerificationExecutor,
com.facebook.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
com.facebook.airlift.json.smile.SmileCodec<PartitionUpdate> partitionUpdateSmileCodec,
ConnectorSession session,
HiveMetadataUpdater hiveMetadataUpdater) |
HivePartitionResult(List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
List<HivePartition> partitions,
TupleDomain<? extends ColumnHandle> effectivePredicate,
TupleDomain<ColumnHandle> unenforcedConstraint,
TupleDomain<ColumnHandle> enforcedConstraint,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
Optional<List<HivePartition>> partitions,
boolean footerStatsUnreliable,
Optional<HiveTableHandle> hiveTableHandle) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
Optional<List<HivePartition>> partitions,
boolean footerStatsUnreliable,
Optional<HiveTableHandle> hiveTableHandle) |
HiveTableLayoutHandle(SchemaTableName schemaTableName,
String tablePath,
List<HiveColumnHandle> partitionColumns,
List<Column> dataColumns,
Map<String,String> tableParameters,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
Map<String,HiveColumnHandle> predicateColumns,
TupleDomain<ColumnHandle> partitionColumnPredicate,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter,
boolean pushdownFilterEnabled,
String layoutString,
Optional<Set<HiveColumnHandle>> requestedColumns,
boolean partialAggregationsPushedDown,
boolean appendRowNumberEnabled,
Optional<List<HivePartition>> partitions,
boolean footerStatsUnreliable,
Optional<HiveTableHandle> hiveTableHandle) |
HiveWritableTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
List<SortingColumn> preferredOrderingColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveStorageFormat actualStorageFormat,
HiveCompressionCodec compressionCodec,
Optional<EncryptionInformation> encryptionInformation) |
HiveWriterFactory(Set<HiveFileWriterFactory> fileWriterFactories,
String schemaName,
String tableName,
boolean isCreateTable,
List<HiveColumnHandle> inputColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
HiveCompressionCodec compressionCodec,
Map<String,String> additionalTableParameters,
OptionalInt bucketCount,
List<SortingColumn> sortedBy,
LocationHandle locationHandle,
LocationService locationService,
String queryId,
HivePageSinkMetadataProvider pageSinkMetadataProvider,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
PageSorter pageSorter,
io.airlift.units.DataSize sortBufferSize,
int maxOpenSortFiles,
boolean immutablePartitions,
ConnectorSession session,
NodeManager nodeManager,
com.facebook.airlift.event.client.EventClient eventClient,
HiveSessionProperties hiveSessionProperties,
HiveWriterStats hiveWriterStats,
OrcFileWriterFactory orcFileWriterFactory,
boolean commitRequired,
Optional<EncryptionInformation> encryptionInformation) |
| Modifier and Type | Method and Description |
|---|---|
static ConnectorPageSource |
OrcSelectivePageSourceFactory.createOrcPageSource(ConnectorSession session,
OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
org.apache.hadoop.conf.Configuration configuration,
HiveFileSplit fileSplit,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
boolean useOrcColumnNames,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
RowExpressionService rowExpressionService,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
TupleDomainFilterCache tupleDomainFilterCache,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
static ConnectorPageSource |
OrcBatchPageSourceFactory.createOrcPageSource(OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
HiveFileSplit fileSplit,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
OrcReaderOptions orcReaderOptions,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider) |
static ConnectorPageSource |
OrcBatchPageSourceFactory.createOrcPageSource(OrcEncoding orcEncoding,
HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
HiveFileSplit fileSplit,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats,
int domainCompactionThreshold,
OrcFileTailSource orcFileTailSource,
StripeMetadataSourceFactory stripeMetadataSourceFactory,
HiveFileContext hiveFileContext,
OrcReaderOptions orcReaderOptions,
Optional<EncryptionInformation> encryptionInformation,
DwrfEncryptionProvider dwrfEncryptionProvider) |
Optional<? extends ConnectorPageSource> |
OrcSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
Optional<? extends ConnectorPageSource> |
DwrfSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
Optional<? extends ConnectorPageSource> |
OrcBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
OrcBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
DwrfBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
DwrfBatchPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
AggregatedOrcPageSource(List<HiveColumnHandle> columnHandles,
Footer footer,
TypeManager typeManager,
StandardFunctionResolution functionResolution) |
OrcBatchPageSource(OrcBatchRecordReader recordReader,
OrcDataSource orcDataSource,
List<HiveColumnHandle> columns,
TypeManager typeManager,
OrcAggregatedMemoryContext systemMemoryContext,
FileFormatDataSourceStats stats,
RuntimeStats runtimeStats) |
ProjectionBasedDwrfKeyProvider(Optional<EncryptionInformation> encryptionInformation,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
Path path) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
PageFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
PageFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
PageFilePageSource(FSDataInputStream inputStream,
long start,
long splitLength,
long fileSize,
BlockEncodingSerde blockEncodingSerde,
List<HiveColumnHandle> columns) |
| Modifier and Type | Field and Description |
|---|---|
static HiveColumnHandle |
ParquetPageSourceFactory.PARQUET_ROW_INDEX_COLUMN
If this object is passed as one of the columns for
createPageSource,
it will be populated as an additional column containing the index of each
row read. |
| Modifier and Type | Method and Description |
|---|---|
static Optional<org.apache.parquet.schema.Type> |
ParquetPageSourceFactory.getColumnType(Type prestoType,
org.apache.parquet.schema.MessageType messageType,
boolean useParquetColumnNames,
HiveColumnHandle column,
SchemaTableName tableName,
Path path) |
static Optional<org.apache.parquet.schema.Type> |
ParquetPageSourceFactory.getParquetType(Type prestoType,
org.apache.parquet.schema.MessageType messageType,
boolean useParquetColumnNames,
HiveColumnHandle column,
SchemaTableName tableName,
Path path) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
ParquetSelectivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
List<HiveColumnHandle> columns,
Map<Integer,String> prefilledValues,
Map<Integer,HiveCoercer> coercers,
Optional<BucketAdaptation> bucketAdaptation,
List<Integer> outputColumns,
TupleDomain<Subfield> domainPredicate,
RowExpression remainingPredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation,
boolean appendRowNumberEnabled,
boolean footerStatsUnreliable) |
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
static ConnectorPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
ConnectorSession session,
org.apache.hadoop.conf.Configuration configuration,
HiveFileSplit fileSplit,
List<HiveColumnHandle> columns,
SchemaTableName tableName,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats,
HiveFileContext hiveFileContext,
ParquetMetadataSource parquetMetadataSource) |
static ConnectorPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
ConnectorSession session,
org.apache.hadoop.conf.Configuration configuration,
HiveFileSplit fileSplit,
List<HiveColumnHandle> columns,
SchemaTableName tableName,
TypeManager typeManager,
StandardFunctionResolution functionResolution,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats,
HiveFileContext hiveFileContext,
ParquetMetadataSource parquetMetadataSource) |
static TupleDomain<org.apache.parquet.column.ColumnDescriptor> |
ParquetPageSourceFactory.getParquetTupleDomain(Map<List<String>,RichColumnDescriptor> descriptorsByPath,
TupleDomain<HiveColumnHandle> effectivePredicate) |
| Constructor and Description |
|---|
AggregatedParquetPageSource(List<HiveColumnHandle> columnHandles,
org.apache.parquet.hadoop.metadata.ParquetMetadata parquetMetadata,
TypeManager typeManager,
StandardFunctionResolution functionResolution) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Storage storage,
SchemaTableName tableName,
Map<String,String> tableParameters,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
HiveFileContext hiveFileContext,
Optional<EncryptionInformation> encryptionInformation) |
| Constructor and Description |
|---|
RcFilePageSource(RcFileReader rcFileReader,
List<HiveColumnHandle> columns,
TypeManager typeManager) |
| Modifier and Type | Method and Description |
|---|---|
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
static Properties |
S3SelectRecordCursor.updateSplitSchema(Properties splitSchema,
List<HiveColumnHandle> columns) |
| Constructor and Description |
|---|
S3SelectRecordCursor(org.apache.hadoop.conf.Configuration configuration,
Path path,
org.apache.hadoop.mapred.RecordReader<K,V> recordReader,
long totalBytes,
Properties splitSchema,
List<HiveColumnHandle> columns,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager) |
Copyright © 2012–2023. All rights reserved.