| Package | Description |
|---|---|
| io.prestosql.plugin.hive | |
| io.prestosql.plugin.hive.orc | |
| io.prestosql.plugin.hive.parquet | |
| io.prestosql.plugin.hive.rcfile | |
| io.prestosql.plugin.hive.util | |
| Modifier and Type | Method and Description |
|---|---|
| static HiveColumnHandle | HiveColumnHandle.bucketColumnHandle() — The column indicating the bucket id. |
HiveColumnHandle |
HivePageSourceProvider.ColumnMapping.getHiveColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.pathColumnHandle() |
static HiveColumnHandle |
HiveColumnHandle.updateRowIdHandle() |
| Modifier and Type | Method and Description |
|---|---|
List<HiveColumnHandle> |
HiveSplit.BucketConversion.getBucketColumnHandles() |
List<HiveColumnHandle> |
BackgroundHiveSplitLoader.BucketSplitInfo.getBucketColumns() |
List<HiveColumnHandle> |
HiveBucketHandle.getColumns() |
TupleDomain<HiveColumnHandle> |
HiveSplit.getEffectivePredicate() |
List<HiveColumnHandle> |
HiveWritableTableHandle.getInputColumns() |
List<HiveColumnHandle> |
HivePartitionResult.getPartitionColumns() |
static List<HiveColumnHandle> |
HiveUtil.getPartitionKeyColumnHandles(Table table) |
static List<HiveColumnHandle> |
HiveUtil.getRegularColumnHandles(Table table) |
static List<HiveColumnHandle> |
HiveUtil.hiveColumnHandles(Table table) |
static List<HiveColumnHandle> |
HivePageSourceProvider.ColumnMapping.toColumnHandles(List<HivePageSourceProvider.ColumnMapping> regularColumnMappings,
boolean doCoercion) |
| Modifier and Type | Method and Description |
|---|---|
static String |
HiveUtil.getPrefilledColumnValue(HiveColumnHandle columnHandle,
HivePartitionKey partitionKey,
org.apache.hadoop.fs.Path path,
OptionalInt bucketNumber) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.interim(HiveColumnHandle hiveColumnHandle,
int index) |
static boolean |
HiveColumnHandle.isBucketColumnHandle(HiveColumnHandle column) |
static boolean |
HiveColumnHandle.isPathColumnHandle(HiveColumnHandle column) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.prefilled(HiveColumnHandle hiveColumnHandle,
String prefilledValue,
Optional<HiveType> coerceFrom) |
static HivePageSourceProvider.ColumnMapping |
HivePageSourceProvider.ColumnMapping.regular(HiveColumnHandle hiveColumnHandle,
int index,
Optional<HiveType> coerceFrom) |
| Modifier and Type | Method and Description |
|---|---|
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
Map<Integer,HiveType> columnCoercions,
org.apache.hadoop.fs.Path path,
OptionalInt bucketNumber) |
static List<HivePageSourceProvider.ColumnMapping> |
HivePageSourceProvider.ColumnMapping.buildColumnMappings(List<HivePartitionKey> partitionKeys,
List<HiveColumnHandle> columns,
List<HiveColumnHandle> requiredInterimColumns,
Map<Integer,HiveType> columnCoercions,
org.apache.hadoop.fs.Path path,
OptionalInt bucketNumber) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
String |
IonSqlQueryBuilder.buildSql(List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> tupleDomain) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HivePageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
OptionalInt bucketNumber,
long start,
long length,
long fileSize,
Properties schema,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
Map<Integer,HiveType> columnCoercions,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled) |
static Optional<ConnectorPageSource> |
HivePageSourceProvider.createHivePageSource(Set<HiveRecordCursorProvider> cursorProviders,
Set<HivePageSourceFactory> pageSourceFactories,
org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
OptionalInt bucketNumber,
long start,
long length,
long fileSize,
Properties schema,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
List<HivePartitionKey> partitionKeys,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
Map<Integer,HiveType> columnCoercions,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled) |
Optional<? extends ConnectorPageSource> |
HivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
Optional<? extends ConnectorPageSource> |
HivePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
S3SelectRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
HiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
Optional<RecordCursor> |
GenericHiveRecordCursorProvider.createRecordCursor(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
boolean s3SelectPushdownEnabled) |
static org.apache.hadoop.mapred.RecordReader<?,?> |
HiveUtil.createRecordReader(org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.fs.Path path,
long start,
long length,
Properties schema,
List<HiveColumnHandle> columns) |
static HivePartition |
HivePartitionManager.parsePartition(SchemaTableName tableName,
String partitionName,
List<HiveColumnHandle> partitionColumns,
List<Type> partitionColumnTypes,
org.joda.time.DateTimeZone timeZone) |
void |
HiveStorageFormat.validateColumns(List<HiveColumnHandle> handles) |
| Constructor and Description |
|---|
BucketConversion(int tableBucketCount,
int partitionBucketCount,
List<HiveColumnHandle> bucketColumnHandles) |
HiveBucketHandle(List<HiveColumnHandle> columns,
int tableBucketCount,
int readBucketCount) |
HiveInsertTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
String filePrefix,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat) |
HiveOutputTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
String filePrefix,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
List<String> partitionedBy,
Optional<HiveBucketProperty> bucketProperty,
String tableOwner,
Map<String,String> additionalTableParameters) |
HivePageSink(HiveWriterFactory writerFactory,
List<HiveColumnHandle> inputColumns,
Optional<HiveBucketProperty> bucketProperty,
PageIndexerFactory pageIndexerFactory,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
int maxOpenWriters,
com.google.common.util.concurrent.ListeningExecutorService writeVerificationExecutor,
io.airlift.json.JsonCodec<PartitionUpdate> partitionUpdateCodec,
ConnectorSession session) |
HivePartitionResult(List<HiveColumnHandle> partitionColumns,
Iterable<HivePartition> partitions,
TupleDomain<? extends ColumnHandle> compactEffectivePredicate,
TupleDomain<ColumnHandle> unenforcedConstraint,
TupleDomain<ColumnHandle> enforcedConstraint,
Optional<HiveBucketHandle> bucketHandle,
Optional<HiveBucketing.HiveBucketFilter> bucketFilter) |
HiveSplit(String database,
String table,
String partitionName,
String path,
long start,
long length,
long fileSize,
Properties schema,
List<HivePartitionKey> partitionKeys,
List<HostAddress> addresses,
OptionalInt bucketNumber,
boolean forceLocalScheduling,
TupleDomain<HiveColumnHandle> effectivePredicate,
Map<Integer,HiveType> columnCoercions,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled) |
HiveWritableTableHandle(String schemaName,
String tableName,
List<HiveColumnHandle> inputColumns,
String filePrefix,
HivePageSinkMetadata pageSinkMetadata,
LocationHandle locationHandle,
Optional<HiveBucketProperty> bucketProperty,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat) |
HiveWriterFactory(Set<HiveFileWriterFactory> fileWriterFactories,
String schemaName,
String tableName,
boolean isCreateTable,
List<HiveColumnHandle> inputColumns,
HiveStorageFormat tableStorageFormat,
HiveStorageFormat partitionStorageFormat,
OptionalInt bucketCount,
List<SortingColumn> sortedBy,
LocationHandle locationHandle,
LocationService locationService,
String filePrefix,
HivePageSinkMetadataProvider pageSinkMetadataProvider,
TypeManager typeManager,
HdfsEnvironment hdfsEnvironment,
PageSorter pageSorter,
io.airlift.units.DataSize sortBufferSize,
int maxOpenSortFiles,
boolean immutablePartitions,
ConnectorSession session,
NodeManager nodeManager,
io.airlift.event.client.EventClient eventClient,
HiveSessionProperties hiveSessionProperties,
HiveWriterStats hiveWriterStats,
OrcFileWriterFactory orcFileWriterFactory) |
| Modifier and Type | Method and Description |
|---|---|
static OrcPageSource |
OrcPageSourceFactory.createOrcPageSource(HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
io.airlift.units.DataSize maxMergeDistance,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
io.airlift.units.DataSize tinyStripeThreshold,
io.airlift.units.DataSize maxReadBlockSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats) |
static OrcPageSource |
OrcPageSourceFactory.createOrcPageSource(HdfsEnvironment hdfsEnvironment,
String sessionUser,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
List<HiveColumnHandle> columns,
boolean useOrcColumnNames,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
io.airlift.units.DataSize maxMergeDistance,
io.airlift.units.DataSize maxBufferSize,
io.airlift.units.DataSize streamBufferSize,
io.airlift.units.DataSize tinyStripeThreshold,
io.airlift.units.DataSize maxReadBlockSize,
boolean lazyReadSmallRanges,
boolean orcBloomFiltersEnabled,
FileFormatDataSourceStats stats) |
Optional<? extends ConnectorPageSource> |
OrcPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
Optional<? extends ConnectorPageSource> |
OrcPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
| Constructor and Description |
|---|
OrcPageSource(OrcRecordReader recordReader,
OrcDataSource orcDataSource,
List<HiveColumnHandle> columns,
TypeManager typeManager,
AggregatedMemoryContext systemMemoryContext,
FileFormatDataSourceStats stats) |
| Modifier and Type | Method and Description |
|---|---|
static org.apache.parquet.schema.Type |
ParquetPageSourceFactory.getParquetType(HiveColumnHandle column,
org.apache.parquet.schema.MessageType messageType,
boolean useParquetColumnNames) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
Optional<? extends ConnectorPageSource> |
ParquetPageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
static ParquetPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
String user,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
boolean useParquetColumnNames,
boolean failOnCorruptedParquetStatistics,
io.airlift.units.DataSize maxReadBlockSize,
TypeManager typeManager,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats) |
static ParquetPageSource |
ParquetPageSourceFactory.createParquetPageSource(HdfsEnvironment hdfsEnvironment,
String user,
org.apache.hadoop.conf.Configuration configuration,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
boolean useParquetColumnNames,
boolean failOnCorruptedParquetStatistics,
io.airlift.units.DataSize maxReadBlockSize,
TypeManager typeManager,
TupleDomain<HiveColumnHandle> effectivePredicate,
FileFormatDataSourceStats stats) |
static TupleDomain<org.apache.parquet.column.ColumnDescriptor> |
ParquetPageSourceFactory.getParquetTupleDomain(Map<List<String>,RichColumnDescriptor> descriptorsByPath,
TupleDomain<HiveColumnHandle> effectivePredicate) |
| Constructor and Description |
|---|
ParquetPageSource(ParquetReader parquetReader,
org.apache.parquet.schema.MessageType fileSchema,
org.apache.parquet.io.MessageColumnIO messageColumnIO,
TypeManager typeManager,
Properties splitSchema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
boolean useParquetColumnNames) |
ParquetPageSource(ParquetReader parquetReader,
org.apache.parquet.schema.MessageType fileSchema,
org.apache.parquet.io.MessageColumnIO messageColumnIO,
TypeManager typeManager,
Properties splitSchema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
boolean useParquetColumnNames) |
| Modifier and Type | Method and Description |
|---|---|
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
Optional<? extends ConnectorPageSource> |
RcFilePageSourceFactory.createPageSource(org.apache.hadoop.conf.Configuration configuration,
ConnectorSession session,
org.apache.hadoop.fs.Path path,
long start,
long length,
long fileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
org.joda.time.DateTimeZone hiveStorageTimeZone) |
| Constructor and Description |
|---|
RcFilePageSource(RcFileReader rcFileReader,
List<HiveColumnHandle> columns,
TypeManager typeManager) |
| Constructor and Description |
|---|
InternalHiveSplitFactory(org.apache.hadoop.fs.FileSystem fileSystem,
String partitionName,
org.apache.hadoop.mapred.InputFormat<?,?> inputFormat,
Properties schema,
List<HivePartitionKey> partitionKeys,
TupleDomain<HiveColumnHandle> effectivePredicate,
Map<Integer,HiveTypeName> columnCoercions,
Optional<HiveSplit.BucketConversion> bucketConversion,
boolean forceLocalScheduling,
boolean s3SelectPushdownEnabled) |
Copyright © 2012–2019. All rights reserved.