Package | Description |
---|---|
org.apache.hadoop.hive.llap.io.decode | |
org.apache.hadoop.hive.llap.io.encoded | |
org.apache.hadoop.hive.ql.exec | Hive QL execution tasks, operators, functions and other handlers. |
org.apache.hadoop.hive.ql.exec.vector | |
org.apache.hadoop.hive.ql.index | |
org.apache.hadoop.hive.ql.index.bitmap | |
org.apache.hadoop.hive.ql.index.compact | |
org.apache.hadoop.hive.ql.io | |
org.apache.hadoop.hive.ql.io.merge | |
org.apache.hadoop.hive.ql.plan | |
Modifier and Type | Method and Description |
---|---|
ReadPipeline |
OrcColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription readerSchema,
org.apache.hadoop.mapred.InputFormat<?,?> unused0,
Deserializer unused1,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> unused2) |
ReadPipeline |
ColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription readerSchema,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |
ReadPipeline |
GenericColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription schema,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |
Constructor and Description |
---|
SerDeEncodedDataReader(SerDeLowLevelCacheImpl cache,
BufferUsageManager bufferManager,
org.apache.hadoop.conf.Configuration daemonConf,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
OrcEncodedDataConsumer consumer,
org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription schema,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |
Modifier and Type | Method and Description |
---|---|
static PartitionDesc |
Utilities.getPartitionDesc(Partition part) |
static PartitionDesc |
Utilities.getPartitionDescFromTableDesc(TableDesc tblDesc,
Partition part,
boolean usePartSchemaProperties) |
Modifier and Type | Method and Description |
---|---|
static boolean |
Utilities.isInputFileFormatSelfDescribing(PartitionDesc pd) |
static boolean |
Utilities.isInputFileFormatVectorized(PartitionDesc pd) |
Constructor and Description |
---|
MapOpCtx(String alias,
Operator<?> op,
PartitionDesc partDesc) |
Modifier and Type | Field and Description |
---|---|
protected PartitionDesc |
VectorMapOperator.VectorPartitionContext.partDesc |
Modifier and Type | Method and Description |
---|---|
PartitionDesc |
VectorMapOperator.VectorPartitionContext.getPartDesc() |
Modifier and Type | Method and Description |
---|---|
VectorMapOperator.VectorPartitionContext |
VectorMapOperator.createAndInitPartitionContext(PartitionDesc partDesc,
org.apache.hadoop.conf.Configuration hconf) |
static void |
VectorizedRowBatchCtx.getPartitionValues(VectorizedRowBatchCtx vrbCtx,
PartitionDesc partDesc,
Object[] partitionValues) |
Modifier and Type | Method and Description |
---|---|
protected Task<?> |
TableBasedIndexHandler.getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
Index index,
boolean partitioned,
PartitionDesc indexTblPartDesc,
String indexTableName,
PartitionDesc baseTablePartDesc,
String baseTableName,
String dbName) |
protected Task<?> |
AggregateIndexHandler.getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
Index index,
boolean partitioned,
PartitionDesc indexTblPartDesc,
String indexTableName,
PartitionDesc baseTablePartDesc,
String baseTableName,
String dbName) |
protected Task<?> |
TableBasedIndexHandler.getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
List<FieldSchema> indexField,
boolean partitioned,
PartitionDesc indexTblPartDesc,
String indexTableName,
PartitionDesc baseTablePartDesc,
String baseTableName,
String dbName) |
Modifier and Type | Method and Description |
---|---|
protected Task<?> |
BitmapIndexHandler.getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
List<FieldSchema> indexField,
boolean partitioned,
PartitionDesc indexTblPartDesc,
String indexTableName,
PartitionDesc baseTablePartDesc,
String baseTableName,
String dbName) |
Modifier and Type | Method and Description |
---|---|
protected Task<?> |
CompactIndexHandler.getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
Set<WriteEntity> outputs,
List<FieldSchema> indexField,
boolean partitioned,
PartitionDesc indexTblPartDesc,
String indexTableName,
PartitionDesc baseTablePartDesc,
String baseTableName,
String dbName) |
Modifier and Type | Field and Description |
---|---|
protected Map<org.apache.hadoop.fs.Path,PartitionDesc> |
HiveInputFormat.pathToPartitionInfo |
Modifier and Type | Method and Description |
---|---|
protected static PartitionDesc |
HiveInputFormat.getPartitionDescFromPath(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap,
boolean ignoreSchema) |
Modifier and Type | Method and Description |
---|---|
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> |
IOPrepareCache.allocatePartitionDescMap() |
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> |
IOPrepareCache.allocatePartitionDescMap() |
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> |
IOPrepareCache.getPartitionDescMap() |
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> |
IOPrepareCache.getPartitionDescMap() |
Modifier and Type | Method and Description |
---|---|
static HiveOutputFormat<?,?> |
HiveFileFormatUtils.getHiveOutputFormat(org.apache.hadoop.conf.Configuration conf,
PartitionDesc partDesc) |
static org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.WritableComparable,org.apache.hadoop.io.Writable> |
HiveInputFormat.wrapForLlap(org.apache.hadoop.mapred.InputFormat<org.apache.hadoop.io.WritableComparable,org.apache.hadoop.io.Writable> inputFormat,
org.apache.hadoop.conf.Configuration conf,
PartitionDesc part) |
Modifier and Type | Method and Description |
---|---|
protected static PartitionDesc |
HiveInputFormat.getPartitionDescFromPath(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap,
boolean ignoreSchema) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap,
boolean ignoreSchema) |
static PartitionDesc |
HiveFileFormatUtils.getPartitionDescFromPathRecursively(Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo,
org.apache.hadoop.fs.Path dir,
Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> cacheMap,
boolean ignoreSchema) |
void |
IOPrepareCache.setPartitionDescMap(Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> partitionDescMap) |
void |
IOPrepareCache.setPartitionDescMap(Map<Map<org.apache.hadoop.fs.Path,PartitionDesc>,Map<org.apache.hadoop.fs.Path,PartitionDesc>> partitionDescMap) |
Constructor and Description |
---|
CombineHiveInputSplit(org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.lib.CombineFileSplit inputSplitShim,
Map<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo) |
Modifier and Type | Method and Description |
---|---|
void |
MergeFileWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
Modifier and Type | Method and Description |
---|---|
PartitionDesc |
PartitionDesc.clone() |
Modifier and Type | Method and Description |
---|---|
LinkedHashMap<String,PartitionDesc> |
MapWork.getAliasToPartnInfo() |
ArrayList<PartitionDesc> |
FetchWork.getPartDesc() |
ArrayList<PartitionDesc> |
FetchWork.getPartDescOrderedByPartDir()
Get partition descriptors in sorted (ascending) order of partition directory. |
List<PartitionDesc> |
FetchWork.getPartDescs(List<org.apache.hadoop.fs.Path> paths) |
ArrayList<PartitionDesc> |
MapWork.getPartitionDescs() |
LinkedHashMap<org.apache.hadoop.fs.Path,PartitionDesc> |
MapWork.getPathToPartitionInfo() |
Modifier and Type | Method and Description |
---|---|
void |
MapWork.addMapWork(org.apache.hadoop.fs.Path path,
String alias,
Operator<?> work,
PartitionDesc pd) |
void |
MapWork.addPathToPartitionInfo(org.apache.hadoop.fs.Path path,
PartitionDesc partitionInfo) |
void |
MapWork.mergeAliasedInput(String alias,
org.apache.hadoop.fs.Path pathDir,
PartitionDesc partitionInfo) |
void |
MapWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
Modifier and Type | Method and Description |
---|---|
void |
MapWork.setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo) |
void |
FetchWork.setPartDesc(ArrayList<PartitionDesc> partDesc) |
void |
MapWork.setPathToPartitionInfo(LinkedHashMap<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo) |
Constructor and Description |
---|
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc) |
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc,
int limit) |
Copyright © 2021 The Apache Software Foundation. All rights reserved.