All Methods | Instance Methods | Concrete Methods
Modifier and Type | Method and Description
void |
addIndexIntermediateFile(String fileName) |
void |
addMapWork(org.apache.hadoop.fs.Path path,
String alias,
Operator<?> work,
PartitionDesc pd) |
void |
addPathToAlias(org.apache.hadoop.fs.Path path,
ArrayList<String> aliases) |
void |
addPathToAlias(org.apache.hadoop.fs.Path path,
String newAlias) |
void |
addPathToPartitionInfo(org.apache.hadoop.fs.Path path,
PartitionDesc partitionInfo) |
void |
configureJobConf(org.apache.hadoop.mapred.JobConf job) |
void |
deriveExplainAttributes()
Derive additional attributes to be rendered by EXPLAIN.
|
void |
deriveLlap(org.apache.hadoop.conf.Configuration conf,
boolean isExecDriver) |
ArrayList<String> |
getAliases() |
LinkedHashMap<String,PartitionDesc> |
getAliasToPartnInfo() |
LinkedHashMap<String,Operator<? extends OperatorDesc>> |
getAliasToWork() |
Set<Operator<? extends OperatorDesc>> |
getAllRootOperators() |
Operator<? extends OperatorDesc> |
getAnyRootOperator() |
String[] |
getBaseSrc() |
Map<String,List<BucketingSortingCtx.BucketCol>> |
getBucketedColsByDirectory() |
boolean |
getDoSplitsGrouping() |
boolean |
getDummyTableScan() |
Map<String,List<String>> |
getEventSourceColumnNameMap() |
Map<String,List<String>> |
getEventSourceColumnTypeMap() |
Map<String,List<ExprNodeDesc>> |
getEventSourcePartKeyExprMap() |
Map<String,List<TableDesc>> |
getEventSourceTableDescMap() |
String |
getExecutionMode() |
BitSet |
getIncludedBuckets() |
String |
getIndexIntermediateFile() |
String |
getInputformat() |
String |
getLlapIoDesc() |
List<String> |
getMapAliases() |
MapWork.MapExplainVectorization |
getMapExplainVectorization() |
Long |
getMaxSplitSize() |
Long |
getMinSplitSize() |
Long |
getMinSplitSizePerNode() |
Long |
getMinSplitSizePerRack() |
HashMap<String,SplitSample> |
getNameToSplitSample() |
VectorizerReason |
getNotEnabledInputFileFormatReason() |
Integer |
getNumMapTasks() |
ArrayList<PartitionDesc> |
getPartitionDescs() |
ArrayList<org.apache.hadoop.fs.Path> |
getPaths() |
LinkedHashMap<org.apache.hadoop.fs.Path,ArrayList<String>> |
getPathToAliases() |
LinkedHashMap<org.apache.hadoop.fs.Path,PartitionDesc> |
getPathToPartitionInfo() |
int |
getSamplingType() |
String |
getSamplingTypeString() |
Map<String,List<BucketingSortingCtx.SortCol>> |
getSortedColsByDirectory() |
org.apache.hadoop.fs.Path |
getTmpHDFSPath() |
org.apache.hadoop.fs.Path |
getTmpPathForPartitionPruning() |
Map<String,ArrayList<String>> |
getTruncatedPathToAliases()
This is used to display and verify the output of "Path -> Alias" in the test framework.
|
boolean |
getUseVectorizedInputFileFormat() |
List<String> |
getVectorizationEnabledConditionsMet() |
List<String> |
getVectorizationEnabledConditionsNotMet() |
Set<String> |
getVectorizationInputFileFormatClassNameSet() |
org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch |
getVectorizedRowBatch() |
ArrayList<Operator<?>> |
getWorks() |
void |
initialize() |
void |
internTable(com.google.common.collect.Interner<TableDesc> interner) |
boolean |
isInputFormatSorted() |
boolean |
isLeftInputJoin() |
boolean |
isMapperCannotSpanPartns() |
boolean |
isUseBucketizedHiveInputFormat() |
void |
mergeAliasedInput(String alias,
org.apache.hadoop.fs.Path pathDir,
PartitionDesc partitionInfo) |
void |
mergingInto(MapWork mapWork) |
void |
removePathToAlias(org.apache.hadoop.fs.Path path) |
void |
removePathToPartitionInfo(org.apache.hadoop.fs.Path path) |
void |
replaceRoots(Map<Operator<?>,Operator<?>> replacementMap) |
void |
resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
void |
setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo) |
void |
setAliasToWork(LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork) |
void |
setBaseSrc(String[] baseSrc) |
void |
setDoSplitsGrouping(boolean doSplitsGrouping) |
void |
setDummyTableScan(boolean dummyTableScan) |
void |
setEventSourceColumnNameMap(Map<String,List<String>> map) |
void |
setEventSourcePartKeyExprMap(Map<String,List<ExprNodeDesc>> map) |
void |
setEventSourceTableDescMap(Map<String,List<TableDesc>> map) |
void |
setIncludedBuckets(BitSet includedBuckets) |
void |
setInputformat(String inputformat) |
void |
setInputFormatSorted(boolean inputFormatSorted) |
void |
setLeftInputJoin(boolean leftInputJoin) |
void |
setMapAliases(List<String> mapAliases) |
void |
setMapperCannotSpanPartns(boolean mapperCannotSpanPartns) |
void |
setMaxSplitSize(Long maxSplitSize) |
void |
setMinSplitSize(Long minSplitSize) |
void |
setMinSplitSizePerNode(Long minSplitSizePerNode) |
void |
setMinSplitSizePerRack(Long minSplitSizePerRack) |
void |
setNameToSplitSample(HashMap<String,SplitSample> nameToSplitSample) |
void |
setNotEnabledInputFileFormatReason(VectorizerReason notEnabledInputFileFormatReason) |
void |
setNumMapTasks(Integer numMapTasks) |
void |
setPathToAliases(LinkedHashMap<org.apache.hadoop.fs.Path,ArrayList<String>> pathToAliases) |
void |
setPathToPartitionInfo(LinkedHashMap<org.apache.hadoop.fs.Path,PartitionDesc> pathToPartitionInfo) |
void |
setSamplingType(int samplingType) |
void |
setTmpHDFSPath(org.apache.hadoop.fs.Path tmpHDFSPath) |
void |
setTmpPathForPartitionPruning(org.apache.hadoop.fs.Path tmpPathForPartitionPruning) |
void |
setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat) |
void |
setUseVectorizedInputFileFormat(boolean useVectorizedInputFileFormat) |
void |
setVectorizationEnabledConditionsMet(ArrayList<String> vectorizationEnabledConditionsMet) |
void |
setVectorizationEnabledConditionsNotMet(List<String> vectorizationEnabledConditionsNotMet) |
void |
setVectorizationInputFileFormatClassNameSet(Set<String> vectorizationInputFileFormatClassNameSet) |
void |
setVectorizedRowBatch(org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch vectorizedRowBatch) |