public class MapWork extends BaseWork
Modifier and Type | Field and Description |
---|---|
static int | SAMPLING_ON_PREV_MR |
static int | SAMPLING_ON_START |
llapMode, LOG, uberMode, useVectorizedInputFileFormat, vectorizedRowBatchCtx
memNeeded, opProps, opTraits, statistics, vectorMode
addDummyOp, addSortCols, getAllLeafOperators, getAllOperators, getDummyOps, getLlapMode, getMapRedLocalWork, getName, getReservedMemoryMB, getSortCols, getTag, getUberMode, getUseVectorizedInputFileFormat, getVectorizedRowBatchCtx, isGatheringStats, setDummyOps, setGatheringStats, setLlapMode, setMapRedLocalWork, setName, setReservedMemoryMB, setTag, setUberMode, setUseVectorizedInputFileFormat, setVectorizedRowBatchCtx
clone, getMemoryNeeded, getOpProps, getStatistics, getTraits, getUserLevelStatistics, getVectorMode, setMemoryNeeded, setOpProps, setStatistics, setTraits, setVectorMode
public static final int SAMPLING_ON_PREV_MR
public static final int SAMPLING_ON_START
public MapWork()
public MapWork(String name)
public LinkedHashMap<String,ArrayList<String>> getPathToAliases()
public void setPathToAliases(LinkedHashMap<String,ArrayList<String>> pathToAliases)
public Map<String,ArrayList<String>> getTruncatedPathToAliases()
public LinkedHashMap<String,PartitionDesc> getPathToPartitionInfo()
public void setPathToPartitionInfo(LinkedHashMap<String,PartitionDesc> pathToPartitionInfo)
public void deriveExplainAttributes()
public void deriveLlap(org.apache.hadoop.conf.Configuration conf)
public void internTable(com.google.common.collect.Interner<TableDesc> interner)
public LinkedHashMap<String,PartitionDesc> getAliasToPartnInfo()
public void setAliasToPartnInfo(LinkedHashMap<String,PartitionDesc> aliasToPartnInfo)
aliasToPartnInfo - the aliasToPartnInfo to set
public LinkedHashMap<String,Operator<? extends OperatorDesc>> getAliasToWork()
public void setAliasToWork(LinkedHashMap<String,Operator<? extends OperatorDesc>> aliasToWork)
public HashMap<String,SplitSample> getNameToSplitSample()
public String getLlapIoDesc()
public void setNameToSplitSample(HashMap<String,SplitSample> nameToSplitSample)
public Integer getNumMapTasks()
public void setNumMapTasks(Integer numMapTasks)
public void addMapWork(String path, String alias, Operator<?> work, PartitionDesc pd)
public boolean isInputFormatSorted()
public void setInputFormatSorted(boolean inputFormatSorted)
public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf, org.apache.hadoop.fs.Path path, TableDesc tblDesc, ArrayList<String> aliases, PartitionDesc partDesc)
public String getExecutionMode()
public void replaceRoots(Map<Operator<?>,Operator<?>> replacementMap)
replaceRoots
in class BaseWork
public Set<Operator<? extends OperatorDesc>> getAllRootOperators()
getAllRootOperators
in class BaseWork
public Operator<? extends OperatorDesc> getAnyRootOperator()
getAnyRootOperator
in class BaseWork
public void mergeAliasedInput(String alias, String pathDir, PartitionDesc partitionInfo)
public void initialize()
public Long getMaxSplitSize()
public void setMaxSplitSize(Long maxSplitSize)
public Long getMinSplitSize()
public void setMinSplitSize(Long minSplitSize)
public Long getMinSplitSizePerNode()
public void setMinSplitSizePerNode(Long minSplitSizePerNode)
public Long getMinSplitSizePerRack()
public void setMinSplitSizePerRack(Long minSplitSizePerRack)
public String getInputformat()
public void setInputformat(String inputformat)
public boolean isUseBucketizedHiveInputFormat()
public void setUseBucketizedHiveInputFormat(boolean useBucketizedHiveInputFormat)
public void setMapperCannotSpanPartns(boolean mapperCannotSpanPartns)
public boolean isMapperCannotSpanPartns()
public String getIndexIntermediateFile()
public ArrayList<PartitionDesc> getPartitionDescs()
public org.apache.hadoop.fs.Path getTmpHDFSPath()
public void setTmpHDFSPath(org.apache.hadoop.fs.Path tmpHDFSPath)
public org.apache.hadoop.fs.Path getTmpPathForPartitionPruning()
public void setTmpPathForPartitionPruning(org.apache.hadoop.fs.Path tmpPathForPartitionPruning)
public void mergingInto(MapWork mapWork)
public Map<String,List<BucketingSortingCtx.BucketCol>> getBucketedColsByDirectory()
public Map<String,List<BucketingSortingCtx.SortCol>> getSortedColsByDirectory()
public void addIndexIntermediateFile(String fileName)
public int getSamplingType()
public void setSamplingType(int samplingType)
public String getSamplingTypeString()
public void configureJobConf(org.apache.hadoop.mapred.JobConf job)
configureJobConf
in class BaseWork
public void setDummyTableScan(boolean dummyTableScan)
public boolean getDummyTableScan()
public Map<String,List<ExprNodeDesc>> getEventSourcePartKeyExprMap()
public void setEventSourcePartKeyExprMap(Map<String,List<ExprNodeDesc>> map)
public void setDoSplitsGrouping(boolean doSplitsGrouping)
public boolean getDoSplitsGrouping()
public boolean isLeftInputJoin()
public void setLeftInputJoin(boolean leftInputJoin)
public String[] getBaseSrc()
public void setBaseSrc(String[] baseSrc)
public BitSet getIncludedBuckets()
public void setIncludedBuckets(BitSet includedBuckets)
public void setVectorizedRowBatch(VectorizedRowBatch vectorizedRowBatch)
public VectorizedRowBatch getVectorizedRowBatch()
Copyright © 2016 The Apache Software Foundation. All rights reserved.