public class SparkPartitionPruningSinkOperator extends Operator<SparkPartitionPruningSinkDesc>
Nested classes/interfaces inherited from class Operator: Operator.OperatorFunc, Operator.State
Modifier and Type | Field and Description |
---|---|
protected org.apache.hadoop.io.DataOutputBuffer |
buffer |
protected static org.slf4j.Logger |
LOG |
protected Serializer |
serializer |
Fields inherited from class Operator: abortOp, alias, asyncInitOperations, cContext, childOperators, childOperatorsArray, childOperatorsTag, colExprMap, conf, CONTEXT_NAME_KEY, done, groupKeyObject, HIVECOUNTERCREATEDFILES, HIVECOUNTERFATAL, id, inputObjInspectors, isLogDebugEnabled, isLogInfoEnabled, isLogTraceEnabled, operatorId, out, outputObjInspector, parentOperators, PLOG, reporter, state, statsMap
Constructor and Description |
---|
SparkPartitionPruningSinkOperator()
Kryo ctor.
|
SparkPartitionPruningSinkOperator(CompilationOpContext ctx) |
Modifier and Type | Method and Description |
---|---|
void |
closeOp(boolean abort)
Operator specific close routine.
|
String |
getName()
Gets the name of the node.
|
static String |
getOperatorName() |
OperatorType |
getType()
Return the type of the specific operator among the types in OperatorType.
|
void |
initializeOp(org.apache.hadoop.conf.Configuration hconf)
Operator specific initialization.
|
void |
process(Object row,
int tag)
Process the row.
|
Methods inherited from class Operator: abort, acceptLimitPushdown, allInitializedParentsAreClosed, areAllParentsInitialized, augmentPlan, cleanUpInputFileChanged, cleanUpInputFileChangedOp, clone, cloneOp, cloneRecursiveChildren, close, columnNamesRowResolvedCanBeObtained, completeInitializationOp, createDummy, defaultEndGroup, defaultStartGroup, dump, dump, endGroup, flush, forward, getAdditionalCounters, getChildOperators, getChildren, getColumnExprMap, getCompilationOpContext, getConf, getConfiguration, getDone, getExecContext, getGroupKeyObject, getIdentifier, getInputObjInspectors, getIsReduceSink, getNextCntr, getNumChild, getNumParent, getOperatorId, getOpTraits, getOutputObjInspector, getParentOperators, getReduceOutputName, getSchema, getStatistics, getStats, initEvaluators, initEvaluators, initEvaluatorsAndReturnStruct, initialize, initialize, initializeChildren, initializeLocalWork, initOperatorId, isUseBucketizedHiveInputFormat, jobClose, jobCloseOp, logStats, opAllowedAfterMapJoin, opAllowedBeforeMapJoin, opAllowedBeforeSortMergeJoin, opAllowedConvertMapJoin, passExecContext, preorderMap, processGroup, removeChild, removeChildAndAdoptItsChildren, removeParent, removeParents, replaceChild, replaceParent, reset, resetStats, setAlias, setChildOperators, setColumnExprMap, setCompilationOpContext, setConf, setDone, setExecContext, setGroupKeyObject, setId, setInputContext, setInputObjInspectors, setOperatorId, setOpTraits, setOutputCollector, setParentOperators, setReporter, setSchema, setStatistics, setUseBucketizedHiveInputFormat, startGroup, supportAutomaticSortMergeJoin, supportSkewJoinOptimization, supportUnionRemoveOptimization, toString, toString
protected transient Serializer serializer
protected transient org.apache.hadoop.io.DataOutputBuffer buffer
protected static final org.slf4j.Logger LOG
public SparkPartitionPruningSinkOperator()
public SparkPartitionPruningSinkOperator(CompilationOpContext ctx)
public void initializeOp(org.apache.hadoop.conf.Configuration hconf) throws HiveException
Operator specific initialization.
Overrides: initializeOp in class Operator<SparkPartitionPruningSinkDesc>
Throws: HiveException
public void process(Object row, int tag) throws HiveException
Process the row.
Specified by: process in class Operator<SparkPartitionPruningSinkDesc>
Parameters:
row - The object representing the row.
tag - The tag of the row, usually means which parent this row comes from. Rows with the same tag should have exactly the same rowInspector all the time.
Throws: HiveException
public void closeOp(boolean abort) throws HiveException
Operator specific close routine.
Overrides: closeOp in class Operator<SparkPartitionPruningSinkDesc>
Throws: HiveException
public OperatorType getType()
Return the type of the specific operator among the types in OperatorType.
Specified by: getType in class Operator<SparkPartitionPruningSinkDesc>
public String getName()
Gets the name of the node.
Specified by: getName in interface Node
Overrides: getName in class Operator<SparkPartitionPruningSinkDesc>
public static String getOperatorName()
Copyright © 2016 The Apache Software Foundation. All rights reserved.