Package | Description |
---|---|
org.apache.hadoop.hive.accumulo |
Serde and InputFormat support for connecting Hive to Accumulo tables.
|
org.apache.hadoop.hive.hbase |
Implements an HBase storage handler for Hive.
|
org.apache.hadoop.hive.ql.exec |
Hive QL execution tasks, operators, functions and other handlers.
|
org.apache.hadoop.hive.ql.exec.persistence | |
org.apache.hadoop.hive.ql.exec.tez | |
org.apache.hadoop.hive.ql.io | |
org.apache.hadoop.hive.ql.io.merge | |
org.apache.hadoop.hive.ql.metadata | |
org.apache.hadoop.hive.ql.optimizer | |
org.apache.hadoop.hive.ql.parse | |
org.apache.hadoop.hive.ql.plan | |
org.apache.hive.hcatalog.mapreduce |
Modifier and Type | Method and Description |
---|---|
void |
AccumuloStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
AccumuloStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
AccumuloStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
AccumuloStorageHandler.configureTableJobProperties(TableDesc desc,
Map<String,String> jobProps)
Push down table properties into the JobConf.
|
protected String |
AccumuloStorageHandler.getTableName(TableDesc tableDesc) |
Modifier and Type | Method and Description |
---|---|
void |
HBaseStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HBaseStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
static void |
HBaseSerDe.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
HBaseKeyFactory.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf)
Configures the jobConf for this factory.
|
void |
CompositeHBaseKeyFactory.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
AbstractHBaseKeyFactory.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
HBaseStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HBaseStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
Modifier and Type | Field and Description |
---|---|
static TableDesc |
Utilities.defaultTd |
protected TableDesc[] |
CommonJoinOperator.spillTableDesc |
Modifier and Type | Method and Description |
---|---|
static TableDesc |
JoinUtil.getSpillTableDesc(Byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
TableDesc |
TableScanOperator.getTableDesc() |
static TableDesc |
Utilities.getTableDesc(String cols,
String colTypes) |
static TableDesc |
Utilities.getTableDesc(Table tbl) |
TableDesc |
FetchTask.getTblDesc()
Return the tableDesc of the fetchWork.
|
static TableDesc[] |
JoinUtil.initSpillTables(JoinDesc conf,
boolean noFilter) |
Modifier and Type | Method and Description |
---|---|
static void |
Utilities.copyTableJobPropertiesToConf(TableDesc tbl,
org.apache.hadoop.conf.Configuration job)
Copies the storage handler properties configured for a table descriptor to a runtime job
configuration.
|
static void |
Utilities.copyTablePropertiesToConf(TableDesc tbl,
org.apache.hadoop.mapred.JobConf job)
Copies the storage handler properties configured for a table descriptor to a runtime job
configuration.
|
static int |
Utilities.getFooterCount(TableDesc table,
org.apache.hadoop.mapred.JobConf job)
Get footer line count for a table.
|
static int |
Utilities.getHeaderCount(TableDesc table)
Get header line count for a table.
|
static PartitionDesc |
Utilities.getPartitionDescFromTableDesc(TableDesc tblDesc,
Partition part) |
static RowContainer<List<Object>> |
JoinUtil.getRowContainer(org.apache.hadoop.conf.Configuration hconf,
List<ObjectInspector> structFieldObjectInspectors,
Byte alias,
int containerSize,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter,
org.apache.hadoop.mapred.Reporter reporter) |
static SerDe |
JoinUtil.getSpillSerDe(byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
static TableDesc |
JoinUtil.getSpillTableDesc(Byte alias,
TableDesc[] spillTableDesc,
JoinDesc conf,
boolean noFilter) |
void |
TableScanOperator.setTableDesc(TableDesc tableDesc) |
Modifier and Type | Method and Description |
---|---|
static TableDesc |
PTFRowContainer.createTableDesc(StructObjectInspector oI) |
Modifier and Type | Method and Description |
---|---|
void |
RowContainer.setTableDesc(TableDesc tblDesc) |
Modifier and Type | Method and Description |
---|---|
protected org.apache.hadoop.hive.ql.exec.tez.DynamicPartitionPruner.SourceInfo |
DynamicPartitionPruner.createSourceInfo(TableDesc t,
ExprNodeDesc partKeyExpr,
String columnName,
org.apache.hadoop.mapred.JobConf jobConf) |
Modifier and Type | Method and Description |
---|---|
static RecordUpdater |
HiveFileFormatUtils.getAcidRecordUpdater(org.apache.hadoop.mapred.JobConf jc,
TableDesc tableInfo,
int bucket,
FileSinkDesc conf,
org.apache.hadoop.fs.Path outPath,
ObjectInspector inspector,
org.apache.hadoop.mapred.Reporter reporter,
int rowIdColNum) |
static HiveOutputFormat<?,?> |
HiveFileFormatUtils.getHiveOutputFormat(org.apache.hadoop.conf.Configuration conf,
TableDesc tableDesc) |
static FileSinkOperator.RecordWriter |
HiveFileFormatUtils.getHiveRecordWriter(org.apache.hadoop.mapred.JobConf jc,
TableDesc tableInfo,
Class<? extends org.apache.hadoop.io.Writable> outputClass,
FileSinkDesc conf,
org.apache.hadoop.fs.Path outPath,
org.apache.hadoop.mapred.Reporter reporter) |
Modifier and Type | Method and Description |
---|---|
void |
MergeFileWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
Modifier and Type | Method and Description |
---|---|
void |
HiveStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
This method is called to allow the StorageHandlers the chance
to populate the JobContext.getConfiguration() with properties that
may be needed by the handler's bundled artifacts (i.e. InputFormat, SerDe, etc.).
|
void |
DefaultStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HiveStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf)
Called just before submitting MapReduce job.
|
void |
DefaultStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
HiveStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
This method is called to allow the StorageHandlers the chance
to populate the JobContext.getConfiguration() with properties that
may be needed by the handler's bundled artifacts (i.e. InputFormat, SerDe, etc.).
|
void |
DefaultStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
HiveStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties)
Deprecated.
|
void |
DefaultStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
Modifier and Type | Method and Description |
---|---|
List<TableDesc> |
GenMRProcContext.GenMRUnionCtx.getTTDesc() |
Modifier and Type | Method and Description |
---|---|
void |
GenMRProcContext.GenMRUnionCtx.addTTDesc(TableDesc tt_desc) |
static TableScanOperator |
GenMapRedUtils.createTemporaryFile(Operator<? extends OperatorDesc> parent,
Operator<? extends OperatorDesc> child,
org.apache.hadoop.fs.Path taskTmpDir,
TableDesc tt_desc,
ParseContext parseCtx)
Break the pipeline between parent and child, and then
output data generated by parent to a temporary file stored in taskTmpDir.
|
static void |
GenMapRedUtils.setTaskPlan(String path,
String alias,
Operator<? extends OperatorDesc> topOp,
MapWork plan,
boolean local,
TableDesc tt_desc)
set the current task in the mapredWork.
|
Modifier and Type | Method and Description |
---|---|
static void |
GenMapRedUtils.internTableDesc(Task<?> task,
com.google.common.collect.Interner<TableDesc> interner) |
Modifier and Type | Method and Description |
---|---|
TableDesc |
ParseContext.getFetchTableDesc() |
Modifier and Type | Method and Description |
---|---|
void |
ParseContext.setFetchTabledesc(TableDesc fetchTableDesc) |
Modifier and Type | Method and Description |
---|---|
static TableDesc |
PlanUtils.getDefaultQueryOutputTableDesc(String cols,
String colTypes,
String fileFormat) |
static TableDesc |
PlanUtils.getDefaultTableDesc(CreateTableDesc directoryDesc,
String cols,
String colTypes) |
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode.
|
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string).
|
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string), and whether the
last column should take the rest of the line.
|
static TableDesc |
PlanUtils.getDefaultTableDesc(String separatorCode,
String columns,
String columnTypes,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of MetadataTypedColumnsetSerDe with the
separatorCode and column names (comma separated string), and whether the
last column should take the rest of the line.
|
static TableDesc |
PlanUtils.getIntermediateFileTableDesc(List<FieldSchema> fieldSchemas)
Generate the table descriptor for intermediate files.
|
TableDesc |
ReduceWork.getKeyDesc() |
TableDesc |
ReduceSinkDesc.getKeySerializeInfo() |
TableDesc |
JoinDesc.getKeyTableDesc() |
TableDesc |
HashTableSinkDesc.getKeyTableDesc() |
TableDesc |
MapJoinDesc.getKeyTblDesc() |
TableDesc |
HashTableSinkDesc.getKeyTblDesc() |
static TableDesc |
PlanUtils.getMapJoinKeyTableDesc(org.apache.hadoop.conf.Configuration conf,
List<FieldSchema> fieldSchemas)
Generate the table descriptor for Map-side join key.
|
static TableDesc |
PlanUtils.getMapJoinValueTableDesc(List<FieldSchema> fieldSchemas)
Generate the table descriptor for Map-side join value.
|
static TableDesc |
PlanUtils.getReduceKeyTableDesc(List<FieldSchema> fieldSchemas,
String order)
Generate the table descriptor for reduce key.
|
static TableDesc |
PlanUtils.getReduceValueTableDesc(List<FieldSchema> fieldSchemas)
Generate the table descriptor for intermediate files.
|
TableDesc |
ScriptDesc.getScriptErrInfo() |
TableDesc |
ScriptDesc.getScriptInputInfo() |
TableDesc |
ScriptDesc.getScriptOutputInfo() |
TableDesc |
LoadTableDesc.getTable() |
TableDesc |
AppMasterEventDesc.getTable() |
TableDesc |
PartitionDesc.getTableDesc() |
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns)
Generate the table descriptor of given serde with the separatorCode and
column names (comma separated string).
|
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
boolean lastColumnTakesRestOfTheLine)
Generate the table descriptor of the serde specified with the separatorCode
and column names (comma separated string), and whether the last column
should take the rest of the line.
|
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
String columnTypes,
boolean lastColumnTakesRestOfTheLine) |
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
String columnTypes,
boolean lastColumnTakesRestOfTheLine,
boolean useDelimitedJSON) |
static TableDesc |
PlanUtils.getTableDesc(Class<? extends Deserializer> serdeClass,
String separatorCode,
String columns,
String columnTypes,
boolean lastColumnTakesRestOfTheLine,
boolean useDelimitedJSON,
String fileFormat) |
static TableDesc |
PlanUtils.getTableDesc(CreateTableDesc crtTblDesc,
String cols,
String colTypes)
Generate a table descriptor from a createTableDesc.
|
TableDesc |
FileSinkDesc.getTableInfo() |
TableDesc |
HashTableDummyDesc.getTbl() |
TableDesc |
FetchWork.getTblDesc() |
TableDesc |
ReduceSinkDesc.getValueSerializeInfo() |
Modifier and Type | Method and Description |
---|---|
Map<String,List<TableDesc>> |
MapWork.getEventSourceTableDescMap() |
List<TableDesc> |
DemuxDesc.getKeysSerializeInfos() |
Map<Byte,TableDesc> |
JoinDesc.getSkewKeysValuesTables() |
Map<Byte,TableDesc> |
HashTableSinkDesc.getSkewKeysValuesTables() |
List<TableDesc> |
ReduceWork.getTagToValueDesc() |
List<TableDesc> |
MapJoinDesc.getValueFilteredTblDescs() |
List<TableDesc> |
DemuxDesc.getValuesSerializeInfos() |
List<TableDesc> |
MapJoinDesc.getValueTblDescs() |
List<TableDesc> |
HashTableSinkDesc.getValueTblDescs() |
List<TableDesc> |
HashTableSinkDesc.getValueTblFilteredDescs() |
Modifier and Type | Method and Description |
---|---|
static void |
PlanUtils.configureInputJobPropertiesForStorageHandler(TableDesc tableDesc)
Loads the storage handler (if one exists) for the given table
and invokes
HiveStorageHandler.configureInputJobProperties(TableDesc, java.util.Map) . |
static void |
PlanUtils.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
static void |
PlanUtils.configureOutputJobPropertiesForStorageHandler(TableDesc tableDesc)
Loads the storage handler (if one exists) for the given table
and invokes
HiveStorageHandler.configureOutputJobProperties(TableDesc, java.util.Map) . |
void |
MapWork.resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
org.apache.hadoop.fs.Path path,
TableDesc tblDesc,
ArrayList<String> aliases,
PartitionDesc partDesc) |
void |
ReduceWork.setKeyDesc(TableDesc keyDesc)
If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
to keySerializeInfo of the ReduceSink
|
void |
ReduceSinkDesc.setKeySerializeInfo(TableDesc keySerializeInfo) |
void |
JoinDesc.setKeyTableDesc(TableDesc keyTblDesc) |
void |
HashTableSinkDesc.setKeyTableDesc(TableDesc keyTableDesc) |
void |
MapJoinDesc.setKeyTblDesc(TableDesc keyTblDesc) |
void |
HashTableSinkDesc.setKeyTblDesc(TableDesc keyTblDesc) |
void |
ScriptDesc.setScriptErrInfo(TableDesc scriptErrInfo) |
void |
ScriptDesc.setScriptInputInfo(TableDesc scriptInputInfo) |
void |
ScriptDesc.setScriptOutputInfo(TableDesc scriptOutputInfo) |
void |
LoadTableDesc.setTable(TableDesc table) |
void |
AppMasterEventDesc.setTable(TableDesc table) |
void |
PartitionDesc.setTableDesc(TableDesc tableDesc) |
void |
FileSinkDesc.setTableInfo(TableDesc tableInfo) |
void |
HashTableDummyDesc.setTbl(TableDesc tbl) |
void |
FetchWork.setTblDesc(TableDesc tblDesc) |
void |
ReduceSinkDesc.setValueSerializeInfo(TableDesc valueSerializeInfo) |
Modifier and Type | Method and Description |
---|---|
void |
PartitionDesc.intern(com.google.common.collect.Interner<TableDesc> interner) |
void |
MapWork.internTable(com.google.common.collect.Interner<TableDesc> interner) |
void |
MapWork.setEventSourceTableDescMap(Map<String,List<TableDesc>> map) |
void |
DemuxDesc.setKeysSerializeInfos(List<TableDesc> keysSerializeInfos) |
void |
JoinDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables) |
void |
HashTableSinkDesc.setSkewKeysValuesTables(Map<Byte,TableDesc> skewKeysValuesTables) |
void |
ReduceWork.setTagToValueDesc(List<TableDesc> tagToValueDesc) |
void |
MapJoinDesc.setValueFilteredTblDescs(List<TableDesc> valueFilteredTblDescs) |
void |
DemuxDesc.setValuesSerializeInfos(List<TableDesc> valuesSerializeInfos) |
void |
MapJoinDesc.setValueTblDescs(List<TableDesc> valueTblDescs) |
void |
HashTableSinkDesc.setValueTblDescs(List<TableDesc> valueTblDescs) |
void |
HashTableSinkDesc.setValueTblFilteredDescs(List<TableDesc> valueTblFilteredDescs) |
Constructor and Description |
---|
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc) |
FetchWork(List<org.apache.hadoop.fs.Path> partDir,
List<PartitionDesc> partDesc,
TableDesc tblDesc,
int limit) |
FetchWork(org.apache.hadoop.fs.Path tblDir,
TableDesc tblDesc) |
FetchWork(org.apache.hadoop.fs.Path tblDir,
TableDesc tblDesc,
int limit) |
FileSinkDesc(org.apache.hadoop.fs.Path dirName,
TableDesc tableInfo,
boolean compressed) |
FileSinkDesc(org.apache.hadoop.fs.Path dirName,
TableDesc tableInfo,
boolean compressed,
int destTableId,
boolean multiFileSpray,
boolean canBeMerged,
int numFiles,
int totalFiles,
ArrayList<ExprNodeDesc> partitionCols,
DynamicPartitionCtx dpCtx) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
DynamicPartitionCtx dpCtx,
AcidUtils.Operation writeType) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec)
For DDL operations that are not ACID compliant.
|
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
AcidUtils.Operation writeType) |
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
boolean replace)
For use with non-ACID compliant operations, such as LOAD
|
LoadTableDesc(org.apache.hadoop.fs.Path sourcePath,
TableDesc table,
Map<String,String> partitionSpec,
boolean replace,
AcidUtils.Operation writeType) |
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix) |
PartitionDesc(Partition part,
TableDesc tblDesc) |
PartitionDesc(TableDesc table,
LinkedHashMap<String,String> partSpec) |
ReduceSinkDesc(ArrayList<ExprNodeDesc> keyCols,
int numDistributionKeys,
ArrayList<ExprNodeDesc> valueCols,
ArrayList<String> outputKeyColumnNames,
List<List<Integer>> distinctColumnIndices,
ArrayList<String> outputValueColumnNames,
int tag,
ArrayList<ExprNodeDesc> partitionCols,
int numReducers,
TableDesc keySerializeInfo,
TableDesc valueSerializeInfo,
AcidUtils.Operation writeType) |
ScriptDesc(String scriptCmd,
TableDesc scriptInputInfo,
Class<? extends RecordWriter> inRecordWriterClass,
TableDesc scriptOutputInfo,
Class<? extends RecordReader> outRecordReaderClass,
Class<? extends RecordReader> errRecordReaderClass,
TableDesc scriptErrInfo) |
Constructor and Description |
---|
DemuxDesc(Map<Integer,Integer> newTagToOldTag,
Map<Integer,Integer> newTagToChildIndex,
Map<Integer,Integer> childIndexToOriginalNumParents,
List<TableDesc> keysSerializeInfos,
List<TableDesc> valuesSerializeInfos) |
DemuxDesc(Map<Integer,Integer> newTagToOldTag,
Map<Integer,Integer> newTagToChildIndex,
Map<Integer,Integer> childIndexToOriginalNumParents,
List<TableDesc> keysSerializeInfos,
List<TableDesc> valuesSerializeInfos) |
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix) |
MapJoinDesc(Map<Byte,List<ExprNodeDesc>> keys,
TableDesc keyTblDesc,
Map<Byte,List<ExprNodeDesc>> values,
List<TableDesc> valueTblDescs,
List<TableDesc> valueFilteredTblDescs,
List<String> outputColumnNames,
int posBigTable,
JoinCondDesc[] conds,
Map<Byte,List<ExprNodeDesc>> filters,
boolean noOuterJoin,
String dumpFilePrefix) |
Modifier and Type | Method and Description |
---|---|
void |
FosterStorageHandler.configureInputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
FosterStorageHandler.configureJobConf(TableDesc tableDesc,
org.apache.hadoop.mapred.JobConf jobConf) |
void |
FosterStorageHandler.configureOutputJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
void |
FosterStorageHandler.configureTableJobProperties(TableDesc tableDesc,
Map<String,String> jobProperties) |
Copyright © 2017 The Apache Software Foundation. All rights reserved.