ReadPipeline |
OrcColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription readerSchema,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |
ReadPipeline |
ColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription readerSchema,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |
ReadPipeline |
GenericColumnVectorProducer.createReadPipeline(Consumer<ColumnVectorBatch> consumer,
org.apache.hadoop.mapred.FileSplit split,
List<Integer> columnIds,
org.apache.hadoop.hive.ql.io.sarg.SearchArgument sarg,
String[] columnNames,
QueryFragmentCounters counters,
org.apache.orc.TypeDescription schema,
org.apache.hadoop.mapred.InputFormat<?,?> sourceInputFormat,
Deserializer sourceSerDe,
org.apache.hadoop.mapred.Reporter reporter,
org.apache.hadoop.mapred.JobConf job,
Map<org.apache.hadoop.fs.Path,PartitionDesc> parts) |