Saving all output to "!!{outputDirectory}!!/metadataonly1.q.raw". Enter "record" with no arguments to stop it. >>> !run !!{qFileDirectory}!!/metadataonly1.q >>> CREATE TABLE TEST1(A INT, B DOUBLE) partitioned by (ds string); No rows affected >>> explain extended select max(ds) from TEST1; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL ds))))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: max(ds)' ' bucketGroup: true' ' mode: hash' ' outputColumnNames: _col0' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: string' ' Needs Tagging: false' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: max(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 68 rows selected >>> select max(ds) from TEST1; '_c0' '' 1 row selected >>> >>> alter table TEST1 add partition (ds='1'); No rows affected >>> explain extended select max(ds) from TEST1; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL ds))))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: max(ds)' ' bucketGroup: false' ' mode: hash' ' outputColumnNames: _col0' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} [test1]' ' Path -> Partition:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' 
partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: max(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 111 rows selected >>> select max(ds) from TEST1; '_c0' '1' 1 row selected >>> >>> explain extended select count(distinct ds) from TEST1; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL ds))))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: count(DISTINCT ds)' ' bucketGroup: false' ' keys:' ' expr: ds' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: bigint' ' Needs Tagging: false' ' Path -> Alias:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} [test1]' ' Path -> Partition:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location 
!!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: count(DISTINCT KEY._col0:0._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: bigint' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 117 rows selected >>> select count(distinct ds) from TEST1; '_c0' '1' 1 row selected >>> >>> explain extended select count(ds) from TEST1; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL ds))))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: count(ds)' ' bucketGroup: false' ' mode: hash' ' outputColumnNames: _col0' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: bigint' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1 [test1]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1 ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location 
!!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: count(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: bigint' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 111 rows selected >>> select count(ds) from TEST1; '_c0' '0' 1 row selected >>> >>> alter table TEST1 add partition (ds='2'); No rows affected >>> explain extended select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME TEST1) a2) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL ds)) m)))) b) (= (. (TOK_TABLE_OR_COL a2) ds) (. 
(TOK_TABLE_OR_COL b) m)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-2 depends on stages: Stage-1' ' Stage-3 depends on stages: Stage-2' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' b:test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: max(ds)' ' bucketGroup: false' ' mode: hash' ' outputColumnNames: _col0' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} [b:test1]' ' fake-path-metadata-only-query-metadataonly1.test1{ds=2} [b:test1]' ' Path -> Partition:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' fake-path-metadata-only-query-metadataonly1.test1{ds=2} ' ' Partition' ' base file name: ds=2' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 2' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=2' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: max(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-2' ' Map Reduce' ' Alias -> Map Operator Tree:' ' $INTNAME ' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: 1' ' a2 ' ' TableScan' ' alias: a2' ' GatherStats: false' ' Reduce Output Operator' ' key expressions:' ' expr: ds' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: ds' ' type: string' ' tag: 0' ' Needs Tagging: true' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [$INTNAME]' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1 [a2]' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=2 [a2]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! 
' ' Partition' ' base file name: -mr-10002' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1 ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=2 ' ' Partition' ' base file name: ds=2' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 2' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=2' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Join Operator' ' condition map:' ' Inner Join 0 to 1' ' condition expressions:' ' 0 ' ' 1 ' ' handleSkewJoin: false' ' Select Operator' ' Group By Operator' ' aggregations:' ' expr: count()' ' bucketGroup: false' ' mode: hash' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-3' ' Map Reduce' ' Alias -> Map Operator Tree:' ' file:!!{hive.exec.scratchdir}!! ' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: bigint' ' Needs Tagging: false' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! ' ' Partition' ' base file name: -mr-10003' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: count(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: bigint' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' 
' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types bigint' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 371 rows selected >>> select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m; '_c0' '0' 1 row selected >>> >>> >>> CREATE TABLE TEST2(A INT, B DOUBLE) partitioned by (ds string, hr string); No rows affected >>> alter table TEST2 add partition (ds='1', hr='1'); No rows affected >>> alter table TEST2 add partition (ds='1', hr='2'); No rows affected >>> alter table TEST2 add partition (ds='1', hr='3'); No rows affected >>> >>> explain extended select ds, count(distinct hr) from TEST2 group by ds; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_TABLE_OR_COL hr)))) (TOK_GROUPBY (TOK_TABLE_OR_COL ds))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test2 ' ' TableScan' ' alias: test2' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' expr: hr' ' type: string' ' outputColumnNames: ds, hr' ' Group By Operator' ' aggregations:' ' expr: count(DISTINCT hr)' ' bucketGroup: false' ' keys:' ' expr: ds' ' type: string' ' expr: hr' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1, _col2' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' expr: _col1' ' type: string' ' sort order: ++' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col2' ' type: bigint' ' Needs Tagging: false' ' Path -> Alias:' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=1} [test2]' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=2} [test2]' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=3} [test2]' ' Path -> Partition:' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=1} ' ' Partition' ' base file name: hr=1' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' hr 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=1' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=2} ' ' Partition' ' base file name: hr=2' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' hr 2' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' fake-path-metadata-only-query-metadataonly1.test2{ds=1, hr=3} ' ' Partition' ' base file name: hr=3' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' hr 3' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=3' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: count(DISTINCT KEY._col1:0._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col1' ' type: bigint' ' outputColumnNames: _col0, _col1' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1' ' columns.types string:bigint' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 216 rows selected >>> select ds, count(distinct hr) from TEST2 group by ds; 'ds','_c1' '1','3' 1 row selected >>> >>> explain extended select ds, count(hr) from TEST2 group by ds; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST2))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL ds)) (TOK_SELEXPR (TOK_FUNCTION count (TOK_TABLE_OR_COL hr)))) (TOK_GROUPBY (TOK_TABLE_OR_COL ds))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test2 ' ' TableScan' ' alias: test2' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' expr: hr' ' type: string' ' outputColumnNames: ds, hr' ' Group By Operator' ' aggregations:' ' expr: count(hr)' ' bucketGroup: false' ' keys:' ' expr: ds' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: bigint' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=1 [test2]' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=2 [test2]' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=3 [test2]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=1 ' ' Partition' ' base file name: hr=1' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' 
ds 1' ' hr 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=1' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=2 ' ' Partition' ' base file name: hr=2' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' hr 2' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=3 ' ' Partition' ' base file name: hr=3' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' hr 3' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2/ds=1/hr=3' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test2' ' name metadataonly1.test2' ' partition_columns ds/hr' ' serialization.ddl struct test2 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test2' ' name: metadataonly1.test2' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: count(VALUE._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col1' ' type: bigint' ' outputColumnNames: _col0, _col1' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' 
' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1' ' columns.types string:bigint' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 212 rows selected >>> select ds, count(hr) from TEST2 group by ds; 'ds','_c1' No rows selected >>> >>> set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; No rows affected >>> >>> explain extended select max(ds) from TEST1; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME TEST1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION max (TOK_TABLE_OR_COL ds))))))' '' 'STAGE DEPENDENCIES:' ' Stage-1 is a root stage' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' test1 ' ' TableScan' ' alias: test1' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: ds' ' type: string' ' outputColumnNames: ds' ' Group By Operator' ' aggregations:' ' expr: max(ds)' ' bucketGroup: false' ' mode: hash' ' outputColumnNames: _col0' ' Reduce Output Operator' ' sort order: ' ' tag: -1' ' value expressions:' ' expr: _col0' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} [test1]' ' fake-path-metadata-only-query-metadataonly1.test1{ds=2} [test1]' ' Path -> Partition:' ' fake-path-metadata-only-query-metadataonly1.test1{ds=1} ' ' Partition' ' base file name: ds=1' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 1' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' fake-path-metadata-only-query-metadataonly1.test1{ds=2} ' ' Partition' ' base file name: ds=2' ' input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' partition values:' ' ds 2' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1/ds=2' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.NullStructSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns a,b' ' columns.types int:double' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/metadataonly1.db/test1' ' name metadataonly1.test1' ' partition_columns ds' ' serialization.ddl struct test1 { i32 a, double b}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: metadataonly1.test1' ' name: metadataonly1.test1' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: max(VALUE._col0)' ' bucketGroup: false' ' mode: mergepartial' ' outputColumnNames: _col0' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' outputColumnNames: _col0' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0' ' columns.types string' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 152 rows selected >>> select max(ds) from TEST1; '_c0' '2' 1 row selected >>> >>> select distinct ds from srcpart; 'ds' '2008-04-08' '2008-04-09' 2 rows selected >>> select min(ds),max(ds) from srcpart; '_c0','_c1' '2008-04-08','2008-04-09' 1 row selected >>> !record
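
Note: this transcript exercises Hive's metadata-only query optimization. Whenever a query reads only partition columns (max(ds), count(distinct ds), count(distinct hr)), the plans above replace each selected partition's real location with a fake-path-metadata-only-query-... entry served by org.apache.hadoop.hive.ql.io.OneNullRowInputFormat and org.apache.hadoop.hive.serde2.NullStructSerDe: each partition contributes exactly one null row, so the aggregate is answered from metastore metadata without reading any HDFS data. That is also why max(ds) returns '1' and count(distinct hr) returns 3 even though every partition is empty, while count(ds) and count(hr), which depend on actual row counts, keep the real warehouse locations with LazySimpleSerDe and see zero rows. The final EXPLAIN shows the rewrite still applies after switching to CombineHiveInputFormat. The !!{...}!! tokens are the test harness's masks for nondeterministic values (directories, timestamps).

A minimal sketch of how to reproduce the behavior in an interactive Hive session; the table name is illustrative, and hive.optimize.metadataonly is assumed to be the property gating the rewrite (on by default in the Hive versions this test targets):

  -- Assumed property name; enabled by default, set here for explicitness.
  set hive.optimize.metadataonly=true;

  CREATE TABLE t(a INT) PARTITIONED BY (ds STRING);
  ALTER TABLE t ADD PARTITION (ds='1');      -- empty partition

  -- Metadata-only: EXPLAIN EXTENDED should list fake-path entries with
  -- OneNullRowInputFormat/NullStructSerDe instead of real partition paths.
  EXPLAIN EXTENDED SELECT max(ds) FROM t;
  SELECT max(ds) FROM t;                     -- '1', although the partition holds no rows

  -- Not metadata-only: the result depends on actual row counts, so the
  -- plan keeps the real partition location and scans it.
  EXPLAIN EXTENDED SELECT count(ds) FROM t;
  SELECT count(ds) FROM t;                   -- 0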