Saving all output to "!!{outputDirectory}!!/ppd_union_view.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/ppd_union_view.q
>>> -- test predicate pushdown on a view with a union
>>> 
>>> drop view v;
No rows affected
>>> 
>>> create table t1_new (key string, value string) partitioned by (ds string);
No rows affected
>>> 
>>> insert overwrite table t1_new partition (ds = '2011-10-15') select 'key1', 'value1' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> insert overwrite table t1_new partition (ds = '2011-10-16') select 'key2', 'value2' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> create table t1_old (keymap string, value string) partitioned by (ds string);
No rows affected
>>> 
>>> insert overwrite table t1_old partition (ds = '2011-10-13') select 'keymap3', 'value3' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> insert overwrite table t1_old partition (ds = '2011-10-14') select 'keymap4', 'value4' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> create table t1_mapping (key string, keymap string) partitioned by (ds string);
No rows affected
>>> 
>>> insert overwrite table t1_mapping partition (ds = '2011-10-13') select 'key3', 'keymap3' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> insert overwrite table t1_mapping partition (ds = '2011-10-14') select 'key4', 'keymap4' from src limit 1;
'_c0','_c1'
No rows selected
>>> 
>>> 
>>> create view t1 partitioned on (ds) as select * from ( select key, value, ds from t1_new union all select key, value, t1_old.ds from t1_old join t1_mapping on t1_old.keymap = t1_mapping.keymap and t1_old.ds = t1_mapping.ds ) subq;
'key','value','ds'
No rows selected
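The view above is partitioned on ds, so a filter on ds alone can be pushed through the union into both branches and satisfied by partition pruning rather than a row-level filter. A minimal sketch of the pattern under test, with hypothetical table and view names (v_union, branch_new, branch_old, branch_map), not part of the recorded run:

  -- Partitioned view over a union of a direct scan and a join;
  -- a predicate on the view's partition column should prune
  -- partitions in each branch.
  create view v_union partitioned on (ds) as
  select key, value, ds from branch_new
  union all
  select m.key, o.value, o.ds
  from branch_old o join branch_map m
  on o.keymap = m.keymap and o.ds = m.ds;

  explain extended select * from v_union where ds = '2011-10-13';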
>>> 
>>> explain extended select * from t1 where ds = '2011-10-13';
'Explain'
'ABSTRACT SYNTAX TREE:'
'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME t1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '2011-10-13'))))'
''
'STAGE DEPENDENCIES:'
'  Stage-1 is a root stage'
'  Stage-2 depends on stages: Stage-1'
'  Stage-0 is a root stage'
''
'STAGE PLANS:'
'  Stage: Stage-1'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        t1-subquery2:subq-subquery2:t1_mapping '
'          TableScan'
'            alias: t1_mapping'
'            GatherStats: false'
'            Reduce Output Operator'
'              key expressions:'
'                expr: keymap'
'                type: string'
'                expr: ds'
'                type: string'
'              sort order: ++'
'              Map-reduce partition columns:'
'                expr: keymap'
'                type: string'
'                expr: ds'
'                type: string'
'              tag: 1'
'              value expressions:'
'                expr: key'
'                type: string'
'        t1-subquery2:subq-subquery2:t1_old '
'          TableScan'
'            alias: t1_old'
'            GatherStats: false'
'            Reduce Output Operator'
'              key expressions:'
'                expr: keymap'
'                type: string'
'                expr: ds'
'                type: string'
'              sort order: ++'
'              Map-reduce partition columns:'
'                expr: keymap'
'                type: string'
'                expr: ds'
'                type: string'
'              tag: 0'
'              value expressions:'
'                expr: value'
'                type: string'
'                expr: ds'
'                type: string'
'      Needs Tagging: true'
'      Path -> Alias:'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_mapping/ds=2011-10-13 [t1-subquery2:subq-subquery2:t1_mapping]'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_old/ds=2011-10-13 [t1-subquery2:subq-subquery2:t1_old]'
'      Path -> Partition:'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_mapping/ds=2011-10-13 '
'          Partition'
'            base file name: ds=2011-10-13'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            partition values:'
'              ds 2011-10-13'
'            properties:'
'              bucket_count -1'
'              columns key,keymap'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_mapping/ds=2011-10-13'
'              name ppd_union_view.t1_mapping'
'              numFiles 1'
'              numPartitions 2'
'              numRows 1'
'              partition_columns ds'
'              rawDataSize 12'
'              serialization.ddl struct t1_mapping { string key, string keymap}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 13'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            '
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            properties:'
'              bucket_count -1'
'              columns key,keymap'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_mapping'
'              name ppd_union_view.t1_mapping'
'              numFiles 2'
'              numPartitions 2'
'              numRows 2'
'              partition_columns ds'
'              rawDataSize 24'
'              serialization.ddl struct t1_mapping { string key, string keymap}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 26'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            name: ppd_union_view.t1_mapping'
'          name: ppd_union_view.t1_mapping'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_old/ds=2011-10-13 '
'          Partition'
'            base file name: ds=2011-10-13'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            partition values:'
'              ds 2011-10-13'
'            properties:'
'              bucket_count -1'
'              columns keymap,value'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_old/ds=2011-10-13'
'              name ppd_union_view.t1_old'
'              numFiles 1'
'              numPartitions 2'
'              numRows 1'
'              partition_columns ds'
'              rawDataSize 14'
'              serialization.ddl struct t1_old { string keymap, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 15'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            '
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            properties:'
'              bucket_count -1'
'              columns keymap,value'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_old'
'              name ppd_union_view.t1_old'
'              numFiles 2'
'              numPartitions 2'
'              numRows 2'
'              partition_columns ds'
'              rawDataSize 28'
'              serialization.ddl struct t1_old { string keymap, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 30'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            name: ppd_union_view.t1_old'
'          name: ppd_union_view.t1_old'
'      Reduce Operator Tree:'
'        Join Operator'
'          condition map:'
'            Inner Join 0 to 1'
'          condition expressions:'
'            0 {VALUE._col1} {VALUE._col2}'
'            1 {VALUE._col0}'
'          handleSkewJoin: false'
'          outputColumnNames: _col1, _col2, _col5'
'          Select Operator'
'            expressions:'
'              expr: _col5'
'              type: string'
'              expr: _col1'
'              type: string'
'              expr: _col2'
'              type: string'
'            outputColumnNames: _col0, _col1, _col2'
'            File Output Operator'
'              compressed: false'
'              GlobalTableId: 0'
'              directory: file:!!{hive.exec.scratchdir}!!'
'              NumFilesPerFileSink: 1'
'              table:'
'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'                properties:'
'                  columns _col0,_col1,_col2'
'                  columns.types string,string,string'
'                  escape.delim \'
'              TotalFiles: 1'
'              GatherStats: false'
'              MultiFileSpray: false'
''
'  Stage: Stage-2'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        file:!!{hive.exec.scratchdir}!! '
'          TableScan'
'            GatherStats: false'
'            Union'
'              Select Operator'
'                expressions:'
'                  expr: _col0'
'                  type: string'
'                  expr: _col1'
'                  type: string'
'                  expr: _col2'
'                  type: string'
'                outputColumnNames: _col0, _col1, _col2'
'                Select Operator'
'                  expressions:'
'                    expr: _col0'
'                    type: string'
'                    expr: _col1'
'                    type: string'
'                    expr: _col2'
'                    type: string'
'                  outputColumnNames: _col0, _col1, _col2'
'                  File Output Operator'
'                    compressed: false'
'                    GlobalTableId: 0'
'                    directory: file:!!{hive.exec.scratchdir}!!'
'                    NumFilesPerFileSink: 1'
'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
'                    table:'
'                      input format: org.apache.hadoop.mapred.TextInputFormat'
'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                      properties:'
'                        columns _col0,_col1,_col2'
'                        columns.types string:string:string'
'                        escape.delim \'
'                        serialization.format 1'
'                    TotalFiles: 1'
'                    GatherStats: false'
'                    MultiFileSpray: false'
'        t1-subquery1:subq-subquery1:t1_new '
'          TableScan'
'            alias: t1_new'
'            GatherStats: false'
'            Filter Operator'
'              isSamplingPred: false'
'              predicate:'
'                expr: (ds = '2011-10-13')'
'                type: boolean'
'              Select Operator'
'                expressions:'
'                  expr: key'
'                  type: string'
'                  expr: value'
'                  type: string'
'                  expr: ds'
'                  type: string'
'                outputColumnNames: _col0, _col1, _col2'
'                Union'
'                  Select Operator'
'                    expressions:'
'                      expr: _col0'
'                      type: string'
'                      expr: _col1'
'                      type: string'
'                      expr: _col2'
'                      type: string'
'                    outputColumnNames: _col0, _col1, _col2'
'                    Select Operator'
'                      expressions:'
'                        expr: _col0'
'                        type: string'
'                        expr: _col1'
'                        type: string'
'                        expr: _col2'
'                        type: string'
'                      outputColumnNames: _col0, _col1, _col2'
'                      File Output Operator'
'                        compressed: false'
'                        GlobalTableId: 0'
'                        directory: file:!!{hive.exec.scratchdir}!!'
'                        NumFilesPerFileSink: 1'
'                        Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
'                        table:'
'                          input format: org.apache.hadoop.mapred.TextInputFormat'
'                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                          properties:'
'                            columns _col0,_col1,_col2'
'                            columns.types string:string:string'
'                            escape.delim \'
'                            serialization.format 1'
'                        TotalFiles: 1'
'                        GatherStats: false'
'                        MultiFileSpray: false'
'      Needs Tagging: false'
'      Path -> Alias:'
'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
'      Path -> Partition:'
'        file:!!{hive.exec.scratchdir}!! '
'          Partition'
'            base file name: -mr-10002'
'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'            properties:'
'              columns _col0,_col1,_col2'
'              columns.types string,string,string'
'              escape.delim \'
'            '
'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'            properties:'
'              columns _col0,_col1,_col2'
'              columns.types string,string,string'
'              escape.delim \'
''
'  Stage: Stage-0'
'    Fetch Operator'
'      limit: -1'
''
''
315 rows selected
>>> 
>>> select * from t1 where ds = '2011-10-13';
'key','value','ds'
'key3','value3','2011-10-13'
1 row selected
>>> 
>>> select * from t1 where ds = '2011-10-14';
'key','value','ds'
'key4','value4','2011-10-14'
1 row selected
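In the plan above, the pushed-down predicate ds = '2011-10-13' is satisfied by partition pruning on the join branch: Stage-1's Path -> Alias lists only the ds=2011-10-13 partitions of t1_old and t1_mapping. The t1_new branch keeps a residual Filter Operator on (ds = '2011-10-13'), and no t1_new path appears under Stage-2's Path -> Alias, since t1_new has no ds=2011-10-13 partition. A quick cross-check of which partitions exist, shown as a hypothetical follow-up rather than part of the recorded run:

  show partitions t1_new;      -- ds=2011-10-15, ds=2011-10-16
  show partitions t1_old;      -- ds=2011-10-13, ds=2011-10-14
  show partitions t1_mapping;  -- ds=2011-10-13, ds=2011-10-14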
>>> 
>>> explain extended select * from t1 where ds = '2011-10-15';
'Explain'
'ABSTRACT SYNTAX TREE:'
'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME t1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '2011-10-15'))))'
''
'STAGE DEPENDENCIES:'
'  Stage-1 is a root stage'
'  Stage-2 depends on stages: Stage-1'
'  Stage-0 is a root stage'
''
'STAGE PLANS:'
'  Stage: Stage-1'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        t1-subquery2:subq-subquery2:t1_mapping '
'          TableScan'
'            alias: t1_mapping'
'            GatherStats: false'
'            Filter Operator'
'              isSamplingPred: false'
'              predicate:'
'                expr: (ds = '2011-10-15')'
'                type: boolean'
'              Reduce Output Operator'
'                key expressions:'
'                  expr: keymap'
'                  type: string'
'                  expr: ds'
'                  type: string'
'                sort order: ++'
'                Map-reduce partition columns:'
'                  expr: keymap'
'                  type: string'
'                  expr: ds'
'                  type: string'
'                tag: 1'
'                value expressions:'
'                  expr: key'
'                  type: string'
'        t1-subquery2:subq-subquery2:t1_old '
'          TableScan'
'            alias: t1_old'
'            GatherStats: false'
'            Filter Operator'
'              isSamplingPred: false'
'              predicate:'
'                expr: (ds = '2011-10-15')'
'                type: boolean'
'              Reduce Output Operator'
'                key expressions:'
'                  expr: keymap'
'                  type: string'
'                  expr: ds'
'                  type: string'
'                sort order: ++'
'                Map-reduce partition columns:'
'                  expr: keymap'
'                  type: string'
'                  expr: ds'
'                  type: string'
'                tag: 0'
'                value expressions:'
'                  expr: value'
'                  type: string'
'                  expr: ds'
'                  type: string'
'      Needs Tagging: true'
'      Reduce Operator Tree:'
'        Join Operator'
'          condition map:'
'            Inner Join 0 to 1'
'          condition expressions:'
'            0 {VALUE._col1} {VALUE._col2}'
'            1 {VALUE._col0}'
'          handleSkewJoin: false'
'          outputColumnNames: _col1, _col2, _col5'
'          Select Operator'
'            expressions:'
'              expr: _col5'
'              type: string'
'              expr: _col1'
'              type: string'
'              expr: _col2'
'              type: string'
'            outputColumnNames: _col0, _col1, _col2'
'            File Output Operator'
'              compressed: false'
'              GlobalTableId: 0'
'              directory: file:!!{hive.exec.scratchdir}!!'
'              NumFilesPerFileSink: 1'
'              table:'
'                input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'                properties:'
'                  columns _col0,_col1,_col2'
'                  columns.types string,string,string'
'                  escape.delim \'
'              TotalFiles: 1'
'              GatherStats: false'
'              MultiFileSpray: false'
''
'  Stage: Stage-2'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        file:!!{hive.exec.scratchdir}!! '
'          TableScan'
'            GatherStats: false'
'            Union'
'              Select Operator'
'                expressions:'
'                  expr: _col0'
'                  type: string'
'                  expr: _col1'
'                  type: string'
'                  expr: _col2'
'                  type: string'
'                outputColumnNames: _col0, _col1, _col2'
'                Select Operator'
'                  expressions:'
'                    expr: _col0'
'                    type: string'
'                    expr: _col1'
'                    type: string'
'                    expr: _col2'
'                    type: string'
'                  outputColumnNames: _col0, _col1, _col2'
'                  File Output Operator'
'                    compressed: false'
'                    GlobalTableId: 0'
'                    directory: file:!!{hive.exec.scratchdir}!!'
'                    NumFilesPerFileSink: 1'
'                    Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
'                    table:'
'                      input format: org.apache.hadoop.mapred.TextInputFormat'
'                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                      properties:'
'                        columns _col0,_col1,_col2'
'                        columns.types string:string:string'
'                        escape.delim \'
'                        serialization.format 1'
'                    TotalFiles: 1'
'                    GatherStats: false'
'                    MultiFileSpray: false'
'        t1-subquery1:subq-subquery1:t1_new '
'          TableScan'
'            alias: t1_new'
'            GatherStats: false'
'            Select Operator'
'              expressions:'
'                expr: key'
'                type: string'
'                expr: value'
'                type: string'
'                expr: ds'
'                type: string'
'              outputColumnNames: _col0, _col1, _col2'
'              Union'
'                Select Operator'
'                  expressions:'
'                    expr: _col0'
'                    type: string'
'                    expr: _col1'
'                    type: string'
'                    expr: _col2'
'                    type: string'
'                  outputColumnNames: _col0, _col1, _col2'
'                  Select Operator'
'                    expressions:'
'                      expr: _col0'
'                      type: string'
'                      expr: _col1'
'                      type: string'
'                      expr: _col2'
'                      type: string'
'                    outputColumnNames: _col0, _col1, _col2'
'                    File Output Operator'
'                      compressed: false'
'                      GlobalTableId: 0'
'                      directory: file:!!{hive.exec.scratchdir}!!'
'                      NumFilesPerFileSink: 1'
'                      Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
'                      table:'
'                        input format: org.apache.hadoop.mapred.TextInputFormat'
'                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                        properties:'
'                          columns _col0,_col1,_col2'
'                          columns.types string:string:string'
'                          escape.delim \'
'                          serialization.format 1'
'                      TotalFiles: 1'
'                      GatherStats: false'
'                      MultiFileSpray: false'
'      Needs Tagging: false'
'      Path -> Alias:'
'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_new/ds=2011-10-15 [t1-subquery1:subq-subquery1:t1_new]'
'      Path -> Partition:'
'        file:!!{hive.exec.scratchdir}!! '
'          Partition'
'            base file name: -mr-10002'
'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'            properties:'
'              columns _col0,_col1,_col2'
'              columns.types string,string,string'
'              escape.delim \'
'            '
'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'            properties:'
'              columns _col0,_col1,_col2'
'              columns.types string,string,string'
'              escape.delim \'
'        !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_new/ds=2011-10-15 '
'          Partition'
'            base file name: ds=2011-10-15'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            partition values:'
'              ds 2011-10-15'
'            properties:'
'              bucket_count -1'
'              columns key,value'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_new/ds=2011-10-15'
'              name ppd_union_view.t1_new'
'              numFiles 1'
'              numPartitions 2'
'              numRows 1'
'              partition_columns ds'
'              rawDataSize 11'
'              serialization.ddl struct t1_new { string key, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 12'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            '
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            properties:'
'              bucket_count -1'
'              columns key,value'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/ppd_union_view.db/t1_new'
'              name ppd_union_view.t1_new'
'              numFiles 2'
'              numPartitions 2'
'              numRows 2'
'              partition_columns ds'
'              rawDataSize 22'
'              serialization.ddl struct t1_new { string key, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 24'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'            name: ppd_union_view.t1_new'
'          name: ppd_union_view.t1_new'
''
'  Stage: Stage-0'
'    Fetch Operator'
'      limit: -1'
''
''
267 rows selected
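For ds = '2011-10-15' the situation inverts: only t1_new has a matching partition, so it is scanned directly (no Filter Operator on that branch, and t1_new/ds=2011-10-15 appears under Stage-2's Path -> Alias), while the join branch keeps explicit Filter Operators on (ds = '2011-10-15') and, in the recorded plan, lists no partition paths in Stage-1. For contrast, a predicate on a non-partition column could not be answered by pruning; a hypothetical query, not part of the recorded run:

  -- Every existing partition of each branch would be scanned, and the
  -- predicate would survive as a row-level Filter Operator instead.
  explain extended select * from t1 where value = 'value1';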
>>> 
>>> select * from t1 where ds = '2011-10-15';
'key','value','ds'
'key1','value1','2011-10-15'
1 row selected
>>> select * from t1 where ds = '2011-10-16';
'key','value','ds'
'key2','value2','2011-10-16'
1 row selected
>>> !record