Saving all output to "!!{outputDirectory}!!/ppd_join_filter.q.raw". Enter "record" with no arguments to stop it. >>> !run !!{qFileDirectory}!!/ppd_join_filter.q >>> set hive.optimize.ppd=true; No rows affected >>> set hive.ppd.remove.duplicatefilters=false; No rows affected >>> >>> explain extended select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) k) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 1) k1) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 2) k2) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 3) k3)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) k1) 5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) k2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) k3)))))' '' 'STAGE DEPENDENCIES:' ' Stage-2 is a root stage' ' Stage-1 depends on stages: Stage-2' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-2' ' Map Reduce' ' Alias -> Map Operator Tree:' ' b:src ' ' TableScan' ' alias: src' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: key' ' type: string' ' outputColumnNames: key' ' Group By Operator' ' aggregations:' ' expr: min(key)' ' bucketGroup: false' ' keys:' ' expr: key' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [b:src]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: min(VALUE._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: (_col1 + 1)' ' type: double' ' expr: (_col1 + 2)' ' type: double' ' expr: (_col1 + 3)' ' type: double' ' outputColumnNames: _col0, _col2, _col3, _col4' ' Filter Operator' ' isSamplingPred: false' ' predicate:' ' expr: (_col2 < 5.0)' ' type: boolean' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' $INTNAME ' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: 1' ' value expressions:' ' expr: _col3' ' type: double' ' expr: _col4' ' type: double' ' a ' ' TableScan' ' alias: a' ' GatherStats: false' ' Reduce Output Operator' ' key expressions:' ' expr: key' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: key' ' type: string' ' tag: 0' ' value expressions:' ' expr: key' ' type: string' ' Needs Tagging: true' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [$INTNAME]' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [a]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! 
' ' Partition' ' base file name: -mr-10002' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Join Operator' ' condition map:' ' Inner Join 0 to 1' ' condition expressions:' ' 0 {VALUE._col0}' ' 1 {VALUE._col3} {VALUE._col4}' ' handleSkewJoin: false' ' outputColumnNames: _col0, _col7, _col8' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col7' ' type: double' ' expr: _col8' ' type: double' ' outputColumnNames: _col0, _col1, _col2' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' 
' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1,_col2' ' columns.types string:double:double' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 276 rows selected >>> >>> select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'key','k2','k3' '0','2.0','3.0' '0','2.0','3.0' '0','2.0','3.0' '2','4.0','5.0' 4 rows selected >>> >>> set hive.optimize.ppd=true; No rows affected >>> set hive.ppd.remove.duplicatefilters=true; No rows affected >>> >>> explain extended select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) k) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 1) k1) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 2) k2) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 3) k3)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) k1) 5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) k2)) (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL b) k3)))))' '' 'STAGE DEPENDENCIES:' ' Stage-2 is a root stage' ' Stage-1 depends on stages: Stage-2' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-2' ' Map Reduce' ' Alias -> Map Operator Tree:' ' b:src ' ' TableScan' ' alias: src' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: key' ' type: string' ' outputColumnNames: key' ' Group By Operator' ' aggregations:' ' expr: min(key)' ' bucketGroup: false' ' keys:' ' expr: key' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [b:src]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: min(VALUE._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: (_col1 + 1)' ' type: double' ' expr: (_col1 + 2)' ' type: double' ' expr: (_col1 + 3)' ' type: double' ' outputColumnNames: _col0, _col2, _col3, _col4' ' Filter Operator' ' isSamplingPred: false' ' predicate:' ' expr: (_col2 < 5.0)' ' type: boolean' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' 
' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' $INTNAME ' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: 1' ' value expressions:' ' expr: _col3' ' type: double' ' expr: _col4' ' type: double' ' a ' ' TableScan' ' alias: a' ' GatherStats: false' ' Reduce Output Operator' ' key expressions:' ' expr: key' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: key' ' type: string' ' tag: 0' ' value expressions:' ' expr: key' ' type: string' ' Needs Tagging: true' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [$INTNAME]' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [a]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! ' ' Partition' ' base file name: -mr-10002' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Join Operator' ' condition map:' ' Inner Join 0 to 1' ' condition expressions:' ' 0 {VALUE._col0}' ' 1 {VALUE._col3} {VALUE._col4}' ' handleSkewJoin: false' ' outputColumnNames: _col0, _col7, _col8' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col7' ' type: double' ' expr: _col8' ' type: double' ' outputColumnNames: _col0, _col1, _col2' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1,_col2' ' columns.types string:double:double' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 276 rows selected >>> >>> select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'key','k2','k3' '0','2.0','3.0' '0','2.0','3.0' '0','2.0','3.0' '2','4.0','5.0' 4 rows selected >>> >>> set hive.optimize.ppd=false; No rows affected >>> set hive.ppd.remove.duplicatefilters=false; No rows affected >>> >>> explain extended select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) k) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 1) k1) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 2) k2) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 3) k3)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) k1) 5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) k2)) (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL b) k3)))))' '' 'STAGE DEPENDENCIES:' ' Stage-2 is a root stage' ' Stage-1 depends on stages: Stage-2' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-2' ' Map Reduce' ' Alias -> Map Operator Tree:' ' b:src ' ' TableScan' ' alias: src' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: key' ' type: string' ' outputColumnNames: key' ' Group By Operator' ' aggregations:' ' expr: min(key)' ' bucketGroup: false' ' keys:' ' expr: key' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [b:src]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: min(VALUE._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: (_col1 + 1)' ' type: double' ' expr: (_col1 + 2)' ' type: double' ' expr: (_col1 + 3)' ' type: double' ' outputColumnNames: _col0, _col2, _col3, _col4' ' Filter Operator' ' isSamplingPred: false' ' predicate:' ' expr: (_col2 < 5.0)' ' type: boolean' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' 
' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' $INTNAME ' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: 1' ' value expressions:' ' expr: _col3' ' type: double' ' expr: _col4' ' type: double' ' a ' ' TableScan' ' alias: a' ' GatherStats: false' ' Reduce Output Operator' ' key expressions:' ' expr: key' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: key' ' type: string' ' tag: 0' ' value expressions:' ' expr: key' ' type: string' ' Needs Tagging: true' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [$INTNAME]' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [a]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! ' ' Partition' ' base file name: -mr-10002' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Join Operator' ' condition map:' ' Inner Join 0 to 1' ' condition expressions:' ' 0 {VALUE._col0}' ' 1 {VALUE._col3} {VALUE._col4}' ' handleSkewJoin: false' ' outputColumnNames: _col0, _col7, _col8' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col7' ' type: double' ' expr: _col8' ' type: double' ' outputColumnNames: _col0, _col1, _col2' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1,_col2' ' columns.types string:double:double' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 276 rows selected >>> >>> select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'key','k2','k3' '0','2.0','3.0' '0','2.0','3.0' '0','2.0','3.0' '2','4.0','5.0' 4 rows selected >>> >>> set hive.optimize.ppd=false; No rows affected >>> set hive.ppd.remove.duplicatefilters=true; No rows affected >>> >>> explain extended select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'Explain' 'ABSTRACT SYNTAX TREE:' ' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) k) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 1) k1) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 2) k2) (TOK_SELEXPR (+ (TOK_FUNCTION min (TOK_TABLE_OR_COL key)) 3) k3)) (TOK_GROUPBY (TOK_TABLE_OR_COL key)))) b) (and (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (< (. (TOK_TABLE_OR_COL b) k1) 5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) k2)) (TOK_SELEXPR (.
(TOK_TABLE_OR_COL b) k3)))))' '' 'STAGE DEPENDENCIES:' ' Stage-2 is a root stage' ' Stage-1 depends on stages: Stage-2' ' Stage-0 is a root stage' '' 'STAGE PLANS:' ' Stage: Stage-2' ' Map Reduce' ' Alias -> Map Operator Tree:' ' b:src ' ' TableScan' ' alias: src' ' GatherStats: false' ' Select Operator' ' expressions:' ' expr: key' ' type: string' ' outputColumnNames: key' ' Group By Operator' ' aggregations:' ' expr: min(key)' ' bucketGroup: false' ' keys:' ' expr: key' ' type: string' ' mode: hash' ' outputColumnNames: _col0, _col1' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: -1' ' value expressions:' ' expr: _col1' ' type: string' ' Needs Tagging: false' ' Path -> Alias:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [b:src]' ' Path -> Partition:' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Group By Operator' ' aggregations:' ' expr: min(VALUE._col0)' ' bucketGroup: false' ' keys:' ' expr: KEY._col0' ' type: string' ' mode: mergepartial' ' outputColumnNames: _col0, _col1' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: (_col1 + 1)' ' type: double' ' expr: (_col1 + 2)' ' type: double' ' expr: (_col1 + 3)' ' type: double' ' outputColumnNames: _col0, _col2, _col3, _col4' ' Filter Operator' ' isSamplingPred: false' ' predicate:' ' expr: (_col2 < 5.0)' ' type: boolean' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' 
' NumFilesPerFileSink: 1' ' table:' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-1' ' Map Reduce' ' Alias -> Map Operator Tree:' ' $INTNAME ' ' Reduce Output Operator' ' key expressions:' ' expr: _col0' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: _col0' ' type: string' ' tag: 1' ' value expressions:' ' expr: _col3' ' type: double' ' expr: _col4' ' type: double' ' a ' ' TableScan' ' alias: a' ' GatherStats: false' ' Reduce Output Operator' ' key expressions:' ' expr: key' ' type: string' ' sort order: +' ' Map-reduce partition columns:' ' expr: key' ' type: string' ' tag: 0' ' value expressions:' ' expr: key' ' type: string' ' Needs Tagging: true' ' Path -> Alias:' ' file:!!{hive.exec.scratchdir}!! [$INTNAME]' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src [a]' ' Path -> Partition:' ' file:!!{hive.exec.scratchdir}!! ' ' Partition' ' base file name: -mr-10002' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' ' ' input format: org.apache.hadoop.mapred.SequenceFileInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat' ' properties:' ' columns _col0,_col2,_col3,_col4' ' columns.types string,double,double,double' ' escape.delim \' ' !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src ' ' Partition' ' base file name: src' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' ' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' ' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' bucket_count -1' ' columns key,value' ' columns.types string:string' ' file.inputformat org.apache.hadoop.mapred.TextInputFormat' ' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' location !!{hive.metastore.warehouse.dir}!!/ppd_join_filter.db/src' ' name ppd_join_filter.src' ' numFiles 1' ' numPartitions 0' ' numRows 0' ' rawDataSize 0' ' serialization.ddl struct src { string key, string value}' ' serialization.format 1' ' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' totalSize 5812' ' transient_lastDdlTime !!UNIXTIME!!' 
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ' name: ppd_join_filter.src' ' name: ppd_join_filter.src' ' Reduce Operator Tree:' ' Join Operator' ' condition map:' ' Inner Join 0 to 1' ' condition expressions:' ' 0 {VALUE._col0}' ' 1 {VALUE._col3} {VALUE._col4}' ' handleSkewJoin: false' ' outputColumnNames: _col0, _col7, _col8' ' Select Operator' ' expressions:' ' expr: _col0' ' type: string' ' expr: _col7' ' type: double' ' expr: _col8' ' type: double' ' outputColumnNames: _col0, _col1, _col2' ' File Output Operator' ' compressed: false' ' GlobalTableId: 0' ' directory: file:!!{hive.exec.scratchdir}!!' ' NumFilesPerFileSink: 1' ' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!' ' table:' ' input format: org.apache.hadoop.mapred.TextInputFormat' ' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ' properties:' ' columns _col0,_col1,_col2' ' columns.types string:double:double' ' escape.delim \' ' serialization.format 1' ' TotalFiles: 1' ' GatherStats: false' ' MultiFileSpray: false' '' ' Stage: Stage-0' ' Fetch Operator' ' limit: -1' '' '' 276 rows selected >>> >>> select a.key, b.k2, b.k3 from src a join ( select key, min(key) as k, min(key)+1 as k1, min(key)+2 as k2, min(key)+3 as k3 from src group by key ) b on a.key=b.key and b.k1 < 5; 'key','k2','k3' '0','2.0','3.0' '0','2.0','3.0' '0','2.0','3.0' '2','4.0','5.0' 4 rows selected >>> >>> !record
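
Note: the four runs above exercise every combination of hive.optimize.ppd (true/false) and hive.ppd.remove.duplicatefilters (false/true). In all four masked plans the join-side predicate b.k1 < 5 is evaluated by a Filter Operator (expr: (_col2 < 5.0)) directly after the Group By in Stage-2, ahead of the join in Stage-1, and every run returns the same 4 rows. A hand-pushed rewrite that mirrors what these plans compute is sketched below; it is illustrative only and not part of the recorded test:

    select a.key, b.k2, b.k3
    from src a
    join (
      -- Pushing the predicate on k1 into the subquery as a HAVING clause
      -- mirrors the Filter Operator placed after the Group By in Stage-2.
      select key,
             min(key)     as k,
             min(key) + 1 as k1,
             min(key) + 2 as k2,
             min(key) + 3 as k3
      from src
      group by key
      having min(key) + 1 < 5
    ) b
    on a.key = b.key;

Because min(key)+1 is computed per group before the join, only keys '0' (k1 = 1.0) and '2' (k1 = 3.0) survive the filter; key '0' appears three times in src and key '2' once, giving the 4 result rows shown in each run.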