Saving all output to "!!{outputDirectory}!!/auto_join13.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/auto_join13.q
>>>
>>> set hive.auto.convert.join = true;
No rows affected
>>>
>>> explain SELECT sum(hash(src1.c1, src2.c4)) FROM (SELECT src.key as c1, src.value as c2 from src) src1 JOIN (SELECT src.key as c3, src.value as c4 from src) src2 ON src1.c1 = src2.c3 AND src1.c1 < 100 JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c2)))) src1) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c4)))) src2) (AND (= (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (< (. (TOK_TABLE_OR_COL src1) c1) 100))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value) c6)))) src3) (AND (= (+ (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c3)) (. (TOK_TABLE_OR_COL src3) c5)) (< (. (TOK_TABLE_OR_COL src3) c5) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION hash (. (TOK_TABLE_OR_COL src1) c1) (. (TOK_TABLE_OR_COL src2) c4)))))))'
''
'STAGE DEPENDENCIES:'
' Stage-11 is a root stage , consists of Stage-14, Stage-15, Stage-1'
' Stage-14 has a backup stage: Stage-1'
' Stage-9 depends on stages: Stage-14'
' Stage-8 depends on stages: Stage-1, Stage-9, Stage-10 , consists of Stage-12, Stage-13, Stage-2'
' Stage-12 has a backup stage: Stage-2'
' Stage-6 depends on stages: Stage-12'
' Stage-3 depends on stages: Stage-2, Stage-6, Stage-7'
' Stage-13 has a backup stage: Stage-2'
' Stage-7 depends on stages: Stage-13'
' Stage-2'
' Stage-15 has a backup stage: Stage-1'
' Stage-10 depends on stages: Stage-15'
' Stage-1'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-11'
' Conditional Operator'
''
' Stage: Stage-14'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' src2:src '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' src2:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' expr: value'
' type: string'
' outputColumnNames: _col0, _col1'
' HashTable Sink Operator'
' condition expressions:'
' 0 {_col0}'
' 1 {_col0} {_col1}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[_col0]]'
' 1 [Column[_col0]]'
' Position of Big Table: 0'
''
' Stage: Stage-9'
' Map Reduce'
' Alias -> Map Operator Tree:'
' src1:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {_col0}'
' 1 {_col0} {_col1}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[_col0]]'
' 1 [Column[_col0]]'
' outputColumnNames: _col0, _col2, _col3'
' Position of Big Table: 0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-8'
' Conditional Operator'
''
' Stage: Stage-12'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' src3:src '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' src3:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 200)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' HashTable Sink Operator'
' condition expressions:'
' 0 {_col3} {_col0}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
' Position of Big Table: 0'
''
' Stage: Stage-6'
' Map Reduce'
' Alias -> Map Operator Tree:'
' $INTNAME '
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {_col3} {_col0}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
' outputColumnNames: _col1, _col2'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col2'
' type: string'
' expr: _col1'
' type: string'
' outputColumnNames: _col2, _col1'
' Group By Operator'
' aggregations:'
' expr: sum(hash(_col2,_col1))'
' bucketGroup: false'
' mode: hash'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-3'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Reduce Output Operator'
' sort order: '
' tag: -1'
' value expressions:'
' expr: _col0'
' type: bigint'
' Reduce Operator Tree:'
' Group By Operator'
' aggregations:'
' expr: sum(VALUE._col0)'
' bucketGroup: false'
' mode: mergepartial'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: bigint'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
' Stage: Stage-13'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' $INTNAME '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' $INTNAME '
' HashTable Sink Operator'
' condition expressions:'
' 0 {_col3} {_col0}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
' Position of Big Table: 1'
''
' Stage: Stage-7'
' Map Reduce'
' Alias -> Map Operator Tree:'
' src3:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 200)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {_col3} {_col0}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0], Column[_col2]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[_col0]()]'
' outputColumnNames: _col1, _col2'
' Position of Big Table: 1'
' Select Operator'
' expressions:'
' expr: _col2'
' type: string'
' expr: _col1'
' type: string'
' outputColumnNames: _col2, _col1'
' Group By Operator'
' aggregations:'
' expr: sum(hash(_col2,_col1))'
' bucketGroup: false'
' mode: hash'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' $INTNAME '
' Reduce Output Operator'
' key expressions:'
' expr: (_col0 + _col2)'
' type: double'
' sort order: +'
' Map-reduce partition columns:'
' expr: (_col0 + _col2)'
' type: double'
' tag: 0'
' value expressions:'
' expr: _col3'
' type: string'
' expr: _col0'
' type: string'
' src3:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 200)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' Reduce Output Operator'
' key expressions:'
' expr: UDFToDouble(_col0)'
' type: double'
' sort order: +'
' Map-reduce partition columns:'
' expr: UDFToDouble(_col0)'
' type: double'
' tag: 1'
' Reduce Operator Tree:'
' Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {VALUE._col1} {VALUE._col2}'
' 1 '
' handleSkewJoin: false'
' outputColumnNames: _col1, _col2'
' Select Operator'
' expressions:'
' expr: _col2'
' type: string'
' expr: _col1'
' type: string'
' outputColumnNames: _col2, _col1'
' Group By Operator'
' aggregations:'
' expr: sum(hash(_col2,_col1))'
' bucketGroup: false'
' mode: hash'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
''
' Stage: Stage-15'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' src1:src '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' src1:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' HashTable Sink Operator'
' condition expressions:'
' 0 {_col0}'
' 1 {_col0} {_col1}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[_col0]]'
' 1 [Column[_col0]]'
' Position of Big Table: 1'
''
' Stage: Stage-10'
' Map Reduce'
' Alias -> Map Operator Tree:'
' src2:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' expr: value'
' type: string'
' outputColumnNames: _col0, _col1'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {_col0}'
' 1 {_col0} {_col1}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[_col0]]'
' 1 [Column[_col0]]'
' outputColumnNames: _col0, _col2, _col3'
' Position of Big Table: 1'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' src1:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' outputColumnNames: _col0'
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col0'
' type: string'
' tag: 0'
' value expressions:'
' expr: _col0'
' type: string'
' src2:src '
' TableScan'
' alias: src'
' Filter Operator'
' predicate:'
' expr: (key < 100)'
' type: boolean'
' Select Operator'
' expressions:'
' expr: key'
' type: string'
' expr: value'
' type: string'
' outputColumnNames: _col0, _col1'
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col0'
' type: string'
' tag: 1'
' value expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' Reduce Operator Tree:'
' Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {VALUE._col0}'
' 1 {VALUE._col0} {VALUE._col1}'
' handleSkewJoin: false'
' outputColumnNames: _col0, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
467 rows selected
>>>
>>> SELECT sum(hash(src1.c1, src2.c4)) FROM (SELECT src.key as c1, src.value as c2 from src) src1 JOIN (SELECT src.key as c3, src.value as c4 from src) src2 ON src1.c1 = src2.c3 AND src1.c1 < 100 JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;
'_c0'
'-97670109576'
1 row selected
>>> !record
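
Note: the plan above shows hive.auto.convert.join turning the common joins into map joins (Stage-14/15 and Stage-12/13 build the local hash tables, with Stage-1 and Stage-2 kept as backup common-join stages). A minimal sketch of the same three-way join written with an explicit MAPJOIN hint instead of relying on auto conversion; the hint placement and the choice of src2 and src3 as the small (hash-table) sides are assumptions for illustration, not part of the recorded test:

-- hypothetical variant: force map-side joins via hint rather than hive.auto.convert.join
SELECT /*+ MAPJOIN(src2, src3) */ sum(hash(src1.c1, src2.c4))
FROM (SELECT src.key as c1, src.value as c2 from src) src1
JOIN (SELECT src.key as c3, src.value as c4 from src) src2
  ON src1.c1 = src2.c3 AND src1.c1 < 100
JOIN (SELECT src.key as c5, src.value as c6 from src) src3
  ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200;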