Saving all output to "!!{outputDirectory}!!/mapjoin_distinct.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/mapjoin_distinct.q
>>> set hive.map.aggr = true;
No rows affected
>>> set hive.groupby.skewindata = true;
No rows affected
>>> explain FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) c) (TOK_TABREF (TOK_TABNAME srcpart) d) (AND (AND (= (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL d) key)) (= (. (TOK_TABLE_OR_COL c) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL d) ds) '2008-04-08')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST d))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) value)))))'
''
'STAGE DEPENDENCIES:'
' Stage-5 is a root stage'
' Stage-1 depends on stages: Stage-5'
' Stage-2 depends on stages: Stage-1'
' Stage-3 depends on stages: Stage-2'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-5'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' d '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' d '
' TableScan'
' alias: d'
' HashTable Sink Operator'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' c '
' TableScan'
' alias: c'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' outputColumnNames: _col1'
' Position of Big Table: 0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: _col1'
' type: string'
' mode: hash'
' outputColumnNames: _col0'
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: rand()'
' type: double'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: partials'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
''
' Stage: Stage-3'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col0'
' type: string'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: final'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
141 rows selected
>>> 
>>> FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value as value order by value limit 10;
'value'
'val_0'
'val_10'
'val_100'
'val_103'
'val_104'
'val_105'
'val_11'
'val_111'
'val_113'
'val_114'
10 rows selected
>>> 
>>> set hive.map.aggr = true;
No rows affected
>>> set hive.groupby.skewindata = false;
No rows affected
>>> explain FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) c) (TOK_TABREF (TOK_TABNAME srcpart) d) (AND (AND (= (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL d) key)) (= (. (TOK_TABLE_OR_COL c) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL d) ds) '2008-04-08')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST d))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) value)))))'
''
'STAGE DEPENDENCIES:'
' Stage-4 is a root stage'
' Stage-1 depends on stages: Stage-4'
' Stage-2 depends on stages: Stage-1'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-4'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' d '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' d '
' TableScan'
' alias: d'
' HashTable Sink Operator'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' c '
' TableScan'
' alias: c'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' outputColumnNames: _col1'
' Position of Big Table: 0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: _col1'
' type: string'
' mode: hash'
' outputColumnNames: _col0'
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col0'
' type: string'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: mergepartial'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
112 rows selected
>>> 
>>> FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value as value order by value limit 10;
'value'
'val_0'
'val_10'
'val_100'
'val_103'
'val_104'
'val_105'
'val_11'
'val_111'
'val_113'
'val_114'
10 rows selected
>>> 
>>> 
>>> set hive.map.aggr = false;
No rows affected
>>> set hive.groupby.skewindata = true;
No rows affected
>>> explain FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) c) (TOK_TABREF (TOK_TABNAME srcpart) d) (AND (AND (= (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL d) key)) (= (. (TOK_TABLE_OR_COL c) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL d) ds) '2008-04-08')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST d))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) value)))))'
''
'STAGE DEPENDENCIES:'
' Stage-5 is a root stage'
' Stage-1 depends on stages: Stage-5'
' Stage-2 depends on stages: Stage-1'
' Stage-3 depends on stages: Stage-2'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-5'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' d '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' d '
' TableScan'
' alias: d'
' HashTable Sink Operator'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' c '
' TableScan'
' alias: c'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' outputColumnNames: _col1'
' Position of Big Table: 0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Reduce Output Operator'
' key expressions:'
' expr: _col1'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: rand()'
' type: double'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: partial1'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
''
' Stage: Stage-3'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Reduce Output Operator'
' key expressions:'
' expr: _col0'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col0'
' type: string'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: final'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
134 rows selected
>>> 
>>> FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value as value order by value limit 10;
'value'
'val_0'
'val_10'
'val_100'
'val_103'
'val_104'
'val_105'
'val_11'
'val_111'
'val_113'
'val_114'
10 rows selected
>>> 
>>> 
>>> set hive.map.aggr = false;
No rows affected
>>> set hive.groupby.skewindata = false;
No rows affected
>>> explain FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcpart) c) (TOK_TABREF (TOK_TABNAME srcpart) d) (AND (AND (= (. (TOK_TABLE_OR_COL c) key) (. (TOK_TABLE_OR_COL d) key)) (= (. (TOK_TABLE_OR_COL c) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL d) ds) '2008-04-08')))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST d))) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) value)))))'
''
'STAGE DEPENDENCIES:'
' Stage-4 is a root stage'
' Stage-1 depends on stages: Stage-4'
' Stage-2 depends on stages: Stage-1'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-4'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' d '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' d '
' TableScan'
' alias: d'
' HashTable Sink Operator'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' c '
' TableScan'
' alias: c'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {value}'
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' outputColumnNames: _col1'
' Position of Big Table: 0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' Local Work:'
' Map Reduce Local Work'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Select Operator'
' expressions:'
' expr: _col1'
' type: string'
' outputColumnNames: _col1'
' Reduce Output Operator'
' key expressions:'
' expr: _col1'
' type: string'
' sort order: +'
' Map-reduce partition columns:'
' expr: _col1'
' type: string'
' tag: -1'
' Reduce Operator Tree:'
' Group By Operator'
' bucketGroup: false'
' keys:'
' expr: KEY._col0'
' type: string'
' mode: complete'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
105 rows selected
>>> 
>>> FROM srcpart c JOIN srcpart d ON ( c.key=d.key AND c.ds='2008-04-08' AND d.ds='2008-04-08') SELECT /*+ MAPJOIN(d) */ DISTINCT c.value as value order by value limit 10;
'value'
'val_0'
'val_10'
'val_100'
'val_103'
'val_104'
'val_105'
'val_11'
'val_111'
'val_113'
'val_114'
10 rows selected
>>> 
>>> 
>>> !record