Saving all output to "!!{outputDirectory}!!/bucketmapjoin_negative3.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/bucketmapjoin_negative3.q
>>> drop table test1;
No rows affected
>>> drop table test2;
No rows affected
>>> drop table test3;
No rows affected
>>> drop table test4;
No rows affected
>>> 
>>> create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets;
No rows affected
>>> create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets;
No rows affected
>>> create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets;
No rows affected
>>> create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets;
No rows affected
>>> 
>>> load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
No rows affected
>>> load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1;
No rows affected
>>> load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1;
No rows affected
>>> 
>>> load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2;
No rows affected
>>> load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2;
No rows affected
>>> load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2;
No rows affected
>>> 
>>> load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3;
No rows affected
>>> load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3;
No rows affected
>>> load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3;
No rows affected
>>> 
>>> load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4;
No rows affected
>>> load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4;
No rows affected
>>> load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4;
No rows affected
>>> 
>>> set hive.optimize.bucketmapjoin = true;
No rows affected
>>> -- should be allowed
>>> explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. 
(TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
' Bucket Mapjoin Context:'
' Alias Bucket Base File Name Mapping:'
' r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
' Alias Bucket File Name Mapping:'
' r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt]}'
' Alias Bucket Output File Name Mapping:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket20.txt 0'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket21.txt 1'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1/srcbucket22.txt 2'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
' Partition'
' base file name: test1'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test1'
' name: bucketmapjoin_negative3.test1'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
159 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. 
(TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
' Bucket Mapjoin Context:'
' Alias Bucket Base File Name Mapping:'
' r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}'
' Alias Bucket File Name Mapping:'
' r {!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt], !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt=[!!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt]}'
' Alias Bucket Output File Name Mapping:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket20.txt 0'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket21.txt 1'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2/srcbucket22.txt 2'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
' Partition'
' base file name: test2'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test2'
' name: bucketmapjoin_negative3.test2'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
159 rows selected
>>> 
>>> -- should not apply bucket mapjoin
>>> explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (= (+ (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL L) key)) (. 
(TOK_TABLE_OR_COL R) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]'
' 1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
' Partition'
' base file name: test1'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test1'
' name: bucketmapjoin_negative3.test1'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
' Partition'
' base file name: test1'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test1'
' name: bucketmapjoin_negative3.test1'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. 
(TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
' Partition'
' base file name: test1'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test1'
' name: bucketmapjoin_negative3.test1'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1 '
' Partition'
' base file name: test1'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test1'
' name bucketmapjoin_negative3.test1'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test1 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test1'
' name: bucketmapjoin_negative3.test1'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. 
(TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
' Partition'
' base file name: test2'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test2'
' name: bucketmapjoin_negative3.test2'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2 '
' Partition'
' base file name: test2'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name value'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test2'
' name bucketmapjoin_negative3.test2'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test2 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test2'
' name: bucketmapjoin_negative3.test2'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test3) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. 
(TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
' Stage-3 is a root stage'
' Stage-1 depends on stages: Stage-3'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-3'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' r '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' r '
' TableScan'
' alias: r'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' Position of Big Table: 0'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' l '
' TableScan'
' alias: l'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 {key} {value}'
' 1 {key} {value}'
' handleSkewJoin: false'
' keys:'
' 0 [Column[key], Column[value]]'
' 1 [Column[key], Column[value]]'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Position of Big Table: 0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col4, _col5'
' Select Operator'
' expressions:'
' expr: _col0'
' type: string'
' expr: _col1'
' type: string'
' expr: _col4'
' type: string'
' expr: _col5'
' type: string'
' outputColumnNames: _col0, _col1, _col2, _col3'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0,_col1,_col2,_col3'
' columns.types string:string:string:string'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 [l]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3 '
' Partition'
' base file name: test3'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
' name bucketmapjoin_negative3.test3'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test3 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 3'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin_negative3.db/test3'
' name bucketmapjoin_negative3.test3'
' numFiles 3'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct test3 { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 4200'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketmapjoin_negative3.test3'
' name: bucketmapjoin_negative3.test3'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
150 rows selected
>>> !record
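
Note on what the plans above demonstrate: the "Bucket Mapjoin Context" section appears only in the first two plans. Bucket map join requires each join key to be a bare column (an expression such as L.key+L.key disqualifies the join) and requires the join condition to line up the bucketing columns of the two sides, in the same order and with compatible bucket counts. The self-joins of test1 and test2 satisfy this; the cross-table joins do not, because the two tables bucket by different columns (key vs. value) or by the same columns in a different order (key, value vs. value, key). As an illustration only, not part of the recorded run, a self-join of test3 on both of its bucketing columns would be expected to qualify as well:

set hive.optimize.bucketmapjoin = true;
-- test3 is clustered by (key, value) into 3 buckets on both sides, and the join
-- keys cover those same columns in the same order, so the plan should show a
-- Bucket Mapjoin Context for the hinted small-table alias R.
explain extended select /* + MAPJOIN(R) */ * from test3 L join test3 R on L.key=R.key AND L.value=R.value;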