Saving all output to "!!{outputDirectory}!!/bucketcontext_5.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/bucketcontext_5.q
>>> -- small no part, 4 bucket & big no part, 2 bucket
>>> CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
No rows affected
>>> 
>>> CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
No rows affected
>>> load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
No rows affected
>>> 
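The test loads files that are already bucketed and sorted, so LOAD DATA can place them into the bucketed tables directly. Outside a test harness, the usual way to get correctly bucketed and sorted data is to let Hive do the bucketing on insert. A minimal sketch, assuming a plain staging table named src_staging with matching columns (the staging table name is hypothetical; these statements are not part of the recorded session):

-- Hypothetical setup (not recorded above): have Hive enforce bucketing and
-- sorting on write, so each of the 4 output files is a sorted bucket of key.
set hive.enforce.bucketing = true;
set hive.enforce.sorting = true;
INSERT OVERWRITE TABLE bucket_small
SELECT key, value FROM src_staging;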
>>> set hive.optimize.bucketmapjoin = true;
No rows affected
>>> explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
''
'STAGE DEPENDENCIES:'
' Stage-4 is a root stage'
' Stage-1 depends on stages: Stage-4'
' Stage-2 depends on stages: Stage-1'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-4'
' Map Reduce Local Work'
' Alias -> Map Local Tables:'
' a '
' Fetch Operator'
' limit: -1'
' Alias -> Map Local Operator Tree:'
' a '
' TableScan'
' alias: a'
' GatherStats: false'
' HashTable Sink Operator'
' condition expressions:'
' 0 '
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 1'
' Bucket Mapjoin Context:'
' Alias Bucket Base File Name Mapping:'
' a {srcsortbucket1outof4.txt=[srcsortbucket1outof4.txt, srcsortbucket3outof4.txt], srcsortbucket2outof4.txt=[srcsortbucket2outof4.txt, srcsortbucket4outof4.txt]}'
' Alias Bucket File Name Mapping:'
' a {!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket1outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket1outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket3outof4.txt], !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket2outof4.txt=[!!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket2outof4.txt, !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_small/srcsortbucket4outof4.txt]}'
' Alias Bucket Output File Name Mapping:'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket1outof4.txt 0'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big/srcsortbucket2outof4.txt 1'
''
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' b '
' TableScan'
' alias: b'
' GatherStats: false'
' Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 '
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 1'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Local Work:'
' Map Reduce Local Work'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big [b]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big '
' Partition'
' base file name: bucket_big'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 2'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
' name bucketcontext_5.bucket_big'
' numFiles 2'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct bucket_big { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 2750'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 2'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
' name bucketcontext_5.bucket_big'
' numFiles 2'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct bucket_big { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 2750'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketcontext_5.bucket_big'
' name: bucketcontext_5.bucket_big'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' Select Operator'
' Group By Operator'
' aggregations:'
' expr: count()'
' bucketGroup: false'
' mode: hash'
' outputColumnNames: _col0'
' Reduce Output Operator'
' sort order: '
' tag: -1'
' value expressions:'
' expr: _col0'
' type: bigint'
' Needs Tagging: false'
' Path -> Alias:'
' file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
' Path -> Partition:'
' file:!!{hive.exec.scratchdir}!! '
' Partition'
' base file name: -mr-10002'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' '
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' Reduce Operator Tree:'
' Group By Operator'
' aggregations:'
' expr: count(VALUE._col0)'
' bucketGroup: false'
' mode: mergepartial'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: bigint'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0'
' columns.types bigint'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
202 rows selected
>>> select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
'_c1'
'464'
1 row selected
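Note the two-level structure the MAPJOIN hint produces here: Stage-4 is Map Reduce Local Work that scans the small table (alias a) into a HashTable Sink, and Stage-1 streams the big table (alias b, "Position of Big Table: 1") through the Map Join Operator. The Bucket Mapjoin Context above shows why the 4-bucket/2-bucket pairing is compatible: a row's 2-bucket assignment is fully determined by its 4-bucket assignment, so the mapper reading bucket_big/srcsortbucket1outof4.txt needs only the two small-table files srcsortbucket1outof4.txt and srcsortbucket3outof4.txt, not the whole table. A sketch of the arithmetic (illustrative only, not part of the recorded run; pmod stands in for Hive's actual assignment, which masks the column hash to non-negative before taking it modulo the bucket count):

-- For every row, bucket_of_2 == bucket_of_4 mod 2, so each of the two
-- big-table buckets pairs with exactly two of the four small-table buckets.
select key,
       pmod(hash(key), 4) as bucket_of_4,
       pmod(hash(key), 2) as bucket_of_2
from bucket_big
limit 10;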
>>> 
>>> set hive.optimize.bucketmapjoin.sortedmerge = true;
No rows affected
>>> explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
'Explain'
'ABSTRACT SYNTAX TREE:'
' (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME bucket_small) a) (TOK_TABREF (TOK_TABNAME bucket_big) b) (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST a))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
''
'STAGE DEPENDENCIES:'
' Stage-1 is a root stage'
' Stage-2 depends on stages: Stage-1'
' Stage-0 is a root stage'
''
'STAGE PLANS:'
' Stage: Stage-1'
' Map Reduce'
' Alias -> Map Operator Tree:'
' b '
' TableScan'
' alias: b'
' GatherStats: false'
' Sorted Merge Bucket Map Join Operator'
' condition map:'
' Inner Join 0 to 1'
' condition expressions:'
' 0 '
' 1 '
' handleSkewJoin: false'
' keys:'
' 0 [Column[key]]'
' 1 [Column[key]]'
' Position of Big Table: 1'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' table:'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
' Needs Tagging: false'
' Path -> Alias:'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big [b]'
' Path -> Partition:'
' !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big '
' Partition'
' base file name: bucket_big'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 2'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
' name bucketcontext_5.bucket_big'
' numFiles 2'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct bucket_big { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 2750'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' '
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' SORTBUCKETCOLSPREFIX TRUE'
' bucket_count 2'
' bucket_field_name key'
' columns key,value'
' columns.types string:string'
' file.inputformat org.apache.hadoop.mapred.TextInputFormat'
' file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' location !!{hive.metastore.warehouse.dir}!!/bucketcontext_5.db/bucket_big'
' name bucketcontext_5.bucket_big'
' numFiles 2'
' numPartitions 0'
' numRows 0'
' rawDataSize 0'
' serialization.ddl struct bucket_big { string key, string value}'
' serialization.format 1'
' serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' totalSize 2750'
' transient_lastDdlTime !!UNIXTIME!!'
' serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
' name: bucketcontext_5.bucket_big'
' name: bucketcontext_5.bucket_big'
''
' Stage: Stage-2'
' Map Reduce'
' Alias -> Map Operator Tree:'
' file:!!{hive.exec.scratchdir}!! '
' Select Operator'
' Select Operator'
' Group By Operator'
' aggregations:'
' expr: count()'
' bucketGroup: false'
' mode: hash'
' outputColumnNames: _col0'
' Reduce Output Operator'
' sort order: '
' tag: -1'
' value expressions:'
' expr: _col0'
' type: bigint'
' Needs Tagging: false'
' Path -> Alias:'
' file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
' Path -> Partition:'
' file:!!{hive.exec.scratchdir}!! '
' Partition'
' base file name: -mr-10002'
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' '
' input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
' properties:'
' columns '
' columns.types '
' escape.delim \'
' Reduce Operator Tree:'
' Group By Operator'
' aggregations:'
' expr: count(VALUE._col0)'
' bucketGroup: false'
' mode: mergepartial'
' outputColumnNames: _col0'
' Select Operator'
' expressions:'
' expr: _col0'
' type: bigint'
' outputColumnNames: _col0'
' File Output Operator'
' compressed: false'
' GlobalTableId: 0'
' directory: file:!!{hive.exec.scratchdir}!!'
' NumFilesPerFileSink: 1'
' Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
' table:'
' input format: org.apache.hadoop.mapred.TextInputFormat'
' output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
' properties:'
' columns _col0'
' columns.types bigint'
' escape.delim \'
' serialization.format 1'
' TotalFiles: 1'
' GatherStats: false'
' MultiFileSpray: false'
''
' Stage: Stage-0'
' Fetch Operator'
' limit: -1'
''
''
170 rows selected
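Compared with the first plan, enabling the sorted-merge option removes Stage-4 and the Local Work entirely: the join now runs inside Stage-1 as a Sorted Merge Bucket Map Join Operator, merging the pre-sorted bucket files directly instead of first materializing the small table into hash tables (the stage list shrinks from four stages to three, and the plan from 202 to 170 output rows). The two session flags exercised above, restated with comments for reference (everything else in the recorded run is left at its default):

-- The two settings this test toggles:
set hive.optimize.bucketmapjoin = true;             -- bucket-aware map join
set hive.optimize.bucketmapjoin.sortedmerge = true; -- upgrade to a sorted-merge bucket join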
>>> select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
'_c1'
'464'
1 row selected
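Both variants return the same count (464), as expected: the bucket optimizations change only the execution strategy, not the join's result. A hypothetical cross-check, not part of the recorded session, would disable the flags and rerun:

-- Hypothetical cross-check (not recorded above): the same count should come
-- back with the bucket optimizations switched off.
set hive.optimize.bucketmapjoin = false;
set hive.optimize.bucketmapjoin.sortedmerge = false;
select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;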
>>> !record