Saving all output to "!!{outputDirectory}!!/bucketmapjoin10.q.raw". Enter "record" with no arguments to stop it.
>>> !run !!{qFileDirectory}!!/bucketmapjoin10.q
>>> set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
No rows affected
>>> 
>>> CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
No rows affected
>>> 
>>> ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS;
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
No rows affected
>>> 
>>> CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
No rows affected
>>> 
>>> ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
No rows affected
>>> LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
No rows affected
>>> 
>>> ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
No rows affected
>>> 
>>> set hive.optimize.bucketmapjoin=true;
No rows affected
>>> 
>>> -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used
>>> 
>>> EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
'Explain'
'ABSTRACT SYNTAX TREE:'
'  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_1) a) (TOK_TABREF (TOK_TABNAME srcbucket_mapjoin_part_2) b) (AND (AND (= (. (TOK_TABLE_OR_COL a) key) (. (TOK_TABLE_OR_COL b) key)) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL a) part))) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL b) part))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST b))) (TOK_SELEXPR (TOK_FUNCTIONSTAR count)))))'
''
'STAGE DEPENDENCIES:'
'  Stage-4 is a root stage'
'  Stage-1 depends on stages: Stage-4'
'  Stage-2 depends on stages: Stage-1'
'  Stage-0 is a root stage'
''
'STAGE PLANS:'
'  Stage: Stage-4'
'    Map Reduce Local Work'
'      Alias -> Map Local Tables:'
'        b '
'          Fetch Operator'
'            limit: -1'
'      Alias -> Map Local Operator Tree:'
'        b '
'          TableScan'
'            alias: b'
'            GatherStats: false'
'            HashTable Sink Operator'
'              condition expressions:'
'                0 '
'                1 '
'              handleSkewJoin: false'
'              keys:'
'                0 [Column[key]]'
'                1 [Column[key]]'
'              Position of Big Table: 0'
''
'  Stage: Stage-1'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        a '
'          TableScan'
'            alias: a'
'            GatherStats: false'
'            Map Join Operator'
'              condition map:'
'                   Inner Join 0 to 1'
'              condition expressions:'
'                0 '
'                1 '
'              handleSkewJoin: false'
'              keys:'
'                0 [Column[key]]'
'                1 [Column[key]]'
'              Position of Big Table: 0'
'              File Output Operator'
'                compressed: false'
'                GlobalTableId: 0'
'                directory: file:!!{hive.exec.scratchdir}!!'
'                NumFilesPerFileSink: 1'
'                table:'
'                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'                    properties:'
'                      columns '
'                      columns.types '
'                      escape.delim \'
'                TotalFiles: 1'
'                GatherStats: false'
'                MultiFileSpray: false'
'      Local Work:'
'        Map Reduce Local Work'
'      Needs Tagging: false'
'      Path -> Alias:'
'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1 [a]'
'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2 [a]'
'      Path -> Partition:'
'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1 '
'          Partition'
'            base file name: part=1'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            partition values:'
'              part 1'
'            properties:'
'              bucket_count 2'
'              bucket_field_name key'
'              columns key,value'
'              columns.types int:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              last_modified_by !!{user.name}!!'
'              last_modified_time !!UNIXTIME!!'
'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=1'
'              name bucketmapjoin10.srcbucket_mapjoin_part_1'
'              numFiles 2'
'              numPartitions 2'
'              numRows 0'
'              partition_columns part'
'              rawDataSize 0'
'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 2750'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'          '
'              input format: org.apache.hadoop.mapred.TextInputFormat'
'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              properties:'
'                bucket_count 3'
'                bucket_field_name key'
'                columns key,value'
'                columns.types int:string'
'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                last_modified_by !!{user.name}!!'
'                last_modified_time !!UNIXTIME!!'
'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1'
'                name bucketmapjoin10.srcbucket_mapjoin_part_1'
'                numFiles 5'
'                numPartitions 2'
'                numRows 0'
'                partition_columns part'
'                rawDataSize 0'
'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
'                serialization.format 1'
'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                totalSize 6950'
'                transient_lastDdlTime !!UNIXTIME!!'
'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              name: bucketmapjoin10.srcbucket_mapjoin_part_1'
'            name: bucketmapjoin10.srcbucket_mapjoin_part_1'
'        !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2 '
'          Partition'
'            base file name: part=2'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            partition values:'
'              part 2'
'            properties:'
'              bucket_count 3'
'              bucket_field_name key'
'              columns key,value'
'              columns.types int:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              last_modified_by !!{user.name}!!'
'              last_modified_time !!UNIXTIME!!'
'              location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1/part=2'
'              name bucketmapjoin10.srcbucket_mapjoin_part_1'
'              numFiles 3'
'              numPartitions 2'
'              numRows 0'
'              partition_columns part'
'              rawDataSize 0'
'              serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 4200'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'          '
'              input format: org.apache.hadoop.mapred.TextInputFormat'
'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              properties:'
'                bucket_count 3'
'                bucket_field_name key'
'                columns key,value'
'                columns.types int:string'
'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                last_modified_by !!{user.name}!!'
'                last_modified_time !!UNIXTIME!!'
'                location !!{hive.metastore.warehouse.dir}!!/bucketmapjoin10.db/srcbucket_mapjoin_part_1'
'                name bucketmapjoin10.srcbucket_mapjoin_part_1'
'                numFiles 5'
'                numPartitions 2'
'                numRows 0'
'                partition_columns part'
'                rawDataSize 0'
'                serialization.ddl struct srcbucket_mapjoin_part_1 { i32 key, string value}'
'                serialization.format 1'
'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                totalSize 6950'
'                transient_lastDdlTime !!UNIXTIME!!'
'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              name: bucketmapjoin10.srcbucket_mapjoin_part_1'
'            name: bucketmapjoin10.srcbucket_mapjoin_part_1'
''
'  Stage: Stage-2'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        file:!!{hive.exec.scratchdir}!! '
'          Select Operator'
'            Select Operator'
'              Group By Operator'
'                aggregations:'
'                      expr: count()'
'                bucketGroup: false'
'                mode: hash'
'                outputColumnNames: _col0'
'                Reduce Output Operator'
'                  sort order: '
'                  tag: -1'
'                  value expressions:'
'                        expr: _col0'
'                        type: bigint'
'      Needs Tagging: false'
'      Path -> Alias:'
'        file:!!{hive.exec.scratchdir}!! [file:!!{hive.exec.scratchdir}!!]'
'      Path -> Partition:'
'        file:!!{hive.exec.scratchdir}!! '
'          Partition'
'            base file name: -mr-10002'
'            input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'            properties:'
'              columns '
'              columns.types '
'              escape.delim \'
'          '
'              input format: org.apache.hadoop.mapred.SequenceFileInputFormat'
'              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat'
'              properties:'
'                columns '
'                columns.types '
'                escape.delim \'
'      Reduce Operator Tree:'
'        Group By Operator'
'          aggregations:'
'                expr: count(VALUE._col0)'
'          bucketGroup: false'
'          mode: mergepartial'
'          outputColumnNames: _col0'
'          Select Operator'
'            expressions:'
'                  expr: _col0'
'                  type: bigint'
'            outputColumnNames: _col0'
'            File Output Operator'
'              compressed: false'
'              GlobalTableId: 0'
'              directory: file:!!{hive.exec.scratchdir}!!'
'              NumFilesPerFileSink: 1'
'              Stats Publishing Key Prefix: file:!!{hive.exec.scratchdir}!!'
'              table:'
'                  input format: org.apache.hadoop.mapred.TextInputFormat'
'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                  properties:'
'                    columns _col0'
'                    columns.types bigint'
'                    escape.delim \'
'                    serialization.format 1'
'              TotalFiles: 1'
'              GatherStats: false'
'              MultiFileSpray: false'
''
'  Stage: Stage-0'
'    Fetch Operator'
'      limit: -1'
''
''
257 rows selected
>>> 
>>> SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL;
'_c1'
'2116'
1 row selected
>>> !record
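
Editor's note: the plan above contains a plain Map Join Operator (Stage-4 builds one hash table over all of b) rather than a bucket map join, because the partition-level bucket counts disagree even though both tables' current table-level metadata says 3 buckets: part=1 of srcbucket_mapjoin_part_1 was loaded under a 2-bucket layout while part=2 was loaded under 3, and srcbucket_mapjoin_part_2 is the mirror image (3 then 2), as the bucket_count/numFiles values in the partition descriptors show. A minimal sketch of the matching case follows, assuming the same srcbucket2*.txt fixtures; the table name srcbucket_mapjoin_ok is illustrative and not part of this test:

-- Hypothetical companion scenario (not in bucketmapjoin10.q): every partition
-- is written under the same 3-bucket layout the table metadata declares, so
-- hive.optimize.bucketmapjoin=true can convert the hinted map join into a
-- bucket map join, where each mapper loads only the matching small-table
-- bucket instead of a hash table of the entire small table.
CREATE TABLE srcbucket_mapjoin_ok (key INT, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_ok PARTITION (part='1');
LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_ok PARTITION (part='1');
LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_ok PARTITION (part='1');
-- (the same three loads repeated for part='2', keeping three files per partition)
set hive.optimize.bucketmapjoin=true;
EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_ok a JOIN srcbucket_mapjoin_ok b ON a.key = b.key;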