Saving all output to "!!{outputDirectory}!!/bucket3.q.raw". Enter "record" with no arguments to stop it.
>>>  !run !!{qFileDirectory}!!/bucket3.q
>>> set hive.enforce.bucketing = true;
No rows affected
>>> set hive.exec.reducers.max = 1;
No rows affected
>>> 
>>> CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS;
No rows affected
>>> 
>>> explain extended insert overwrite table bucket3_1 partition (ds='1') select * from src;
'Explain'
'ABSTRACT SYNTAX TREE:'
'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME bucket3_1) (TOK_PARTSPEC (TOK_PARTVAL ds '1')))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))'
''
'STAGE DEPENDENCIES:'
'  Stage-1 is a root stage'
'  Stage-0 depends on stages: Stage-1'
'  Stage-2 depends on stages: Stage-0'
''
'STAGE PLANS:'
'  Stage: Stage-1'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        src '
'          TableScan'
'            alias: src'
'            GatherStats: false'
'            Select Operator'
'              expressions:'
'                    expr: key'
'                    type: string'
'                    expr: value'
'                    type: string'
'              outputColumnNames: _col0, _col1'
'              Reduce Output Operator'
'                sort order: '
'                Map-reduce partition columns:'
'                      expr: UDFToInteger(_col0)'
'                      type: int'
'                tag: -1'
'                value expressions:'
'                      expr: _col0'
'                      type: string'
'                      expr: _col1'
'                      type: string'
'      Needs Tagging: false'
'      Path -> Alias:'
'        !!{hive.metastore.warehouse.dir}!!/bucket3.db/src [src]'
'      Path -> Partition:'
'        !!{hive.metastore.warehouse.dir}!!/bucket3.db/src '
'          Partition'
'            base file name: src'
'            input format: org.apache.hadoop.mapred.TextInputFormat'
'            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'            properties:'
'              bucket_count -1'
'              columns key,value'
'              columns.types string:string'
'              file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              location !!{hive.metastore.warehouse.dir}!!/bucket3.db/src'
'              name bucket3.src'
'              numFiles 1'
'              numPartitions 0'
'              numRows 0'
'              rawDataSize 0'
'              serialization.ddl struct src { string key, string value}'
'              serialization.format 1'
'              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              totalSize 5812'
'              transient_lastDdlTime !!UNIXTIME!!'
'            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'          '
'              input format: org.apache.hadoop.mapred.TextInputFormat'
'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              properties:'
'                bucket_count -1'
'                columns key,value'
'                columns.types string:string'
'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                location !!{hive.metastore.warehouse.dir}!!/bucket3.db/src'
'                name bucket3.src'
'                numFiles 1'
'                numPartitions 0'
'                numRows 0'
'                rawDataSize 0'
'                serialization.ddl struct src { string key, string value}'
'                serialization.format 1'
'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                totalSize 5812'
'                transient_lastDdlTime !!UNIXTIME!!'
'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              name: bucket3.src'
'            name: bucket3.src'
'      Reduce Operator Tree:'
'        Extract'
'          Select Operator'
'            expressions:'
'                  expr: UDFToInteger(_col0)'
'                  type: int'
'                  expr: _col1'
'                  type: string'
'            outputColumnNames: _col0, _col1'
'            File Output Operator'
'              compressed: false'
'              GlobalTableId: 1'
'              directory: pfile:!!{hive.exec.scratchdir}!!'
'              NumFilesPerFileSink: 2'
'              Static Partition Specification: ds=1/'
'              Stats Publishing Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
'              table:'
'                  input format: org.apache.hadoop.mapred.TextInputFormat'
'                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                  properties:'
'                    bucket_count 2'
'                    bucket_field_name key'
'                    columns key,value'
'                    columns.types int:string'
'                    file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                    location !!{hive.metastore.warehouse.dir}!!/bucket3.db/bucket3_1'
'                    name bucket3.bucket3_1'
'                    partition_columns ds'
'                    serialization.ddl struct bucket3_1 { i32 key, string value}'
'                    serialization.format 1'
'                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                    transient_lastDdlTime !!UNIXTIME!!'
'                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                  name: bucket3.bucket3_1'
'              TotalFiles: 2'
'              GatherStats: true'
'              MultiFileSpray: true'
''
'  Stage: Stage-0'
'    Move Operator'
'      tables:'
'          partition:'
'            ds 1'
'          replace: true'
'          source: pfile:!!{hive.exec.scratchdir}!!'
'          table:'
'              input format: org.apache.hadoop.mapred.TextInputFormat'
'              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'              properties:'
'                bucket_count 2'
'                bucket_field_name key'
'                columns key,value'
'                columns.types int:string'
'                file.inputformat org.apache.hadoop.mapred.TextInputFormat'
'                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
'                location !!{hive.metastore.warehouse.dir}!!/bucket3.db/bucket3_1'
'                name bucket3.bucket3_1'
'                partition_columns ds'
'                serialization.ddl struct bucket3_1 { i32 key, string value}'
'                serialization.format 1'
'                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'                transient_lastDdlTime !!UNIXTIME!!'
'              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
'              name: bucket3.bucket3_1'
'          tmp directory: pfile:!!{hive.exec.scratchdir}!!'
''
'  Stage: Stage-2'
'    Stats-Aggr Operator'
'      Stats Aggregation Key Prefix: pfile:!!{hive.exec.scratchdir}!!'
''
''
156 rows selected
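Note for the reader (not part of the recorded output): with hive.enforce.bucketing=true and hive.exec.reducers.max=1, the plan above funnels all rows through a single reducer, hash-partitioned on UDFToInteger(_col0) (the bucketing column), and that one reducer sprays its output across both bucket files, which is why the File Output Operator reports NumFilesPerFileSink: 2, TotalFiles: 2, and MultiFileSpray: true. Before hive.enforce.bucketing existed, roughly the same layout had to be requested by hand; a minimal sketch of that older idiom, assuming the same table definitions as this test:

    set mapred.reduce.tasks = 2;                           -- one reducer per bucket
    insert overwrite table bucket3_1 partition (ds = '1')
    select * from src cluster by key;                      -- route rows to reducers by the bucketing column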
>>> 
>>> insert overwrite table bucket3_1 partition (ds='1') select * from src;
'_col0','_col1'
No rows selected
>>> 
>>> insert overwrite table bucket3_1 partition (ds='2') select * from src;
'_col0','_col1'
No rows selected
>>> 
>>> explain select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key;
'Explain'
'ABSTRACT SYNTAX TREE:'
'  (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME bucket3_1) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_WHERE (= (TOK_TABLE_OR_COL ds) '1')) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)))))'
''
'STAGE DEPENDENCIES:'
'  Stage-1 is a root stage'
'  Stage-0 is a root stage'
''
'STAGE PLANS:'
'  Stage: Stage-1'
'    Map Reduce'
'      Alias -> Map Operator Tree:'
'        s '
'          TableScan'
'            alias: s'
'            Filter Operator'
'              predicate:'
'                  expr: (((hash(key) & 2147483647) % 2) = 0)'
'                  type: boolean'
'              Select Operator'
'                expressions:'
'                      expr: key'
'                      type: int'
'                      expr: value'
'                      type: string'
'                      expr: ds'
'                      type: string'
'                outputColumnNames: _col0, _col1, _col2'
'                Reduce Output Operator'
'                  key expressions:'
'                        expr: _col0'
'                        type: int'
'                  sort order: +'
'                  tag: -1'
'                  value expressions:'
'                        expr: _col0'
'                        type: int'
'                        expr: _col1'
'                        type: string'
'                        expr: _col2'
'                        type: string'
'      Reduce Operator Tree:'
'        Extract'
'          File Output Operator'
'            compressed: false'
'            GlobalTableId: 0'
'            table:'
'                input format: org.apache.hadoop.mapred.TextInputFormat'
'                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
''
'  Stage: Stage-0'
'    Fetch Operator'
'      limit: -1'
''
''
54 rows selected
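Note for the reader (not part of the recorded output): TABLESAMPLE (BUCKET 1 OUT OF 2) compiles into the filter (((hash(key) & 2147483647) % 2) = 0) above rather than pruning the scan to a single bucket file. Hive's hash() of an int column is the integer value itself, so bucket 1 of 2 holds exactly the even keys, which is why every key in the result set below is even. A minimal sketch for inspecting the bucket assignment, under that same assumption about hash():

    select key,
           (hash(key) & 2147483647) % 2 as bucket_idx   -- 0 -> bucket 1, 1 -> bucket 2
    from bucket3_1
    where ds = '1'
    limit 10;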
>>> 
>>> select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key;
'key','value','ds'
'0','val_0','1'
'0','val_0','1'
'0','val_0','1'
'2','val_2','1'
'4','val_4','1'
'8','val_8','1'
'10','val_10','1'
'12','val_12','1'
'12','val_12','1'
'18','val_18','1'
'18','val_18','1'
'20','val_20','1'
'24','val_24','1'
'24','val_24','1'
'26','val_26','1'
'26','val_26','1'
'28','val_28','1'
'30','val_30','1'
'34','val_34','1'
'42','val_42','1'
'42','val_42','1'
'44','val_44','1'
'54','val_54','1'
'58','val_58','1'
'58','val_58','1'
'64','val_64','1'
'66','val_66','1'
'70','val_70','1'
'70','val_70','1'
'70','val_70','1'
'72','val_72','1'
'72','val_72','1'
'74','val_74','1'
'76','val_76','1'
'76','val_76','1'
'78','val_78','1'
'80','val_80','1'
'82','val_82','1'
'84','val_84','1'
'84','val_84','1'
'86','val_86','1'
'90','val_90','1'
'90','val_90','1'
'90','val_90','1'
'92','val_92','1'
'96','val_96','1'
'98','val_98','1'
'98','val_98','1'
'100','val_100','1'
'100','val_100','1'
'104','val_104','1'
'104','val_104','1'
'114','val_114','1'
'116','val_116','1'
'118','val_118','1'
'118','val_118','1'
'120','val_120','1'
'120','val_120','1'
'126','val_126','1'
'128','val_128','1'
'128','val_128','1'
'128','val_128','1'
'134','val_134','1'
'134','val_134','1'
'136','val_136','1'
'138','val_138','1'
'138','val_138','1'
'138','val_138','1'
'138','val_138','1'
'146','val_146','1'
'146','val_146','1'
'150','val_150','1'
'152','val_152','1'
'152','val_152','1'
'156','val_156','1'
'158','val_158','1'
'160','val_160','1'
'162','val_162','1'
'164','val_164','1'
'164','val_164','1'
'166','val_166','1'
'168','val_168','1'
'170','val_170','1'
'172','val_172','1'
'172','val_172','1'
'174','val_174','1'
'174','val_174','1'
'176','val_176','1'
'176','val_176','1'
'178','val_178','1'
'180','val_180','1'
'186','val_186','1'
'190','val_190','1'
'192','val_192','1'
'194','val_194','1'
'196','val_196','1'
'200','val_200','1'
'200','val_200','1'
'202','val_202','1'
'208','val_208','1'
'208','val_208','1'
'208','val_208','1'
'214','val_214','1'
'216','val_216','1'
'216','val_216','1'
'218','val_218','1'
'222','val_222','1'
'224','val_224','1'
'224','val_224','1'
'226','val_226','1'
'228','val_228','1'
'230','val_230','1'
'230','val_230','1'
'230','val_230','1'
'230','val_230','1'
'230','val_230','1'
'238','val_238','1'
'238','val_238','1'
'242','val_242','1'
'242','val_242','1'
'244','val_244','1'
'248','val_248','1'
'252','val_252','1'
'256','val_256','1'
'256','val_256','1'
'258','val_258','1'
'260','val_260','1'
'262','val_262','1'
'266','val_266','1'
'272','val_272','1'
'272','val_272','1'
'274','val_274','1'
'278','val_278','1'
'278','val_278','1'
'280','val_280','1'
'280','val_280','1'
'282','val_282','1'
'282','val_282','1'
'284','val_284','1'
'286','val_286','1'
'288','val_288','1'
'288','val_288','1'
'292','val_292','1'
'296','val_296','1'
'298','val_298','1'
'298','val_298','1'
'298','val_298','1'
'302','val_302','1'
'306','val_306','1'
'308','val_308','1'
'310','val_310','1'
'316','val_316','1'
'316','val_316','1'
'316','val_316','1'
'318','val_318','1'
'318','val_318','1'
'318','val_318','1'
'322','val_322','1'
'322','val_322','1'
'332','val_332','1'
'336','val_336','1'
'338','val_338','1'
'342','val_342','1'
'342','val_342','1'
'344','val_344','1'
'344','val_344','1'
'348','val_348','1'
'348','val_348','1'
'348','val_348','1'
'348','val_348','1'
'348','val_348','1'
'356','val_356','1'
'360','val_360','1'
'362','val_362','1'
'364','val_364','1'
'366','val_366','1'
'368','val_368','1'
'374','val_374','1'
'378','val_378','1'
'382','val_382','1'
'382','val_382','1'
'384','val_384','1'
'384','val_384','1'
'384','val_384','1'
'386','val_386','1'
'392','val_392','1'
'394','val_394','1'
'396','val_396','1'
'396','val_396','1'
'396','val_396','1'
'400','val_400','1'
'402','val_402','1'
'404','val_404','1'
'404','val_404','1'
'406','val_406','1'
'406','val_406','1'
'406','val_406','1'
'406','val_406','1'
'414','val_414','1'
'414','val_414','1'
'418','val_418','1'
'424','val_424','1'
'424','val_424','1'
'430','val_430','1'
'430','val_430','1'
'430','val_430','1'
'432','val_432','1'
'436','val_436','1'
'438','val_438','1'
'438','val_438','1'
'438','val_438','1'
'444','val_444','1'
'446','val_446','1'
'448','val_448','1'
'452','val_452','1'
'454','val_454','1'
'454','val_454','1'
'454','val_454','1'
'458','val_458','1'
'458','val_458','1'
'460','val_460','1'
'462','val_462','1'
'462','val_462','1'
'466','val_466','1'
'466','val_466','1'
'466','val_466','1'
'468','val_468','1'
'468','val_468','1'
'468','val_468','1'
'468','val_468','1'
'470','val_470','1'
'472','val_472','1'
'478','val_478','1'
'478','val_478','1'
'480','val_480','1'
'480','val_480','1'
'480','val_480','1'
'482','val_482','1'
'484','val_484','1'
'490','val_490','1'
'492','val_492','1'
'492','val_492','1'
'494','val_494','1'
'496','val_496','1'
'498','val_498','1'
'498','val_498','1'
'498','val_498','1'
247 rows selected
>>> !record
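Closing note (outside the recorded session, which ends at !record above): src holds 500 rows and bucket 1 returned 247 of them, roughly half, as expected for a 2-bucket table keyed on an int column. A hypothetical follow-up to confirm that the partition directory really holds the two bucket files the plan promised (assumes dfs commands are permitted on the server):

    -- list the bucket files under the ds=1 partition of bucket3_1
    dfs -ls !!{hive.metastore.warehouse.dir}!!/bucket3.db/bucket3_1/ds=1;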