PREHOOK: query: -- create testing table.
create table alter_coltype(key string, value string) partitioned by (dt string, ts string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@alter_coltype
POSTHOOK: query: -- create testing table.
create table alter_coltype(key string, value string) partitioned by (dt string, ts string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@alter_coltype
PREHOOK: query: -- insert and create a partition.
insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
PREHOOK: Output: default@alter_coltype@dt=100/ts=6.30
POSTHOOK: query: -- insert and create a partition.
insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
POSTHOOK: Output: default@alter_coltype@dt=100/ts=6.30
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=6.30).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=6.30).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: desc alter_coltype
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: desc alter_coltype
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@alter_coltype
key	string
value	string
dt	string
ts	string

# Partition Information
# col_name	data_type	comment

dt	string
ts	string
PREHOOK: query: -- select with partition predicate.
select count(*) from alter_coltype where dt = '100'
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
POSTHOOK: query: -- select with partition predicate.
select count(*) from alter_coltype where dt = '100'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
25
PREHOOK: query: -- alter partition key column data type for dt column.
alter table alter_coltype partition column (dt int)
PREHOOK: type: ALTERTABLE_PARTCOLTYPE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: -- alter partition key column data type for dt column.
alter table alter_coltype partition column (dt int)
POSTHOOK: type: ALTERTABLE_PARTCOLTYPE
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Output: default@alter_coltype
PREHOOK: query: -- load a new partition using new data type.
insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
PREHOOK: Output: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: query: -- load a new partition using new data type.
insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
POSTHOOK: Output: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
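The ALTERTABLE_PARTCOLTYPE operation recorded above retypes one partition key in the metastore without touching any data. A minimal sketch of the syntax, using a hypothetical table t (an illustration, not part of the recorded run):

    -- assumed: create table t (c string) partitioned by (dt string);
    alter table t partition column (dt int);
    -- dt is now declared int; predicates on dt are evaluated against the new type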
PREHOOK: query: -- make sure the partition predicate still works.
select count(*) from alter_coltype where dt = '100'
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
POSTHOOK: query: -- make sure the partition predicate still works.
select count(*) from alter_coltype where dt = '100'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
50
PREHOOK: query: explain extended select count(*) from alter_coltype where dt = '100'
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) from alter_coltype where dt = '100'
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            alter_coltype
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_FUNCTIONSTAR
               count
      TOK_WHERE
         =
            TOK_TABLE_OR_COL
               dt
            '100'

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: alter_coltype
            Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
              Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                mode: hash
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  sort order: 
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  tag: -1
                  value expressions: _col0 (type: bigint)
                  auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: ts=3.0
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 3.0
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types int:string
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types int:string
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
#### A masked pattern was here ####
          Partition
            base file name: ts=6.30
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 6.30
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types int:string
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types int:string
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
      Truncated Path -> Alias:
        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:alter_coltype]
        /alter_coltype/dt=100/ts=6.30 [$hdt$_0:alter_coltype]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
          aggregations: count(VALUE._col0)
          mode: mergepartial
          outputColumnNames: _col0
          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: bigint)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
                    columns _col0
                    columns.types bigint
                    escape.delim \
                    hive.serialization.extend.nesting.levels true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: -- alter partition key column data type for ts column.
alter table alter_coltype partition column (ts double)
PREHOOK: type: ALTERTABLE_PARTCOLTYPE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: -- alter partition key column data type for ts column.
alter table alter_coltype partition column (ts double)
POSTHOOK: type: ALTERTABLE_PARTCOLTYPE
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Output: default@alter_coltype
PREHOOK: query: alter table alter_coltype partition column (dt string)
PREHOOK: type: ALTERTABLE_PARTCOLTYPE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: alter table alter_coltype partition column (dt string)
POSTHOOK: type: ALTERTABLE_PARTCOLTYPE
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Output: default@alter_coltype
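Each of the two statements above retypes a single partition key; the syntax takes one column at a time. Only the table definition changes, so the partition directories written earlier keep their original names. A quick way to see both effects, assuming the table as created above (sketch, not recorded output):

    desc alter_coltype;            -- now reports dt string, ts double
    show partitions alter_coltype; -- still lists dt=100/ts=6.30 and dt=100/ts=3.0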
PREHOOK: query: -- load a new partition using new data type.
insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
PREHOOK: Output: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: query: -- load a new partition using new data type.
insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src1
POSTHOOK: Output: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: -- validate that the partition key column predicate still works.
select count(*) from alter_coltype where ts = '6.30'
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
POSTHOOK: query: -- validate that the partition key column predicate still works.
select count(*) from alter_coltype where ts = '6.30'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
25
PREHOOK: query: explain extended select count(*) from alter_coltype where ts = '6.30'
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = '6.30'
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            alter_coltype
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_FUNCTIONSTAR
               count
      TOK_WHERE
         =
            TOK_TABLE_OR_COL
               ts
            '6.30'

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: alter_coltype
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                mode: hash
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  sort order: 
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  tag: -1
                  value expressions: _col0 (type: bigint)
                  auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: ts=6.30
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 6.30
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types string:double
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types string:double
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
      Truncated Path -> Alias:
        /alter_coltype/dt=100/ts=6.30 [$hdt$_0:alter_coltype]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
          aggregations: count(VALUE._col0)
          mode: mergepartial
          outputColumnNames: _col0
          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: bigint)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
                    columns _col0
                    columns.types bigint
                    escape.delim \
                    hive.serialization.extend.nesting.levels true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: -- validate that a partition key column predicate over two different partition column data types
-- still works.
select count(*) from alter_coltype where ts = 3.0 and dt=100
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
#### A masked pattern was here ####
POSTHOOK: query: -- validate that a partition key column predicate over two different partition column data types
-- still works.
select count(*) from alter_coltype where ts = 3.0 and dt=100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
#### A masked pattern was here ####
25
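Both halves of the predicate above cross types: ts is now double and is compared with the numeric literal 3.0, while dt is string and is compared with the bare integer 100. The count of 25 shows pruning still reaches only the dt=100/ts=3.0 partition. A sketch of the same comparisons written out separately, assuming Hive's usual implicit-conversion rules (string vs. int is presumably compared as double; illustration only):

    select count(*) from alter_coltype where ts = 3.0;  -- double column vs. double literal
    select count(*) from alter_coltype where dt = 100;  -- string column vs. int literal, implicitly converted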
PREHOOK: query: explain extended select count(*) from alter_coltype where ts = 3.0 and dt=100
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = 3.0 and dt=100
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            alter_coltype
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_FUNCTIONSTAR
               count
      TOK_WHERE
         and
            =
               TOK_TABLE_OR_COL
                  ts
               3.0
            =
               TOK_TABLE_OR_COL
                  dt
               100

STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Map Reduce
      Map Operator Tree:
          TableScan
            alias: alter_coltype
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                mode: hash
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  sort order: 
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  tag: -1
                  value expressions: _col0 (type: bigint)
                  auto parallelism: false
      Path -> Alias:
#### A masked pattern was here ####
      Path -> Partition:
#### A masked pattern was here ####
          Partition
            base file name: ts=3.0
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 3.0
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types string:double
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types string:double
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
      Truncated Path -> Alias:
        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:$hdt$_0:alter_coltype]
      Needs Tagging: false
      Reduce Operator Tree:
        Group By Operator
          aggregations: count(VALUE._col0)
          mode: mergepartial
          outputColumnNames: _col0
          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: bigint)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
#### A masked pattern was here ####
              NumFilesPerFileSink: 1
              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
                    columns _col0
                    columns.types bigint
                    escape.delim \
                    hive.serialization.extend.nesting.levels true
                    serialization.format 1
                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              TotalFiles: 1
              GatherStats: false
              MultiFileSpray: false

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: -- query where multiple partition values (of different datatypes) are being selected
select key, value, dt, ts from alter_coltype where dt is not null
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
POSTHOOK: query: -- query where multiple partition values (of different datatypes) are being selected
select key, value, dt, ts from alter_coltype where dt is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
238	val_238	100	3.0
		100	3.0
311	val_311	100	3.0
	val_27	100	3.0
	val_165	100	3.0
	val_409	100	3.0
255	val_255	100	3.0
278	val_278	100	3.0
98	val_98	100	3.0
	val_484	100	3.0
	val_265	100	3.0
	val_193	100	3.0
401	val_401	100	3.0
150	val_150	100	3.0
273	val_273	100	3.0
224		100	3.0
369		100	3.0
66	val_66	100	3.0
128		100	3.0
213	val_213	100	3.0
146	val_146	100	3.0
406	val_406	100	3.0
		100	3.0
		100	3.0
		100	3.0
238	val_238	100	6.3
		100	6.3
311	val_311	100	6.3
	val_27	100	6.3
	val_165	100	6.3
	val_409	100	6.3
255	val_255	100	6.3
278	val_278	100	6.3
98	val_98	100	6.3
	val_484	100	6.3
	val_265	100	6.3
	val_193	100	6.3
401	val_401	100	6.3
150	val_150	100	6.3
273	val_273	100	6.3
224		100	6.3
369		100	6.3
66	val_66	100	6.3
128		100	6.3
213	val_213	100	6.3
146	val_146	100	6.3
406	val_406	100	6.3
		100	6.3
		100	6.3
		100	6.3
PREHOOK: query: explain extended select key, value, dt, ts from alter_coltype where dt is not null
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select key, value, dt, ts from alter_coltype where dt is not null
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            alter_coltype
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               key
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               value
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               dt
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               ts
      TOK_WHERE
         TOK_FUNCTION
            TOK_ISNOTNULL
            TOK_TABLE_OR_COL
               dt

STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Partition Description:
          Partition
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 3.0
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types string:double
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types string:double
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
          Partition
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              dt 100
              ts 6.30
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns key,value
              columns.comments 
              columns.types string:string
#### A masked pattern was here ####
              name default.alter_coltype
              numFiles 1
              numRows 25
              partition_columns dt/ts
              partition_columns.types string:double
              rawDataSize 191
              serialization.ddl struct alter_coltype { string key, string value}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 216
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns key,value
                columns.comments 
                columns.types string:string
#### A masked pattern was here ####
                name default.alter_coltype
                partition_columns dt/ts
                partition_columns.types string:double
                serialization.ddl struct alter_coltype { string key, string value}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.alter_coltype
            name: default.alter_coltype
      Processor Tree:
        TableScan
          alias: alter_coltype
          Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: key (type: string), value (type: string), dt (type: string), ts (type: double)
            outputColumnNames: _col0, _col1, _col2, _col3
            Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
            ListSink

PREHOOK: query: select count(*) from alter_coltype where ts = 3.0
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from alter_coltype where ts = 3.0
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
#### A masked pattern was here ####
25
PREHOOK: query: -- make sure the partition predicate still works.
select count(*) from alter_coltype where dt = '100'
PREHOOK: type: QUERY
PREHOOK: Input: default@alter_coltype
PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
POSTHOOK: query: -- make sure the partition predicate still works.
select count(*) from alter_coltype where dt = '100'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
#### A masked pattern was here ####
50
PREHOOK: query: desc alter_coltype
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: desc alter_coltype
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@alter_coltype
key	string
value	string
dt	string
ts	double

# Partition Information
# col_name	data_type	comment

dt	string
ts	double
PREHOOK: query: desc alter_coltype partition (dt='100', ts='6.30')
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: desc alter_coltype partition (dt='100', ts='6.30')
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@alter_coltype
key	string
value	string
dt	string
ts	double

# Partition Information
# col_name	data_type	comment

dt	string
ts	double
PREHOOK: query: desc alter_coltype partition (dt='100', ts=3.0)
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@alter_coltype
POSTHOOK: query: desc alter_coltype partition (dt='100', ts=3.0)
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@alter_coltype
key	string
value	string
dt	string
ts	double

# Partition Information
# col_name	data_type	comment

dt	string
ts	double
PREHOOK: query: drop table alter_coltype
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@alter_coltype
PREHOOK: Output: default@alter_coltype
POSTHOOK: query: drop table alter_coltype
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@alter_coltype
POSTHOOK: Output: default@alter_coltype
PREHOOK: query: create database pt
PREHOOK: type: CREATEDATABASE
PREHOOK: Output: database:pt
POSTHOOK: query: create database pt
POSTHOOK: type: CREATEDATABASE
POSTHOOK: Output: database:pt
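The inserts below name partition(partcol1, partcol2) without values, so both keys are filled dynamically from the select list. The recorded run does not show its session settings; dynamic partitioning on all keys would normally require something like the following (assumed configuration, sketch only):

    set hive.exec.dynamic.partition=true;
    set hive.exec.dynamic.partition.mode=nonstrict;  -- allow every partition column to be dynamic
    insert into table pt.alterdynamic_part_table partition(partcol1, partcol2)
      select '1', '1', '1' from src where key=150 limit 5;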
PREHOOK: query: create table pt.alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:pt
PREHOOK: Output: pt@alterdynamic_part_table
POSTHOOK: query: create table pt.alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:pt
POSTHOOK: Output: pt@alterdynamic_part_table
PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: pt@alterdynamic_part_table
POSTHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=1/partcol2=1
POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: pt@alterdynamic_part_table
POSTHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=2/partcol2=1
POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=2,partcol2=1).intcol SIMPLE []
PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: pt@alterdynamic_part_table
POSTHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=1/partcol2=1
POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE []
PREHOOK: query: alter table pt.alterdynamic_part_table partition column (partcol1 int)
PREHOOK: type: ALTERTABLE_PARTCOLTYPE
PREHOOK: Input: pt@alterdynamic_part_table
POSTHOOK: query: alter table pt.alterdynamic_part_table partition column (partcol1 int)
POSTHOOK: type: ALTERTABLE_PARTCOLTYPE
POSTHOOK: Input: pt@alterdynamic_part_table
POSTHOOK: Output: pt@alterdynamic_part_table
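With partcol1 now declared int, the two explain statements below still filter it with string literals ('1', '2') and prune to the expected partitions. Presumably the literal is cast to match the column type, so these two forms should be equivalent (sketch, not recorded output):

    select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1';
    select intcol from pt.alterdynamic_part_table where partcol1=1 and partcol2='1';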
PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1'
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1'
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            pt
            alterdynamic_part_table
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               intcol
      TOK_WHERE
         and
            =
               TOK_TABLE_OR_COL
                  partcol1
               '1'
            =
               TOK_TABLE_OR_COL
                  partcol2
               '1'

STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Partition Description:
          Partition
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              partcol1 1
              partcol2 1
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns intcol
              columns.comments 
              columns.types string
#### A masked pattern was here ####
              name pt.alterdynamic_part_table
              numFiles 2
              numRows 2
              partition_columns partcol1/partcol2
              partition_columns.types int:string
              rawDataSize 3
              serialization.ddl struct alterdynamic_part_table { string intcol}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 5
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns intcol
                columns.comments 
                columns.types string
#### A masked pattern was here ####
                name pt.alterdynamic_part_table
                partition_columns partcol1/partcol2
                partition_columns.types int:string
                serialization.ddl struct alterdynamic_part_table { string intcol}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: pt.alterdynamic_part_table
            name: pt.alterdynamic_part_table
      Processor Tree:
        TableScan
          alias: alterdynamic_part_table
          Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: intcol (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE
            ListSink

PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1') or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: QUERY
POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1') or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:

TOK_QUERY
   TOK_FROM
      TOK_TABREF
         TOK_TABNAME
            pt
            alterdynamic_part_table
   TOK_INSERT
      TOK_DESTINATION
         TOK_DIR
            TOK_TMP_FILE
      TOK_SELECT
         TOK_SELEXPR
            TOK_TABLE_OR_COL
               intcol
      TOK_WHERE
         or
            and
               =
                  TOK_TABLE_OR_COL
                     partcol1
                  '2'
               =
                  TOK_TABLE_OR_COL
                     partcol2
                  '1'
            and
               =
                  TOK_TABLE_OR_COL
                     partcol1
                  '1'
               =
                  TOK_TABLE_OR_COL
                     partcol2
                  '__HIVE_DEFAULT_PARTITION__'

STAGE DEPENDENCIES:
  Stage-0 is a root stage

STAGE PLANS:
  Stage: Stage-0
    Fetch Operator
      limit: -1
      Partition Description:
          Partition
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            partition values:
              partcol1 2
              partcol2 1
            properties:
              COLUMN_STATS_ACCURATE true
              bucket_count -1
              columns intcol
              columns.comments 
              columns.types string
#### A masked pattern was here ####
              name pt.alterdynamic_part_table
              numFiles 1
              numRows 1
              partition_columns partcol1/partcol2
              partition_columns.types int:string
              rawDataSize 1
              serialization.ddl struct alterdynamic_part_table { string intcol}
              serialization.format 1
              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              totalSize 2
#### A masked pattern was here ####
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
                bucket_count -1
                columns intcol
                columns.comments 
                columns.types string
#### A masked pattern was here ####
                name pt.alterdynamic_part_table
                partition_columns partcol1/partcol2
                partition_columns.types int:string
                serialization.ddl struct alterdynamic_part_table { string intcol}
                serialization.format 1
                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: pt.alterdynamic_part_table
            name: pt.alterdynamic_part_table
      Processor Tree:
        TableScan
          alias: alterdynamic_part_table
          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
          GatherStats: false
          Select Operator
            expressions: intcol (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
            ListSink

PREHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1') or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
PREHOOK: type: QUERY
PREHOOK: Input: pt@alterdynamic_part_table
PREHOOK: Input: pt@alterdynamic_part_table@partcol1=2/partcol2=1
#### A masked pattern was here ####
POSTHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1') or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
POSTHOOK: type: QUERY
POSTHOOK: Input: pt@alterdynamic_part_table
POSTHOOK: Input: pt@alterdynamic_part_table@partcol1=2/partcol2=1
#### A masked pattern was here ####
1
PREHOOK: query: drop table pt.alterdynamic_part_table
PREHOOK: type: DROPTABLE
PREHOOK: Input: pt@alterdynamic_part_table
PREHOOK: Output: pt@alterdynamic_part_table
POSTHOOK: query: drop table pt.alterdynamic_part_table
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: pt@alterdynamic_part_table
POSTHOOK: Output: pt@alterdynamic_part_table
PREHOOK: query: drop database pt
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:pt
PREHOOK: Output: database:pt
POSTHOOK: query: drop database pt
POSTHOOK: type: DROPDATABASE
POSTHOOK: Input: database:pt
POSTHOOK: Output: database:pt
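A closing note on the '__HIVE_DEFAULT_PARTITION__' predicate used above: that string is the directory name Hive substitutes for a NULL or empty dynamic partition value (configurable via hive.exec.default.partition.name), which is why it can be matched like any other string value. No such partition was created in this run, so only the partcol1=2/partcol2=1 branch of the OR matched. A sketch of how one would arise, under the assumed dynamic-partition settings (illustration only):

    insert into table pt.alterdynamic_part_table partition(partcol1, partcol2)
      select '1', '1', NULL from src where key=150 limit 5;
    -- lands in partcol1=1/partcol2=__HIVE_DEFAULT_PARTITION__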