PREHOOK: query: -- Create bucketed and sorted tables
CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table1
POSTHOOK: query: -- Create bucketed and sorted tables
CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table1
PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table2
POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table2
PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table3
POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table3
PREHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table4
POSTHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table4
PREHOOK: query: CREATE TABLE test_table5 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table5
POSTHOOK: query: CREATE TABLE test_table5 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table5
PREHOOK: query: CREATE TABLE test_table6 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table6
POSTHOOK: query: CREATE TABLE test_table6 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table6
PREHOOK: query: CREATE TABLE test_table7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table7
POSTHOOK: query: CREATE TABLE test_table7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table7
PREHOOK: query: CREATE TABLE test_table8 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table8
POSTHOOK: query: CREATE TABLE test_table8 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table8
PREHOOK: query: INSERT OVERWRITE TABLE test_table1 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table1
POSTHOOK: query: INSERT OVERWRITE TABLE test_table1 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table1
POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table2 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table2
POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table2
POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table3 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table3
POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table3
POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table4 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table4
POSTHOOK: query: INSERT OVERWRITE TABLE test_table4 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table4
POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table5 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table5
POSTHOOK: query: INSERT OVERWRITE TABLE test_table5 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table5
POSTHOOK: Lineage: test_table5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table6 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table6
POSTHOOK: query: INSERT OVERWRITE TABLE test_table6 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table6
POSTHOOK: Lineage: test_table6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table7 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table7
POSTHOOK: query: INSERT OVERWRITE TABLE test_table7 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table7
POSTHOOK: Lineage: test_table7.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table7.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table8 SELECT * FROM src WHERE key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table8
POSTHOOK: query: INSERT OVERWRITE TABLE test_table8 SELECT * FROM src WHERE key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table8
POSTHOOK: Lineage: test_table8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table8.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
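Note: the session settings from the .q file that drove this run are not captured in the output above. For Sorted Merge Bucket Map Join operators like the ones in the plans below to appear, a test of this kind would typically bucket and sort the inserted data and enable sort-merge bucket join conversion. The following is only an illustrative sketch of such settings; the actual values used by this test are not shown here:

set hive.enforce.bucketing=true;
set hive.enforce.sorting=true;
set hive.optimize.bucketmapjoin=true;
set hive.optimize.bucketmapjoin.sortedmerge=true;
set hive.auto.convert.sortmerge.join=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;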
PREHOOK: query: -- Mapjoin followed by an aggregation should be performed in a single MR job for up to 7 tables
EXPLAIN
SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
FROM test_table1 a JOIN test_table2 b ON a.key = b.key
JOIN test_table3 c ON a.key = c.key
JOIN test_table4 d ON a.key = d.key
JOIN test_table5 e ON a.key = e.key
JOIN test_table6 f ON a.key = f.key
JOIN test_table7 g ON a.key = g.key
PREHOOK: type: QUERY
POSTHOOK: query: -- Mapjoin followed by an aggregation should be performed in a single MR job for up to 7 tables
EXPLAIN
SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
FROM test_table1 a JOIN test_table2 b ON a.key = b.key
JOIN test_table3 c ON a.key = c.key
JOIN test_table4 d ON a.key = d.key
JOIN test_table5 e ON a.key = e.key
JOIN test_table6 f ON a.key = f.key
JOIN test_table7 g ON a.key = g.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Spark
      Edges:
        Reducer 2 <- Map 1 (GROUP, 1)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: a
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
                    Sorted Merge Bucket Map Join Operator
                      condition map:
                           Inner Join 0 to 1
                           Inner Join 0 to 2
                           Inner Join 0 to 3
                           Inner Join 0 to 4
                           Inner Join 0 to 5
                           Inner Join 0 to 6
                      keys:
                        0 key (type: int)
                        1 key (type: int)
                        2 key (type: int)
                        3 key (type: int)
                        4 key (type: int)
                        5 key (type: int)
                        6 key (type: int)
                      Statistics: Num rows: 33 Data size: 231 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count()
                        mode: hash
                        outputColumnNames: _col0
                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          sort order:
                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col0 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint)
                  outputColumnNames: _col0
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
FROM test_table1 a JOIN test_table2 b ON a.key = b.key
JOIN test_table3 c ON a.key = c.key
JOIN test_table4 d ON a.key = d.key
JOIN test_table5 e ON a.key = e.key
JOIN test_table6 f ON a.key = f.key
JOIN test_table7 g ON a.key = g.key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1
PREHOOK: Input: default@test_table2
PREHOOK: Input: default@test_table3
PREHOOK: Input: default@test_table4
PREHOOK: Input: default@test_table5
PREHOOK: Input: default@test_table6
PREHOOK: Input: default@test_table7
#### A masked pattern was here ####
POSTHOOK: query: SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
FROM test_table1 a JOIN test_table2 b ON a.key = b.key
JOIN test_table3 c ON a.key = c.key
JOIN test_table4 d ON a.key = d.key
JOIN test_table5 e ON a.key = e.key
JOIN test_table6 f ON a.key = f.key
JOIN test_table7 g ON a.key = g.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1
POSTHOOK: Input: default@test_table2
POSTHOOK: Input: default@test_table3
POSTHOOK: Input: default@test_table4
POSTHOOK: Input: default@test_table5
POSTHOOK: Input: default@test_table6
POSTHOOK: Input: default@test_table7
#### A masked pattern was here ####
4378
PREHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in
-- a single MR job
EXPLAIN
SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
PREHOOK: type: QUERY
POSTHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in
-- a single MR job
EXPLAIN
SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Spark
      Edges:
        Reducer 2 <- Map 1 (GROUP, 1)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: a
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Sorted Merge Bucket Map Join Operator
                    condition map:
                         Left Outer Join0 to 1
                         Left Outer Join0 to 2
                         Left Outer Join0 to 3
                         Left Outer Join0 to 4
                         Left Outer Join0 to 5
                         Left Outer Join0 to 6
                    keys:
                      0 key (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
                      5 key (type: int)
                      6 key (type: int)
                    Statistics: Num rows: 66 Data size: 462 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      aggregations: count()
                      mode: hash
                      outputColumnNames: _col0
                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint)
                  outputColumnNames: _col0
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1
PREHOOK: Input: default@test_table2
PREHOOK: Input: default@test_table3
PREHOOK: Input: default@test_table4
PREHOOK: Input: default@test_table5
PREHOOK: Input: default@test_table6
PREHOOK: Input: default@test_table7
#### A masked pattern was here ####
POSTHOOK: query: SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1
POSTHOOK: Input: default@test_table2
POSTHOOK: Input: default@test_table3
POSTHOOK: Input: default@test_table4
POSTHOOK: Input: default@test_table5
POSTHOOK: Input: default@test_table6
POSTHOOK: Input: default@test_table7
#### A masked pattern was here ####
4378
PREHOOK: query: EXPLAIN
SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-1
    Spark
      Edges:
        Reducer 2 <- Map 1 (GROUP, 1)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: a
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Sorted Merge Bucket Map Join Operator
                    condition map:
                         Left Outer Join0 to 1
                         Left Outer Join0 to 2
                         Left Outer Join0 to 3
                         Left Outer Join0 to 4
                         Left Outer Join0 to 5
                         Left Outer Join0 to 6
                         Left Outer Join0 to 7
                    keys:
                      0 key (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
                      5 key (type: int)
                      6 key (type: int)
                      7 key (type: int)
                    Statistics: Num rows: 77 Data size: 539 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      aggregations: count()
                      mode: hash
                      outputColumnNames: _col0
                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(VALUE._col0)
                mode: mergepartial
                outputColumnNames: _col0
                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint)
                  outputColumnNames: _col0
                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink

PREHOOK: query: SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1
PREHOOK: Input: default@test_table2
PREHOOK: Input: default@test_table3
PREHOOK: Input: default@test_table4
PREHOOK: Input: default@test_table5
PREHOOK: Input: default@test_table6
PREHOOK: Input: default@test_table7
PREHOOK: Input: default@test_table8
#### A masked pattern was here ####
POSTHOOK: query: SELECT count(*)
FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1
POSTHOOK: Input: default@test_table2
POSTHOOK: Input: default@test_table3
POSTHOOK: Input: default@test_table4
POSTHOOK: Input: default@test_table5
POSTHOOK: Input: default@test_table6
POSTHOOK: Input: default@test_table7
POSTHOOK: Input: default@test_table8
#### A masked pattern was here ####
13126
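The counts above (4378 and 13126) follow from the key multiplicities in src restricted to key < 10: keys 0 and 5 occur three times each and keys 2, 4, 8 and 9 once each, for 10 rows in total. Since every table holds the same data, each outer join behaves like an inner join, so the 7-way equijoin on key returns 2*3^7 + 4*1^7 = 4378 rows and the 8-way variant returns 2*3^8 + 4*1^8 = 13126. A query along these lines (hypothetical, not part of the captured run; the aliases cnt, key_counts and expected_rows are illustrative) would reproduce the 7-way figure:

SELECT sum(pow(cnt, 7)) AS expected_rows
FROM (SELECT count(*) AS cnt FROM src WHERE key < 10 GROUP BY key) key_counts;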
PREHOOK: query: -- outer join with max 16 aliases
EXPLAIN
SELECT a.* FROM test_table1 a
LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
LEFT OUTER JOIN test_table4 i ON a.key = i.key
LEFT OUTER JOIN test_table5 j ON a.key = j.key
LEFT OUTER JOIN test_table6 k ON a.key = k.key
LEFT OUTER JOIN test_table7 l ON a.key = l.key
LEFT OUTER JOIN test_table8 m ON a.key = m.key
LEFT OUTER JOIN test_table7 n ON a.key = n.key
LEFT OUTER JOIN test_table8 o ON a.key = o.key
LEFT OUTER JOIN test_table4 p ON a.key = p.key
LEFT OUTER JOIN test_table5 q ON a.key = q.key
LEFT OUTER JOIN test_table6 r ON a.key = r.key
LEFT OUTER JOIN test_table7 s ON a.key = s.key
LEFT OUTER JOIN test_table8 t ON a.key = t.key
PREHOOK: type: QUERY
POSTHOOK: query: -- outer join with max 16 aliases
EXPLAIN
SELECT a.* FROM test_table1 a
LEFT OUTER JOIN test_table2 b ON a.key = b.key
LEFT OUTER JOIN test_table3 c ON a.key = c.key
LEFT OUTER JOIN test_table4 d ON a.key = d.key
LEFT OUTER JOIN test_table5 e ON a.key = e.key
LEFT OUTER JOIN test_table6 f ON a.key = f.key
LEFT OUTER JOIN test_table7 g ON a.key = g.key
LEFT OUTER JOIN test_table8 h ON a.key = h.key
LEFT OUTER JOIN test_table4 i ON a.key = i.key
LEFT OUTER JOIN test_table5 j ON a.key = j.key
LEFT OUTER JOIN test_table6 k ON a.key = k.key
LEFT OUTER JOIN test_table7 l ON a.key = l.key
LEFT OUTER JOIN test_table8 m ON a.key = m.key
LEFT OUTER JOIN test_table7 n ON a.key = n.key
LEFT OUTER JOIN test_table8 o ON a.key = o.key
LEFT OUTER JOIN test_table4 p ON a.key = p.key
LEFT OUTER JOIN test_table5 q ON a.key = q.key
LEFT OUTER JOIN test_table6 r ON a.key = r.key
LEFT OUTER JOIN test_table7 s ON a.key = s.key
LEFT OUTER JOIN test_table8 t ON a.key = t.key
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
  Stage-2 is a root stage
  Stage-1 depends on stages: Stage-2
  Stage-0 depends on stages: Stage-1

STAGE PLANS:
  Stage: Stage-2
    Spark
#### A masked pattern was here ####
      Vertices:
        Map 2
            Map Operator Tree:
                TableScan
                  alias: q
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Spark HashTable Sink Operator
                    keys:
                      0 _col0 (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
            Local Work:
              Map Reduce Local Work
        Map 3
            Map Operator Tree:
                TableScan
                  alias: r
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Spark HashTable Sink Operator
                    keys:
                      0 _col0 (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
            Local Work:
              Map Reduce Local Work
        Map 4
            Map Operator Tree:
                TableScan
                  alias: s
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Spark HashTable Sink Operator
                    keys:
                      0 _col0 (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
            Local Work:
              Map Reduce Local Work
        Map 5
            Map Operator Tree:
                TableScan
                  alias: t
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Spark HashTable Sink Operator
                    keys:
                      0 _col0 (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
            Local Work:
              Map Reduce Local Work

  Stage: Stage-1
    Spark
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: a
                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                  Sorted Merge Bucket Map Join Operator
                    condition map:
                         Left Outer Join0 to 1
                         Left Outer Join0 to 2
                         Left Outer Join0 to 3
                         Left Outer Join0 to 4
                         Left Outer Join0 to 5
                         Left Outer Join0 to 6
                         Left Outer Join0 to 7
                         Left Outer Join0 to 8
                         Left Outer Join0 to 9
                         Left Outer Join0 to 10
                         Left Outer Join0 to 11
                         Left Outer Join0 to 12
                         Left Outer Join0 to 13
                         Left Outer Join0 to 14
                         Left Outer Join0 to 15
                    keys:
                      0 key (type: int)
                      1 key (type: int)
                      2 key (type: int)
                      3 key (type: int)
                      4 key (type: int)
                      5 key (type: int)
                      6 key (type: int)
                      7 key (type: int)
                      8 key (type: int)
                      9 key (type: int)
                      10 key (type: int)
                      11 key (type: int)
                      12 key (type: int)
                      13 key (type: int)
                      14 key (type: int)
                      15 key (type: int)
                    outputColumnNames: _col0, _col1
                    Statistics: Num rows: 165 Data size: 1155 Basic stats: COMPLETE Column stats: NONE
                    Map Join Operator
                      condition map:
                           Left Outer Join0 to 1
                           Left Outer Join0 to 2
                           Left Outer Join0 to 3
                           Left Outer Join0 to 4
                      keys:
                        0 _col0 (type: int)
                        1 key (type: int)
                        2 key (type: int)
                        3 key (type: int)
                        4 key (type: int)
                      outputColumnNames: _col0, _col1
                      input vertices:
                        1 Map 2
                        2 Map 3
                        3 Map 4
                        4 Map 5
                      Statistics: Num rows: 726 Data size: 5082 Basic stats: COMPLETE Column stats: NONE
                      Select Operator
                        expressions: _col0 (type: int), _col1 (type: string)
                        outputColumnNames: _col0, _col1
                        Statistics: Num rows: 726 Data size: 5082 Basic stats: COMPLETE Column stats: NONE
                        File Output Operator
                          compressed: false
                          Statistics: Num rows: 726 Data size: 5082 Basic stats: COMPLETE Column stats: NONE
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
            Local Work:
              Map Reduce Local Work

  Stage: Stage-0
    Fetch Operator
      limit: -1
      Processor Tree:
        ListSink