Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
Original file line number Diff line number Diff line change
Expand Up @@ -1737,6 +1737,11 @@ public static enum ConfVars {
"How many rows in the right-most join operand Hive should buffer before emitting the join result."),
HIVE_JOIN_CACHE_SIZE("hive.join.cache.size", 25000,
"How many rows in the joining tables (except the streaming table) should be cached in memory."),
HIVE_MERGE_JOIN_SKEW_THRESHOLD("hive.merge.join.skew.threshold", -1L,
"Maximum number of rows allowed per join key in a single Tez sort-merge join task before a "
+ "skew event is reported."),
HIVE_MERGE_JOIN_SKEW_ABORT("hive.merge.join.skew.abort", false,
"When set to true and the row count is equal to hive.merge.join.skew.threshold, the Tez task will be aborted."),
HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false,
"Whether to push non-equi filter predicates within inner joins. This can improve efficiency in "
+ "the evaluation of certain joins, since we will not be emitting rows which are thrown away by "
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import java.util.TreeSet;

import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordSource;
import org.apache.hadoop.hive.ql.exec.tez.monitoring.SkewedMergeJoinMonitor;
import org.apache.hadoop.hive.ql.util.NullOrdering;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.slf4j.Logger;
Expand Down Expand Up @@ -97,6 +98,8 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
transient NullOrdering nullOrdering;
transient private boolean shortcutUnmatchedRows;

transient SkewedMergeJoinMonitor skewedMergeJoinMonitor;

/** Kryo ctor. */
protected CommonMergeJoinOperator() {
super();
Expand Down Expand Up @@ -139,6 +142,14 @@ public void initializeOp(Configuration hconf) throws HiveException {
int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_BUCKET_CACHE_SIZE);
shortcutUnmatchedRows = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS);

skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
HiveConf.getLongVar(hconf,
HiveConf.ConfVars.HIVE_MERGE_JOIN_SKEW_THRESHOLD),
HiveConf.getBoolVar(hconf,
HiveConf.ConfVars.HIVE_MERGE_JOIN_SKEW_ABORT),
maxAlias
);

if (oldVar != 100) {
bucketSize = oldVar;
} else {
Expand Down Expand Up @@ -322,6 +333,10 @@ public void process(Object row, int tag) throws HiveException {

assert !nextKeyGroup;
candidateStorage[tag].addRow(value);

if (tag == posBigTable) {
skewedMergeJoinMonitor.checkMergeJoinSkew(alias, candidateStorage[tag].rowCount());
}
}

private void emitUnmatchedRows(int tag, boolean force) throws HiveException {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hive.ql.exec.tez.monitoring;

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tracks per-alias row counts during a Tez sort-merge join and reports data skew
 * once any alias accumulates at least {@code mergeJoinSkewThreshold} rows for a
 * single join key group. In warn mode a skewed alias is logged at most once; in
 * abort mode the first crossing throws a {@link HiveException}, failing the task.
 *
 * <p>Not thread-safe; intended to be owned by a single operator instance.
 */
public class SkewedMergeJoinMonitor {

  // Idiomatic SLF4J lookup: pass the Class, not its name string.
  private static final Logger LOG = LoggerFactory.getLogger(SkewedMergeJoinMonitor.class);

  // Fields were previously marked 'transient', but this class is not Serializable,
  // so the modifier had no effect; they are immutable after construction.
  /** Max rows per join key before a skew event is reported; values <= 0 disable monitoring. */
  private final long mergeJoinSkewThreshold;
  /** When true, a threshold crossing aborts the task instead of logging a warning. */
  private final boolean mergeJoinSkewAbort;
  /** One flag per table alias, so each alias is reported at most once. */
  private final boolean[] skewedKeyFlagged;

  /**
   * @param mergeJoinSkewThreshold maximum rows per join key; <= 0 disables the monitor
   * @param mergeJoinSkewAbort true to abort the task on skew, false to only log a warning
   * @param maxAlias number of table aliases (sizes the per-alias flag array)
   */
  public SkewedMergeJoinMonitor(long mergeJoinSkewThreshold, boolean mergeJoinSkewAbort, int maxAlias) {
    this.mergeJoinSkewThreshold = mergeJoinSkewThreshold;
    this.mergeJoinSkewAbort = mergeJoinSkewAbort;
    this.skewedKeyFlagged = new boolean[maxAlias];
  }

  /** @return true when monitoring is enabled (threshold is a positive row count). */
  public boolean isActive() {
    return mergeJoinSkewThreshold > 0;
  }

  /**
   * @return true when {@code alias} has reached the threshold and has not been reported yet.
   *         Always false while the monitor is inactive (previously a disabled threshold of
   *         -1 made {@code rowCount >= threshold} trivially true for direct callers).
   */
  public boolean shouldBeFlagged(byte alias, long rowCount) {
    return isActive() && rowCount >= mergeJoinSkewThreshold && !skewedKeyFlagged[alias];
  }

  /** @return true when {@code alias} has already been reported as skewed. */
  public boolean isFlagged(int alias) {
    return skewedKeyFlagged[alias];
  }

  /**
   * Records the current row count for {@code alias} and reports skew at most once
   * per alias when the threshold is reached.
   *
   * @param alias table alias (position) whose key group is being accumulated
   * @param rowCount rows accumulated so far for the current join key
   * @throws HiveException in abort mode, on the first threshold crossing for the alias
   */
  public void checkMergeJoinSkew(byte alias, long rowCount) throws HiveException {
    if (!shouldBeFlagged(alias, rowCount)) {
      return;
    }

    // Flag before reporting so warn mode logs only once per alias.
    skewedKeyFlagged[alias] = true;

    String msg = String.format(
        "Data skew detected in merge join, "
            + "table alias %d has accumulated %d rows.",
        alias, rowCount);

    if (mergeJoinSkewAbort) {
      throw new HiveException(msg);
    } else {
      LOG.warn(msg);
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hive.ql.exec;

import org.apache.hadoop.hive.ql.exec.tez.monitoring.SkewedMergeJoinMonitor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Unit tests for {@link SkewedMergeJoinMonitor} as wired into
 * {@link CommonMergeJoinOperator} via its package-private
 * {@code skewedMergeJoinMonitor} field. Covers the disabled state, the
 * below/at-threshold boundary, per-alias flag independence, and both warn
 * and abort modes.
 */
public class TestCommonMergeJoinSkewThreshold {

  private CommonMergeJoinOperator op;

  @Before
  public void setUp() {
    // Kryo ctor is sufficient: only the monitor field is exercised here.
    op = new CommonMergeJoinOperator();
  }

  @Test
  public void testDisabled_noWarnNoThrow() throws HiveException {
    // threshold <= 0 disables the monitor entirely.
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        -1L, false, 4);

    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, Long.MAX_VALUE);
    Assert.assertFalse("disabled monitor must never flag an alias",
        op.skewedMergeJoinMonitor.isFlagged(0));
  }

  @Test
  public void testBelowThreshold_isOk() throws HiveException {
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        1000L, false, 4);
    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 999L);
    Assert.assertFalse("row count below threshold must not flag the alias",
        op.skewedMergeJoinMonitor.isFlagged(0));
  }

  @Test
  public void testAtThreshold_warnOnce() throws HiveException {
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        500L, false, 4);

    // Threshold is inclusive: exactly 500 rows should warn without throwing.
    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 500L);

    Assert.assertTrue("skewedKeyFlagged[0] must be set after the first crossing",
        op.skewedMergeJoinMonitor.isFlagged(0));

    // A second crossing for the same alias must be a no-op (still flagged, no throw).
    op.skewedMergeJoinSkewCheckRepeat();
  }

  // Helper verifying the "warn once" semantics for alias 0 on a repeated crossing.
  private void opSkewedMergeJoinSkewCheckRepeat() throws HiveException {
    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 501L);
    Assert.assertTrue("alias 0 must remain flagged after a repeated crossing",
        op.skewedMergeJoinMonitor.isFlagged(0));
  }

  @Test
  public void testFlagsAreIndependentPerTag() throws HiveException {
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        100L, false, 4);

    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 200L);
    Assert.assertTrue("tag 0 should be flagged", op.skewedMergeJoinMonitor.isFlagged(0));
    Assert.assertFalse("tag 1 should still be clear", op.skewedMergeJoinMonitor.isFlagged(1));

    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 1, 150L);
    Assert.assertTrue("tag 1 should now be flagged", op.skewedMergeJoinMonitor.isFlagged(1));
  }

  @Test
  public void testAbortMode_belowThreshold_noThrow() throws HiveException {
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        100L, true, 4);

    op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 99L);
  }

  @Test
  public void testAbortMode_throwsHiveException() {
    op.skewedMergeJoinMonitor = new SkewedMergeJoinMonitor(
        100L, true, 4);

    try {
      op.skewedMergeJoinMonitor.checkMergeJoinSkew((byte) 0, 200L);
      Assert.fail("Expected HiveException to be thrown in abort mode");
    } catch (HiveException e) {
      String msg = e.getMessage();
      Assert.assertNotNull(msg);
      Assert.assertTrue("Message should mention row count 200", msg.contains("200"));
      Assert.assertTrue("Message should identify the skewed alias",
          msg.contains("alias 0"));
      // The alias is flagged before the throw, so retries won't double-report.
      Assert.assertTrue("alias 0 must be flagged even in abort mode",
          op.skewedMergeJoinMonitor.isFlagged(0));
    }
  }
}

20 changes: 20 additions & 0 deletions ql/src/test/queries/clientnegative/mergejoin_skew_abort.q
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
SET hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
set hive.explain.user=false;
set hive.cbo.enable=false;
-- disable map-join conversion so the plan keeps a common (sort-merge) join
set hive.auto.convert.join=false;
set hive.optimize.ppd=false;
-- Merge-join skew observability: with abort=true the Tez task must fail with a
-- HiveException as soon as any join key accumulates rows beyond the threshold (2).
set hive.merge.join.skew.threshold=2;
set hive.merge.join.skew.abort=true;

CREATE TABLE merge_skew_abort_a (key int, value string) STORED AS TEXTFILE;
CREATE TABLE merge_skew_abort_b (key int, value string) STORED AS TEXTFILE;

INSERT INTO TABLE merge_skew_abort_a VALUES (1, 'a1'), (1, 'a2'), (1, 'a3'), (1, 'a4'),(2, 'b1');
INSERT INTO TABLE merge_skew_abort_b VALUES (1, 'x1'), (2, 'y1');

SELECT a.key, a.value, b.value
FROM merge_skew_abort_a a JOIN merge_skew_abort_b b ON a.key = b.key;

35 changes: 35 additions & 0 deletions ql/src/test/queries/clientpositive/mergejoin_skew_warn.q
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
SET hive.vectorized.execution.enabled=false;
set hive.mapred.mode=nonstrict;
set hive.explain.user=false;
set hive.cbo.enable=false;
-- disable map-join conversion so the plan keeps a common (sort-merge) join
set hive.auto.convert.join=false;
set hive.optimize.ppd=false;
-- warn mode: crossing the threshold only logs a warning; queries still succeed
set hive.merge.join.skew.threshold=2;
set hive.merge.join.skew.abort=false;

-- SORT_QUERY_RESULTS

CREATE TABLE merge_skew_warn_a (key int, value string) STORED AS TEXTFILE;
CREATE TABLE merge_skew_warn_b (key int, value string) STORED AS TEXTFILE;

INSERT INTO TABLE merge_skew_warn_a VALUES (1, 'a1'), (1, 'a2'), (1, 'a3'), (1, 'a4'),
(2, 'b1'), (3, 'c1');
INSERT INTO TABLE merge_skew_warn_b VALUES (1, 'x1'), (2, 'y1'), (3, 'z1');

EXPLAIN
SELECT a.key, a.value, b.value
FROM merge_skew_warn_a a JOIN merge_skew_warn_b b ON a.key = b.key;

SELECT a.key, a.value, b.value
FROM merge_skew_warn_a a JOIN merge_skew_warn_b b ON a.key = b.key;

SELECT count(*) FROM merge_skew_warn_a a JOIN merge_skew_warn_b b ON a.key = b.key;

-- no warning run
set hive.merge.join.skew.threshold=-1;

SELECT count(*) FROM merge_skew_warn_a a JOIN merge_skew_warn_b b ON a.key = b.key;

DROP TABLE merge_skew_warn_a;
DROP TABLE merge_skew_warn_b;

70 changes: 70 additions & 0 deletions ql/src/test/results/clientnegative/mergejoin_skew_abort.q.out
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
PREHOOK: query: CREATE TABLE merge_skew_abort_a (key int, value string) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@merge_skew_abort_a
POSTHOOK: query: CREATE TABLE merge_skew_abort_a (key int, value string) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@merge_skew_abort_a
PREHOOK: query: CREATE TABLE merge_skew_abort_b (key int, value string) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@merge_skew_abort_b
POSTHOOK: query: CREATE TABLE merge_skew_abort_b (key int, value string) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@merge_skew_abort_b
PREHOOK: query: INSERT INTO TABLE merge_skew_abort_a VALUES (1, 'a1'), (1, 'a2'), (1, 'a3'), (1, 'a4'),(2, 'b1')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@merge_skew_abort_a
POSTHOOK: query: INSERT INTO TABLE merge_skew_abort_a VALUES (1, 'a1'), (1, 'a2'), (1, 'a3'), (1, 'a4'),(2, 'b1')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@merge_skew_abort_a
POSTHOOK: Lineage: merge_skew_abort_a.key SCRIPT []
POSTHOOK: Lineage: merge_skew_abort_a.value SCRIPT []
PREHOOK: query: INSERT INTO TABLE merge_skew_abort_b VALUES (1, 'x1'), (2, 'y1')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@merge_skew_abort_b
POSTHOOK: query: INSERT INTO TABLE merge_skew_abort_b VALUES (1, 'x1'), (2, 'y1')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@merge_skew_abort_b
POSTHOOK: Lineage: merge_skew_abort_b.key SCRIPT []
POSTHOOK: Lineage: merge_skew_abort_b.value SCRIPT []
PREHOOK: query: SELECT a.key, a.value, b.value
FROM merge_skew_abort_a a JOIN merge_skew_abort_b b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@merge_skew_abort_a
PREHOOK: Input: default@merge_skew_abort_b
#### A masked pattern was here ####
Status: Failed
Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Data skew detected in merge join, table alias 0 has accumulated 2 rows.
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Data skew detected in merge join, table alias 0 has accumulated 2 rows.
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Reducer 2] killed/failed due to:OWN_TASK_FAILURE]
DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0
FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Data skew detected in merge join, table alias 0 has accumulated 2 rows.
#### A masked pattern was here ####
], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row
#### A masked pattern was here ####
Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Data skew detected in merge join, table alias 0 has accumulated 2 rows.
#### A masked pattern was here ####
]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Reducer 2] killed/failed due to:OWN_TASK_FAILURE]DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0
Loading
Loading