apache/hive PR #6459: changes from all commits
@@ -198,7 +198,7 @@ private void cleanUsingLocation(CompactionInfo ci, String path, boolean requires
       deleted = fsRemover.clean(getCleaningRequestBasedOnLocation(ci, path));
     }
     if (!deleted.isEmpty()) {
-      txnHandler.markCleaned(ci.asSoftDeleted());
+      txnHandler.markCleaned(ci);
     } else {
       txnHandler.clearCleanerStart(ci);
     }
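Why the mutation disappears: the soft-delete signal now lives on the compaction type set at enqueue time, not on a flag flipped by the Cleaner. A before/after sketch (the surrounding Cleaner class is assumed; see the CompactionInfo and MarkCleanedFunction hunks below):

// Before (API removed in this PR):
//   txnHandler.markCleaned(ci.asSoftDeleted());
// After: ci already carries CompactionType.DEFERRED_CLEANUP when the entry came
// from a drop event, and MarkCleanedFunction branches on ci.isDeferredCleanup().
txnHandler.markCleaned(ci);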

Some generated files are not rendered by default.
@@ -220,6 +220,7 @@ enum CompactionType {
   REBALANCE = 3,
   ABORT_TXN_CLEANUP = 4,
   SMART_OPTIMIZE = 5,
+  DEFERRED_CLEANUP = 6,
 }

 enum GrantRevokeType {
@@ -88,7 +88,8 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException {

     if (currentTxn > 0) {
       try {
-        CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(), CompactionType.MAJOR);
+        CompactionRequest rqst = new CompactionRequest(table.getDbName(), table.getTableName(),
+            CompactionType.DEFERRED_CLEANUP);
         rqst.setRunas(TxnUtils.findUserToRunAs(table.getSd().getLocation(), table, conf));
         rqst.putToProperties("location", table.getSd().getLocation());
         rqst.putToProperties("ifPurge", Boolean.toString(isMustPurge(tableEvent.getEnvironmentContext(), table)));
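Previously the listener enqueued a MAJOR request that the cleanup path treated specially; the dedicated type names the intent directly. An annotated sketch of the request this listener builds (the values are placeholders; the property keys are from the diff):

CompactionRequest rqst = new CompactionRequest("db1", "t1", CompactionType.DEFERRED_CLEANUP);
rqst.putToProperties("location", "/warehouse/db1.db/t1"); // directory the Cleaner removes later
rqst.putToProperties("ifPurge", "true");                  // presumably: bypass trash on delete
// The entry lands in COMPACTION_QUEUE with CQ_TYPE = 'd' (see the TxnStore hunk below).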
@@ -28,7 +28,6 @@
 import java.util.concurrent.atomic.AtomicReference;

 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.metastore.Batchable;
 import org.apache.hadoop.hive.metastore.HMSHandler;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
@@ -137,9 +136,6 @@ public DropDatabaseResult execute() throws TException, IOException {
       if (isSoftDelete) {
         context = new EnvironmentContext();
         context.putToProperties(hive_metastoreConstants.TXN_ID, String.valueOf(request.getTxnId()));
-        if (ReplChangeManager.isSourceOfReplication(db)) {
-          context.putToProperties(ReplConst.SOURCE_OF_REPLICATION, Boolean.TRUE.toString());
-        }
         request.setDeleteManagedDir(false);
       }
       DropTableRequest dropRequest = new DropTableRequest(name, table.getTableName());

(A comment thread on the removed block was marked as resolved by deniskuzZ.)
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.metastore.HMSHandler;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
 import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
@@ -37,6 +38,7 @@
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.DropTableRequest;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.GetTableRequest;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -88,7 +90,10 @@ public DropTableResult execute() throws TException {
       throw new MetaException("Drop table in REMOTE database " + db.getName() + " is not allowed");
     }
     isReplicated = isDbReplicationTarget(db);

+    EnvironmentContext context = request.getEnvContext();
+    if (!request.isDeleteData() && context != null && ReplChangeManager.isSourceOfReplication(db)) {
+      context.putToProperties(ReplConst.SOURCE_OF_REPLICATION, Boolean.TRUE.toString());
+    }
     checkInterrupted();
     // Check if table is part of a materialized view.
     // If it is, it cannot be dropped.

Review thread on the new if block:

deniskuzZ (Member, May 4, 2026): what does request.isDeleteData() do? Do we need it?

Contributor (Author): Since it is enough to set this only when request.isDeleteData() is false (i.e., soft delete), I added the check. But there is no harm in setting it in the context regardless, since we do not use it. Do you suggest removing it?

deniskuzZ (Member, May 6, 2026): that's not accurate: (request.isDeleteData() == false) != SOFT_DELETE. Please add a comment that this if block is needed to handle soft-delete.
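A sketch of how the block could read with the comment deniskuzZ requested (the condition is the one from the diff; the comment text is mine):

EnvironmentContext context = request.getEnvContext();
// This block is needed to handle soft-delete: tag the context of a
// replication-source database so the deferred cleanup path can recognize it.
// Note from the review: !request.isDeleteData() alone does not imply soft delete.
if (!request.isDeleteData() && context != null && ReplChangeManager.isSourceOfReplication(db)) {
  context.putToProperties(ReplConst.SOURCE_OF_REPLICATION, Boolean.TRUE.toString());
}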
@@ -135,6 +135,7 @@ enum MUTEX_KEY {
   char REBALANCE_TYPE = 'r';
   char ABORT_TXN_CLEANUP_TYPE = 'c';
   char SMART_OPTIMIZE_TYPE = '*';
+  char DEFERRED_CLEANUP = 'd';

   String[] COMPACTION_STATES = new String[] {INITIATED_RESPONSE, WORKING_RESPONSE, CLEANING_RESPONSE, FAILED_RESPONSE,
     SUCCEEDED_RESPONSE, DID_NOT_INITIATE_RESPONSE, REFUSED_RESPONSE };
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java

@@ -612,6 +612,8 @@
       return CompactionType.ABORT_TXN_CLEANUP;
     case TxnStore.SMART_OPTIMIZE_TYPE:
       return CompactionType.SMART_OPTIMIZE;
+    case TxnStore.DEFERRED_CLEANUP:
+      return CompactionType.DEFERRED_CLEANUP;
     default:
       throw new SQLException("Unexpected compaction type " + dbValue);
   }

SonarCloud warning (line 615): 'case' child has incorrect indentation level 6, expected level should be 4. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ3gIwolA4zcgYsbgZNl&open=AZ3gIwolA4zcgYsbgZNl&pullRequest=6459
SonarCloud warning (line 616): 'block' child has incorrect indentation level 8, expected level should be 6. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ3gIwolA4zcgYsbgZNm&open=AZ3gIwolA4zcgYsbgZNm&pullRequest=6459

@@ -629,6 +631,8 @@
       return TxnStore.ABORT_TXN_CLEANUP_TYPE;
     case SMART_OPTIMIZE:
       return TxnStore.SMART_OPTIMIZE_TYPE;
+    case DEFERRED_CLEANUP:
+      return TxnStore.DEFERRED_CLEANUP;
     default:
       throw new MetaException("Unexpected compaction type " + ct);
   }

SonarCloud warning (line 634): 'case' child has incorrect indentation level 6, expected level should be 4. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ3gIwolA4zcgYsbgZNn&open=AZ3gIwolA4zcgYsbgZNn&pullRequest=6459
SonarCloud warning (line 635): 'block' child has incorrect indentation level 8, expected level should be 6. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ3gIwolA4zcgYsbgZNo&open=AZ3gIwolA4zcgYsbgZNo&pullRequest=6459
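The two switches above are mirror images, so the new value should round-trip. A minimal sketch; the enclosing method names are not visible in these hunks, so dbCompactionType2ThriftType and thriftCompactionType2DbType below are assumptions inferred from the parameter names dbValue and ct:

import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class DeferredCleanupRoundTrip {
  public static void main(String[] args) throws Exception {
    // Thrift enum -> single-char DB code ('d', per the TxnStore hunk above).
    char dbCode = TxnUtils.thriftCompactionType2DbType(CompactionType.DEFERRED_CLEANUP);
    System.out.println(dbCode == TxnStore.DEFERRED_CLEANUP);          // expect: true
    // ...and back again.
    System.out.println(TxnUtils.dbCompactionType2ThriftType(dbCode)); // expect: DEFERRED_CLEANUP
  }
}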
@@ -88,7 +88,6 @@ public class CompactionInfo implements Comparable<CompactionInfo> {
   private String fullPartitionName = null;
   private String fullTableName = null;
   private StringableMap propertiesMap;
-  private boolean softDelete;

   public CompactionInfo(String dbname, String tableName, String partName, CompactionType type) {
     this.dbname = dbname;
@@ -192,7 +191,6 @@ public String toString() {
       .append("numberOfBuckets", numberOfBuckets)
       .append("orderByClause", orderByClause)
       .append("minOpenWriteTxnId", minOpenWriteTxnId)
-      .append("softDelete", softDelete)
       .build();
   }
@@ -372,13 +370,8 @@ public boolean isAbortedTxnCleanup() {
     return type == CompactionType.ABORT_TXN_CLEANUP;
   }

-  public CompactionInfo asSoftDeleted() {
-    this.softDelete = true;
-    return this;
-  }
-
-  public boolean isSoftDelete() {
-    return softDelete;
+  public boolean isDeferredCleanup() {
+    return CompactionType.DEFERRED_CLEANUP == type;
   }

   public boolean isSourceOfReplication() {
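With the flag gone, deferred cleanup is purely a property of the type. A minimal check using the constructor shown above (the package in the import is an assumption; the class name and constructor are from the diff):

import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; // package assumed

public class DeferredCleanupInfoCheck {
  public static void main(String[] args) {
    CompactionInfo ci = new CompactionInfo("db1", "t1", null, CompactionType.DEFERRED_CLEANUP);
    // No asSoftDeleted() mutation step anymore; the type alone decides.
    System.out.println(ci.isDeferredCleanup()); // true
  }
}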
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java

@@ -25,6 +25,7 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.slf4j.Logger;
@@ -66,7 +67,8 @@
       "\"CQ_DATABASE\" = :dbName AND " +
       "(\"CQ_TABLE\" = :tableName OR :tableName IS NULL) AND " +
       "(\"CQ_PARTITION\" = :partName OR :partName IS NULL) AND " +
-      "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL)");
+      "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL) AND " +
+      "(\"CQ_TYPE\" != :compactionType)");
   put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
     "DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE " +
       "\"CC_DATABASE\" = :dbName AND " +

Member comment on the :compactionType parameter: could have shortened to cType.

The new predicate presumably keeps the DEFERRED_CLEANUP queue entry (CQ_TYPE = 'd') enqueued by the drop event itself from being deleted while the object's other compaction records are cleaned up.

@@ -112,7 +114,6 @@
   public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
     // cleanup should be done only for objects belonging to default catalog
     List<MapSqlParameterSource> paramSources = new ArrayList<>();
-
     switch (type) {
       case DATABASE: {
         if (!defaultCatalog.equals(db.getCatalogName())) {
@@ -124,7 +125,8 @@
           .addValue("dbName", db.getName().toLowerCase())
           .addValue("tableName", null, Types.VARCHAR)
           .addValue("partName", null, Types.VARCHAR)
-          .addValue("txnId", txnId, Types.BIGINT));
+          .addValue("txnId", txnId, Types.BIGINT)
+          .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR));
         break;
       }

SonarCloud failure (line 128): Define a constant instead of duplicating this literal "txnId" 3 times. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ3gIwiwA4zcgYsbgZNk&open=AZ3gIwiwA4zcgYsbgZNk&pullRequest=6459
SonarCloud failure (line 129): Define a constant instead of duplicating this literal "compactionType" 3 times. See https://sonarcloud.io/project/issues?id=apache_hive&issues=AZ38CoqhbKRzIzxRFWD5&open=AZ38CoqhbKRzIzxRFWD5&pullRequest=6459

       case TABLE: {
@@ -137,7 +139,8 @@
           .addValue("dbName", table.getDbName().toLowerCase())
           .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR)
           .addValue("partName", null, Types.VARCHAR)
-          .addValue("txnId", null, Types.BIGINT));
+          .addValue("txnId", null, Types.BIGINT)
+          .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR));
         break;
       }
       case PARTITION: {
@@ -155,7 +158,8 @@
           .addValue("dbName", table.getDbName().toLowerCase())
           .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR)
           .addValue("partName", Warehouse.makePartName(partCols, partVals), Types.VARCHAR)
-          .addValue("txnId", null, Types.BIGINT));
+          .addValue("txnId", null, Types.BIGINT)
+          .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR));
       }
     }
   }
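A possible way to address the two SonarCloud duplicated-literal warnings; the constant names here are hypothetical (mine, not from the PR), everything else mirrors the diff:

// Hypothetical constants for the duplicated literals:
private static final String TXN_ID_PARAM = "txnId";
private static final String COMPACTION_TYPE_PARAM = "compactionType";
private static final String DEFERRED_CLEANUP_CODE = Character.toString(TxnStore.DEFERRED_CLEANUP);

// Each case then binds identically:
paramSources.add(new MapSqlParameterSource()
    .addValue("dbName", db.getName().toLowerCase())
    .addValue("tableName", null, Types.VARCHAR)
    .addValue("partName", null, Types.VARCHAR)
    .addValue(TXN_ID_PARAM, txnId, Types.BIGINT)
    .addValue(COMPACTION_TYPE_PARAM, DEFERRED_CLEANUP_CODE, Types.CHAR));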
@@ -50,7 +50,7 @@ public MarkCleanedFunction(CompactionInfo info) {
   public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
     NamedParameterJdbcTemplate jdbcTemplate = jdbcResource.getJdbcTemplate();
     MapSqlParameterSource param;
-    if (info.isSoftDelete()) {
+    if (info.isDeferredCleanup()) {
       // Remove compaction queue record and return
       removeCompactionAndAbortRetryEntries(info, jdbcTemplate);
       return null;
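Taken together with the Cleaner hunk at the top, the deferred-cleanup path through markCleaned now looks roughly like this (a sketch; jdbcResource and the surrounding wiring are assumed):

CompactionInfo ci = new CompactionInfo("db1", "t1", null, CompactionType.DEFERRED_CLEANUP);
// isDeferredCleanup() is true, so execute() only removes the compaction-queue and
// abort-retry entries and returns, skipping the usual completed-compaction bookkeeping.
new MarkCleanedFunction(ci).execute(jdbcResource);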