-
Notifications
You must be signed in to change notification settings - Fork 4.8k
HIVE-29587: Cleanup acid table dir after async drop #6459
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -29,6 +29,7 @@ | |
| import org.apache.hadoop.fs.FileStatus; | ||
| import org.apache.hadoop.fs.Path; | ||
| import org.apache.hadoop.hive.common.TableName; | ||
| import org.apache.hadoop.hive.common.repl.ReplConst; | ||
| import org.apache.hadoop.hive.metastore.HMSHandler; | ||
| import org.apache.hadoop.hive.metastore.IHMSHandler; | ||
| import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier; | ||
|
|
@@ -37,6 +38,7 @@ | |
| import org.apache.hadoop.hive.metastore.Warehouse; | ||
| import org.apache.hadoop.hive.metastore.api.Database; | ||
| import org.apache.hadoop.hive.metastore.api.DropTableRequest; | ||
| import org.apache.hadoop.hive.metastore.api.EnvironmentContext; | ||
| import org.apache.hadoop.hive.metastore.api.GetTableRequest; | ||
| import org.apache.hadoop.hive.metastore.api.MetaException; | ||
| import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; | ||
|
|
@@ -88,7 +90,10 @@ public DropTableResult execute() throws TException { | |
| throw new MetaException("Drop table in REMOTE database " + db.getName() + " is not allowed"); | ||
| } | ||
| isReplicated = isDbReplicationTarget(db); | ||
|
|
||
| EnvironmentContext context = request.getEnvContext(); | ||
| if (!request.isDeleteData() && context != null && ReplChangeManager.isSourceOfReplication(db)) { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What does request.isDeleteData() do? Do we need it?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Since it is enough to set only when request.isDeleteData() is
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. That's not accurate: (request.isDeleteData() == false) != SOFT_DELETE. Please add a comment that this if block is needed to handle soft-delete |
||
| context.putToProperties(ReplConst.SOURCE_OF_REPLICATION, Boolean.TRUE.toString()); | ||
| } | ||
| checkInterrupted(); | ||
| // Check if table is part of a materialized view. | ||
| // If it is, it cannot be dropped. | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -25,6 +25,7 @@ | |
| import org.apache.hadoop.hive.metastore.api.MetaException; | ||
| import org.apache.hadoop.hive.metastore.api.Partition; | ||
| import org.apache.hadoop.hive.metastore.api.Table; | ||
| import org.apache.hadoop.hive.metastore.txn.TxnStore; | ||
| import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; | ||
| import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction; | ||
| import org.slf4j.Logger; | ||
|
|
@@ -66,7 +67,8 @@ | |
| "\"CQ_DATABASE\" = :dbName AND " + | ||
| "(\"CQ_TABLE\" = :tableName OR :tableName IS NULL) AND " + | ||
| "(\"CQ_PARTITION\" = :partName OR :partName IS NULL) AND " + | ||
| "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL)"); | ||
| "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL) AND " + | ||
| "(\"CQ_TYPE\" != :compactionType)"); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Could have been shortened to |
||
| put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType), | ||
| "DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE " + | ||
| "\"CC_DATABASE\" = :dbName AND " + | ||
|
|
@@ -112,7 +114,6 @@ | |
| public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException { | ||
| // cleanup should be done only for objects belonging to default catalog | ||
| List<MapSqlParameterSource> paramSources = new ArrayList<>(); | ||
|
|
||
| switch (type) { | ||
| case DATABASE: { | ||
| if (!defaultCatalog.equals(db.getCatalogName())) { | ||
|
|
@@ -124,7 +125,8 @@ | |
| .addValue("dbName", db.getName().toLowerCase()) | ||
| .addValue("tableName", null, Types.VARCHAR) | ||
| .addValue("partName", null, Types.VARCHAR) | ||
| .addValue("txnId", txnId, Types.BIGINT)); | ||
| .addValue("txnId", txnId, Types.BIGINT) | ||
|
Check failure on line 128 in standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java
|
||
| .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR)); | ||
|
Check failure on line 129 in standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanupRecordsFunction.java
|
||
| break; | ||
| } | ||
| case TABLE: { | ||
|
|
@@ -137,7 +139,8 @@ | |
| .addValue("dbName", table.getDbName().toLowerCase()) | ||
| .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR) | ||
| .addValue("partName", null, Types.VARCHAR) | ||
| .addValue("txnId", null, Types.BIGINT)); | ||
| .addValue("txnId", null, Types.BIGINT) | ||
| .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR)); | ||
| break; | ||
| } | ||
| case PARTITION: { | ||
|
|
@@ -155,7 +158,8 @@ | |
| .addValue("dbName", table.getDbName().toLowerCase()) | ||
| .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR) | ||
| .addValue("partName", Warehouse.makePartName(partCols, partVals), Types.VARCHAR) | ||
| .addValue("txnId", null, Types.BIGINT)); | ||
| .addValue("txnId", null, Types.BIGINT) | ||
| .addValue("compactionType", Character.toString(TxnStore.DEFERRED_CLEANUP), Types.CHAR)); | ||
| } | ||
| } | ||
| } | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.