diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 5a5d7ac95dfc..76eedeadb617 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -3253,6 +3253,11 @@ public Function get_function(String dbName, String funcName) throws TException { @Override public void update_table_params(List updates) throws TException { + for (TableParamsUpdate update : updates) { + if (!update.isSetCat_name()) { + update.setCat_name(getDefaultCatalog(conf)); + } + } getMS().updateTableParams(updates); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 88ee4a4b8c58..d3ff9e96377d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -639,6 +639,18 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str FileSystem destFs = null; boolean dataWasMoved = false; Database db; + Partition check_part; + try { + check_part = msdb.getPartition(catName, dbname, name, new_part.getValues()); + } catch(NoSuchObjectException e) { + // this means there is no existing partition + check_part = null; + } + + if (check_part != null) { + throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." 
+ + new_part.getValues()); + } try { msdb.openTransaction(); Table tbl = msdb.getTable(catName, dbname, name, null); @@ -655,19 +667,6 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str "Unable to rename partition because old partition does not exist"); } - Partition check_part; - try { - check_part = msdb.getPartition(catName, dbname, name, new_part.getValues()); - } catch(NoSuchObjectException e) { - // this means there is no existing partition - check_part = null; - } - - if (check_part != null) { - throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." + - new_part.getValues()); - } - // when renaming a partition, we should update // 1) partition SD Location // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 88dbd1ff4727..92303fd4fa96 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hive.metastore; -import static org.apache.commons.lang3.StringUtils.join; import static org.apache.hadoop.hive.metastore.Batchable.NO_BATCHING; import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_USE_CUSTOM_POOL; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.newMetaException; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifiers; @@ -39,10 +37,8 @@ import 
java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -50,18 +46,15 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.TreeMap; -import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -77,7 +70,6 @@ import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.classification.InterfaceAudience; @@ -101,7 +93,6 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.AddPackageRequest; import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; @@ -115,42 +106,26 @@ import org.apache.hadoop.hive.metastore.api.GetPackageRequest; import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec; -import 
org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.ISchema; import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; import org.apache.hadoop.hive.metastore.api.ListPackageRequest; import org.apache.hadoop.hive.metastore.api.ListStoredProcedureRequest; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Package; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; import org.apache.hadoop.hive.metastore.api.PartitionFilterMode; -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; -import org.apache.hadoop.hive.metastore.api.PartitionValuesRow; import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; 
import org.apache.hadoop.hive.metastore.api.QueryState; import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.RuntimeStat; import org.apache.hadoop.hive.metastore.api.ReplicationMetricList; import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest; @@ -181,13 +156,9 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StoredProcedure; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.TableParamsUpdate; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMNullablePool; @@ -198,14 +169,13 @@ import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus; import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.directsql.DirectSqlAggrStats; +import org.apache.hadoop.hive.metastore.metastore.iface.PrivilegeStore; import org.apache.hadoop.hive.metastore.metrics.Metrics; import 
org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.model.FetchGroups; import org.apache.hadoop.hive.metastore.model.MCatalog; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; import org.apache.hadoop.hive.metastore.model.MConstraint; @@ -217,23 +187,16 @@ import org.apache.hadoop.hive.metastore.model.MDelegationToken; import org.apache.hadoop.hive.metastore.model.MFieldSchema; import org.apache.hadoop.hive.metastore.model.MFunction; -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; import org.apache.hadoop.hive.metastore.model.MISchema; import org.apache.hadoop.hive.metastore.model.MMVSource; import org.apache.hadoop.hive.metastore.model.MMasterKey; import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties; -import org.apache.hadoop.hive.metastore.model.MNotificationLog; -import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MOrder; import org.apache.hadoop.hive.metastore.model.MPackage; import org.apache.hadoop.hive.metastore.model.MPartition; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; import org.apache.hadoop.hive.metastore.model.MPartitionEvent; -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; import org.apache.hadoop.hive.metastore.model.MResourceUri; -import org.apache.hadoop.hive.metastore.model.MRole; -import org.apache.hadoop.hive.metastore.model.MRoleMap; import org.apache.hadoop.hive.metastore.model.MRuntimeStat; import org.apache.hadoop.hive.metastore.model.MScheduledExecution; import org.apache.hadoop.hive.metastore.model.MScheduledQuery; @@ -243,10 +206,8 @@ import org.apache.hadoop.hive.metastore.model.MStoredProc; import org.apache.hadoop.hive.metastore.model.MStringList; import org.apache.hadoop.hive.metastore.model.MTable; -import 
org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; import org.apache.hadoop.hive.metastore.model.MTablePrivilege; -import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog; import org.apache.hadoop.hive.metastore.model.MType; import org.apache.hadoop.hive.metastore.model.MVersionTable; import org.apache.hadoop.hive.metastore.model.MWMMapping; @@ -256,17 +217,18 @@ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status; import org.apache.hadoop.hive.metastore.model.MWMTrigger; import org.apache.hadoop.hive.metastore.model.MReplicationMetrics; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree; -import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.properties.CachingPropertyStore; import org.apache.hadoop.hive.metastore.properties.PropertyStore; +import org.apache.hadoop.hive.metastore.metastore.PersistenceManagerProxy; +import org.apache.hadoop.hive.metastore.metastore.RawStoreAware; +import org.apache.hadoop.hive.metastore.metastore.MetaDescriptor; +import org.apache.hadoop.hive.metastore.metastore.TransactionHandler; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.RetryingExecutor; -import org.apache.thrift.TException; import org.datanucleus.ExecutionContext; import org.datanucleus.api.jdo.JDOPersistenceManager; import org.datanucleus.api.jdo.JDOTransaction; @@ -282,7 +244,6 @@ import com.cronutils.model.time.ExecutionTime; import com.cronutils.parser.CronParser; import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Lists; @@ -335,7 +296,6 @@ private enum TXN_STATUS { private MetaStoreDirectSql directSql = null; private DirectSqlAggrStats directSqlAggrStats; protected DatabaseProduct dbType = null; - private PartitionExpressionProxy expressionProxy = null; protected Configuration conf; private volatile int openTrasactionCalls = 0; private Transaction currentTransaction = null; @@ -343,6 +303,7 @@ private enum TXN_STATUS { private Counter directSqlErrors; private boolean areTxnStatsSupported = false; private PropertyStore propertyStore; + private Map, Object> cachedImpls = new HashMap<>(); public ObjectStore() { } @@ -373,7 +334,6 @@ public void setConf(Configuration conf) { pm = null; directSql = null; directSqlAggrStats = null; - expressionProxy = null; openTrasactionCalls = 0; currentTransaction = null; transactionStatus = TXN_STATUS.NO_STATE; @@ -410,7 +370,6 @@ private void initialize() { if (isInitialized) { dbType = PersistenceManagerProvider.getDatabaseProduct(); sqlGenerator = new SQLGenerator(dbType, conf); - expressionProxy = PartFilterExprUtil.createExpressionProxy(conf); if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) { String schema = PersistenceManagerProvider.getProperty("javax.jdo.mapping.Schema"); schema = org.apache.commons.lang3.StringUtils.defaultIfBlank(schema, null); @@ -558,27 +517,29 @@ public boolean openTransaction() { return result; } + @SuppressWarnings("unchecked") @Override - public long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue) - throws MetaException, NoSuchObjectException { - return new GetHelper(table.getCatName(), table.getDbName(), table.getTableName(), true, false) { - - @Override - protected String describeResult() { - return "Affected rows"; - } - - @Override - protected Long getSqlResult(GetHelper ctx) throws 
MetaException { - return directSql.updateTableParam(table, key, expectedValue, newValue); - } + public T unwrap(Class iface) { + MetaDescriptor descriptor = iface.getAnnotation(MetaDescriptor.class); + if (descriptor == null) { + throw new IllegalArgumentException("Unable to unwrap the store as " + iface); + } + String implClassName = conf.get("metastore." + descriptor.alias() + ".store.impl", ""); + T impl = (T) cachedImpls.get(iface); + if (impl != null && impl.getClass().getName().equals(implClassName)) { + return impl; + } - @Override - protected Long getJdoResult(GetHelper ctx) throws MetaException, NoSuchObjectException, InvalidObjectException { - throw new UnsupportedOperationException( - "Cannot update parameter with JDO, make sure direct SQL is enabled"); - } - }.run(false); + Class ifaceImpl = conf.getClass(implClassName, descriptor.defaultImpl()); + T simpl = (T) JavaUtils.newInstance(ifaceImpl); + List trackOpenedQueries = new LinkedList<>(); + if (simpl instanceof RawStoreAware rsa) { + rsa.setBaseStore(this); + rsa.setPersistentManager(PersistenceManagerProxy.getProxy(pm, trackOpenedQueries)); + } + impl = TransactionHandler.getProxy(iface, new TransactionHandler<>(this, simpl, trackOpenedQueries)); + cachedImpls.put(iface, impl); + return impl; } @Override @@ -927,7 +888,7 @@ public Database getJDODatabase(String catName, String name) throws NoSuchObjectE Database db = new Database(); db.setName(mdb.getName()); db.setDescription(mdb.getDescription()); - db.setParameters(convertMap(mdb.getParameters())); + db.setParameters(convertMap(mdb.getParameters(), conf)); db.setOwnerName(mdb.getOwnerName()); String type = org.apache.commons.lang3.StringUtils.defaultIfBlank(mdb.getOwnerType(), null); PrincipalType principalType = (type == null) ? 
null : PrincipalType.valueOf(type); @@ -996,7 +957,7 @@ public boolean dropDatabase(String catName, String dbname) // then drop the database MDatabase db = getMDatabase(catName, dbname); pm.retrieve(db); - List dbGrants = this.listDatabaseGrants(catName, dbname, null); + List dbGrants = unwrap(PrivilegeStore.class).listDatabaseGrants(catName, dbname, null); if (CollectionUtils.isNotEmpty(dbGrants)) { pm.deletePersistentAll(dbGrants); } @@ -1093,7 +1054,7 @@ private Database convertToDatabase(MDatabase mdb) { Database db = new Database(); db.setName(mdb.getName()); db.setDescription(mdb.getDescription()); - db.setParameters(convertMap(mdb.getParameters())); + db.setParameters(convertMap(mdb.getParameters(), conf)); db.setOwnerName(mdb.getOwnerName()); String type = org.apache.commons.lang3.StringUtils.defaultIfBlank(mdb.getOwnerType(), null); PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type); @@ -1115,6 +1076,16 @@ private Database convertToDatabase(MDatabase mdb) { @Override public void createDataConnector(DataConnector connector) throws InvalidObjectException, MetaException { boolean commited = false; + try { + openTransaction(); + pm.makePersistent(convert(connector)); + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, null); + } + } + + public static MDataConnector convert(DataConnector connector) { MDataConnector mDataConnector = new MDataConnector(); mDataConnector.setName(connector.getName().toLowerCase()); mDataConnector.setType(connector.getType()); @@ -1125,13 +1096,7 @@ public void createDataConnector(DataConnector connector) throws InvalidObjectExc PrincipalType ownerType = connector.getOwnerType(); mDataConnector.setOwnerType((null == ownerType ? 
PrincipalType.USER.name() : ownerType.name())); mDataConnector.setCreateTime(connector.getCreateTime()); - try { - openTransaction(); - pm.makePersistent(mDataConnector); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } + return mDataConnector; } @SuppressWarnings("nls") @@ -1175,7 +1140,7 @@ public DataConnector getDataConnector(String name) throws NoSuchObjectException connector.setType(mdc.getType()); connector.setUrl(mdc.getUrl()); connector.setDescription(mdc.getDescription()); - connector.setParameters(convertMap(mdc.getParameters())); + connector.setParameters(convertMap(mdc.getParameters(), conf)); connector.setOwnerName(mdc.getOwnerName()); String type = org.apache.commons.lang3.StringUtils.defaultIfBlank(mdc.getOwnerType(), null); PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type); @@ -1247,7 +1212,7 @@ public boolean dropDataConnector(String dcname) // then drop the dataconnector MDataConnector mdb = getMDataConnector(dcname); pm.retrieve(mdb); - List dcGrants = this.listDataConnectorGrants(dcname, null); + List dcGrants = unwrap(PrivilegeStore.class).listDataConnectorGrants(dcname, null); if (CollectionUtils.isNotEmpty(dcGrants)) { pm.deletePersistentAll(dcGrants); } @@ -1386,53 +1351,12 @@ public SQLAllTableConstraints createTableWithConstraints(Table tbl, SQLAllTableC } } - @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { - boolean commited = false; - MTable mtbl = null; - - try { - openTransaction(); - - mtbl = convertToMTable(tbl); - if (TxnUtils.isTransactionalTable(tbl)) { - mtbl.setWriteId(tbl.getWriteId()); - } - pm.makePersistent(mtbl); - - if (tbl.getCreationMetadata() != null) { - MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata()); - pm.makePersistent(mcm); - } - tbl.setId(mtbl.getId()); - - PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); - List toPersistPrivObjs = new ArrayList<>(); - if 
(principalPrivs != null) { - int now = (int) (System.currentTimeMillis() / 1000); - - Map> userPrivs = principalPrivs.getUserPrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER, "SQL"); - - Map> groupPrivs = principalPrivs.getGroupPrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP, "SQL"); - - Map> rolePrivs = principalPrivs.getRolePrivileges(); - putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE, "SQL"); - } - pm.makePersistentAll(toPersistPrivObjs); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - } - /** * Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of * them to the toPersistPrivObjs. These privilege objects will be persisted as * part of createTable. */ - private void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObjs, + public static void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObjs, int now, Map> privMap, PrincipalType type, String authorizer) { if (privMap != null) { for (Map.Entry> entry : privMap @@ -1454,148 +1378,6 @@ private void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObj } } - @Override - public boolean dropTable(String catName, String dbName, String tableName) - throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean materializedView = false; - boolean success = false; - try { - openTransaction(); - MTable tbl = getMTable(catName, dbName, tableName); - pm.retrieve(tbl); - if (tbl != null) { - materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType()); - // first remove all the grants - List tabGrants = listAllTableGrants(catName, dbName, tableName); - if (CollectionUtils.isNotEmpty(tabGrants)) { - pm.deletePersistentAll(tabGrants); - } - List tblColGrants = listTableAllColumnGrants(catName, dbName, - tableName); - if 
(CollectionUtils.isNotEmpty(tblColGrants)) { - pm.deletePersistentAll(tblColGrants); - } - - List partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName); - if (CollectionUtils.isNotEmpty(partGrants)) { - pm.deletePersistentAll(partGrants); - } - - List partColGrants = listTableAllPartitionColumnGrants(catName, dbName, - tableName); - if (CollectionUtils.isNotEmpty(partColGrants)) { - pm.deletePersistentAll(partColGrants); - } - // delete column statistics if present - try { - deleteTableColumnStatistics(catName, dbName, tableName, null, null); - } catch (NoSuchObjectException e) { - LOG.info("Found no table level column statistics associated with {} to delete", - TableName.getQualified(catName, dbName, tableName)); - } - - List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( - catName, dbName, tableName, null); - if (CollectionUtils.isNotEmpty(tabConstraints)) { - pm.deletePersistentAll(tabConstraints); - } - - preDropStorageDescriptor(tbl.getSd()); - - if (materializedView) { - dropCreationMetadata(tbl.getDatabase().getCatalogName(), - tbl.getDatabase().getName(), tbl.getTableName()); - } - - // then remove the table - pm.deletePersistentAll(tbl); - } - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); - } - return success; - } - - private boolean dropCreationMetadata(String catName, String dbName, String tableName) { - boolean success = false; - try { - openTransaction(); - MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName); - pm.retrieve(mcm); - if (mcm != null) { - pm.deletePersistentAll(mcm); - } - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); - } - return success; - } - - @Override - public List isPartOfMaterializedView(String catName, String dbName, String tblName) { - - boolean committed = false; - Query query = null; - List mViewList = new ArrayList<>(); - - try { - openTransaction(); - - query = pm.newQuery("select from 
org.apache.hadoop.hive.metastore.model.MCreationMetadata"); - - List creationMetadata = (List)query.execute(); - Iterator iter = creationMetadata.iterator(); - - while (iter.hasNext()) - { - MCreationMetadata p = iter.next(); - Set tables = p.getTables(); - for (MMVSource sourceTable : tables) { - MTable table = sourceTable.getTable(); - if (dbName.equals(table.getDatabase().getName()) && tblName.equals(table.getTableName())) { - LOG.info("Cannot drop table " + table.getTableName() + - " as it is being used by MView " + p.getTblName()); - mViewList.add(p.getDbName() + "." + p.getTblName()); - } - } - } - - committed = commitTransaction(); - } finally { - rollbackAndCleanup(committed, query); - } - return mViewList; - } - - @Override - public List dropAllPartitionsAndGetLocations(TableName table, - String baseLocationToNotShow, AtomicReference message) - throws MetaException, InvalidInputException, NoSuchObjectException, InvalidObjectException { - String catName = table.getCat(); - String dbName = table.getDb(); - String tableName = table.getTable(); - return new GetHelper>(catName, dbName, tableName, true, true) { - @Override - protected String describeResult() { - return "delete all partitions from " + table; - } - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.dropAllPartitionsAndGetLocations(getTable().getId(), baseLocationToNotShow, message); - } - @Override - protected List getJdoResult(GetHelper> ctx) - throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - Map partitionLocations = - getPartitionLocations(catName, dbName, tableName, baseLocationToNotShow, -1); - dropPartitionsViaJdo(catName, dbName, tableName, new ArrayList<>(partitionLocations.keySet()), message); - return partitionLocations.values().stream().filter(Objects::nonNull).toList(); - } - }.run(true); - } - private List listAllTableConstraintsWithOptionalConstraintName( String catName, String dbName, String 
tableName, String constraintname) { catName = normalizeIdentifier(catName); @@ -1640,84 +1422,6 @@ private List listAllTableConstraintsWithOptionalConstraintName( return mConstraints; } - @Override - public Table getTable(String catName, String dbName, String tableName) - throws MetaException { - return getTable( - ObjectUtils.defaultIfNull(catName, getDefaultCatalog(conf)), - dbName, tableName, - null - ); - } - - @Override - public Table getTable(String catName, String dbName, String tableName, String writeIdList) - throws MetaException { - boolean commited = false; - Table tbl = null; - try { - openTransaction(); - MTable mtable = getMTable(catName, dbName, tableName); - tbl = convertToTable(mtable); - // Retrieve creation metadata if needed - if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { - tbl.setCreationMetadata( - convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName))); - } - - // If transactional non partitioned table, - // check whether the current version table statistics - // in the metastore comply with the client query's snapshot isolation. - // Note: a partitioned table has table stats and table snapshot in MPartiiton. - if (writeIdList != null) { - boolean isTxn = TxnUtils.isTransactionalTable(tbl); - if (isTxn && !areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); - } else if (isTxn && tbl.getPartitionKeysSize() == 0) { - if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) { - tbl.setIsStatsCompliant(true); - } else { - tbl.setIsStatsCompliant(false); - // Do not make persistent the following state since it is the query specific (not global). 
- StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); - } - } - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - - return tbl; - } - - @Override - public Table getTable(String catalogName, String dbName, String tableName, String writeIdList, long tableId) - throws MetaException { - return getTable( catalogName, dbName, tableName, writeIdList); - } - - @Override - public List getTables(String catName, String dbName, String pattern) - throws MetaException { - return getTables(catName, dbName, pattern, null, -1); - } - - @Override - public List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) - throws MetaException { - try { - // We only support pattern matching via jdo since pattern matching in Java - // might be different than the one used by the metastore backends - return getTablesInternal(catName, dbName, pattern, tableType, - (pattern == null || pattern.equals(".*")), true, limit); - } catch (NoSuchObjectException e) { - throw new MetaException(ExceptionUtils.getStackTrace(e)); - } - } - @Override public List getTableNamesWithStats() throws MetaException, NoSuchObjectException { return new GetListHelper(null, null, null, true, false) { @@ -1807,430 +1511,105 @@ protected List getJdoResult( }.run(false); } - protected List getTablesInternal(String catName, String dbName, String pattern, - TableType tableType, boolean allowSql, boolean allowJdo, int limit) - throws MetaException, NoSuchObjectException { - final String db_name = normalizeIdentifier(dbName); - final String cat_name = normalizeIdentifier(catName); - return new GetListHelper(cat_name, dbName, null, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) - throws MetaException { - return directSql.getTables(cat_name, db_name, tableType, limit); - } + public static StringBuilder 
appendPatternCondition(StringBuilder builder, + String fieldName, String elements, List parameters) { + elements = normalizeIdentifier(elements); + return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters); + } - @Override - protected List getJdoResult(GetHelper> ctx) - throws MetaException, NoSuchObjectException { - return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType, limit); - } - }.run(false); + public static StringBuilder appendSimpleCondition(StringBuilder builder, + String fieldName, String[] elements, List parameters) { + return appendCondition(builder, fieldName, elements, false, parameters); } - private List getTablesInternalViaJdo(String catName, String dbName, String pattern, - TableType tableType, int limit) { - boolean commited = false; - Query query = null; - List tbls = null; - try { - openTransaction(); - dbName = normalizeIdentifier(dbName); - // Take the pattern and split it on the | to get all the composing - // patterns - List parameterVals = new ArrayList<>(); - StringBuilder filterBuilder = new StringBuilder(); - //adds database.name == dbName to the filter - appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals); - appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); - if(pattern != null) { - appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals); + private static StringBuilder appendCondition(StringBuilder builder, + String fieldName, String[] elements, boolean pattern, List parameters) { + if (builder.length() > 0) { + builder.append(" && "); + } + builder.append(" ("); + int length = builder.length(); + for (String element : elements) { + if (pattern) { + element = element.replaceAll("\\*", ".*"); } - if(tableType != null) { - appendSimpleCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals); + parameters.add(element); + if (builder.length() > length) { + 
builder.append(" || "); } - - query = pm.newQuery(MTable.class, filterBuilder.toString()); - query.setResult("tableName"); - query.setOrdering("tableName ascending"); - if (limit >= 0) { - query.setRange(0, limit); + builder.append(fieldName); + if (pattern) { + builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")"); + } else { + builder.append(" == ").append(JDO_PARAM).append(parameters.size()); } - Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[0])); - tbls = new ArrayList<>(names); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); } - return tbls; + builder.append(" )"); + return builder; } - @Override - public List getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException { - List
allMaterializedViews = new ArrayList<>(); - boolean commited = false; - Query query = null; - try { - openTransaction(); - catName = normalizeIdentifier(catName); - query = pm.newQuery(MTable.class); - query.setFilter("database.catalogName == catName && tableType == tt && rewriteEnabled == re"); - query.declareParameters("java.lang.String catName, java.lang.String tt, boolean re"); - Collection mTbls = (Collection) query.executeWithArray( - catName, TableType.MATERIALIZED_VIEW.toString(), true); - for (MTable mTbl : mTbls) { - Table tbl = convertToTable(mTbl); - tbl.setCreationMetadata( - convertToCreationMetadata( - getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName()))); - allMaterializedViews.add(tbl); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); + class AttachedMTableInfo { + MTable mtbl; + MColumnDescriptor mcd; + + public AttachedMTableInfo() {} + + public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) { + this.mtbl = mtbl; + this.mcd = mcd; } - return allMaterializedViews; } - @Override - public List getMaterializedViewsForRewriting(String catName, String dbName) - throws MetaException, NoSuchObjectException { - final String db_name = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); + private AttachedMTableInfo getMTable(String catName, String db, String table, + boolean retrieveCD) { + AttachedMTableInfo nmtbl = new AttachedMTableInfo(); + MTable mtbl = null; boolean commited = false; - Query query = null; - List tbls = null; + Query query = null; try { openTransaction(); - dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(Optional.ofNullable(catName).orElse(getDefaultCatalog(conf))); + db = normalizeIdentifier(db); + table = normalizeIdentifier(table); query = pm.newQuery(MTable.class, - "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re"); + "tableName == table && database.name == db && 
database.catalogName == catname"); query.declareParameters( - "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re"); - query.setResult("tableName"); - Collection names = (Collection) query.executeWithArray( - db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true); - tbls = new ArrayList<>(names); + "java.lang.String table, java.lang.String db, java.lang.String catname"); + query.setUnique(true); + if (LOG.isDebugEnabled()) { + LOG.debug("Executing getMTable for {}", + TableName.getQualified(catName, db, table)); + } + mtbl = (MTable) query.execute(table, db, catName); + pm.retrieve(mtbl); + // Retrieving CD can be expensive and unnecessary, so do it only when required. + if (mtbl != null && retrieveCD) { + pm.retrieve(mtbl.getSd()); + pm.retrieveAll(mtbl.getSd().getCD()); + nmtbl.mcd = mtbl.getSd().getCD(); + } commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); } - return tbls; + nmtbl.mtbl = mtbl; + return nmtbl; } - @Override - public int getDatabaseCount() throws MetaException { - return getObjectCount("name", MDatabase.class.getName()); - } - - @Override - public int getPartitionCount() throws MetaException { - return getObjectCount("partitionName", MPartition.class.getName()); - } - - @Override - public int getTableCount() throws MetaException { - return getObjectCount("tableName", MTable.class.getName()); - } - - private int getObjectCount(String fieldName, String objName) { - Long result = 0L; - boolean commited = false; - Query query = null; - try { - openTransaction(); - String queryStr = - "select count(" + fieldName + ") from " + objName; - query = pm.newQuery(queryStr); - result = (Long) query.execute(); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); - } - return result.intValue(); - } - - @Override - public List getTableMeta(String catName, String dbNames, String tableNames, - List tableTypes) throws MetaException { - - boolean commited = false; - 
Query query = null; - List metas = new ArrayList<>(); - try { - openTransaction(); - // Take the pattern and split it on the | to get all the composing - // patterns - StringBuilder filterBuilder = new StringBuilder(); - List parameterVals = new ArrayList<>(); - appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); - if (dbNames != null && !dbNames.equals("*")) { - appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals); - } - if (tableNames != null && !tableNames.equals("*")) { - appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals); - } - if (tableTypes != null && !tableTypes.isEmpty()) { - appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("getTableMeta with filter " + filterBuilder + " params: " + - StringUtils.join(parameterVals, ", ")); - } - // Add the fetch group here which retrieves the database object along with the MTable - // objects. If we don't prefetch the database object, we could end up in a situation where - // the database gets dropped while we are looping through the tables throwing a - // JDOObjectNotFoundException. 
This causes HMS to go into a retry loop which greatly degrades - // performance of this function when called with dbNames="*" and tableNames="*" (fetch all - // tables in all databases, essentially a full dump) - pm.getFetchPlan().addGroup(FetchGroups.FETCH_DATABASE_ON_MTABLE); - query = pm.newQuery(MTable.class, filterBuilder.toString()) ; - query.setResult("database.name, tableName, tableType, parameters.get(\"comment\"), owner, ownerType"); - List tables = (List) query.executeWithArray(parameterVals.toArray(new String[0])); - for (Object[] table : tables) { - TableMeta metaData = new TableMeta(table[0].toString(), table[1].toString(), table[2].toString()); - metaData.setCatName(catName); - if (table[3] != null) { - metaData.setComments(table[3].toString()); - } - if (table[4] != null) { - metaData.setOwnerName(table[4].toString()); - } - if (table[5] != null) { - metaData.setOwnerType(getPrincipalTypeFromStr(table[5].toString())); - } - metas.add(metaData); - } - commited = commitTransaction(); - } finally { - pm.getFetchPlan().removeGroup(FetchGroups.FETCH_DATABASE_ON_MTABLE); - rollbackAndCleanup(commited, query); - } - return metas; - } - - protected StringBuilder appendPatternCondition(StringBuilder builder, - String fieldName, String elements, List parameters) { - elements = normalizeIdentifier(elements); - return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters); - } - - private StringBuilder appendSimpleCondition(StringBuilder builder, - String fieldName, String[] elements, List parameters) { - return appendCondition(builder, fieldName, elements, false, parameters); - } - - private StringBuilder appendCondition(StringBuilder builder, - String fieldName, String[] elements, boolean pattern, List parameters) { - if (builder.length() > 0) { - builder.append(" && "); - } - builder.append(" ("); - int length = builder.length(); - for (String element : elements) { - if (pattern) { - element = element.replaceAll("\\*", ".*"); - } - 
parameters.add(element); - if (builder.length() > length) { - builder.append(" || "); - } - builder.append(fieldName); - if (pattern) { - builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")"); - } else { - builder.append(" == ").append(JDO_PARAM).append(parameters.size()); - } - } - builder.append(" )"); - return builder; - } - - @Override - public List getAllTables(String catName, String dbName) throws MetaException { - return getTables(catName, dbName, ".*"); - } - - class AttachedMTableInfo { - MTable mtbl; - MColumnDescriptor mcd; - - public AttachedMTableInfo() {} - - public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) { - this.mtbl = mtbl; - this.mcd = mcd; - } - } - - private AttachedMTableInfo getMTable(String catName, String db, String table, - boolean retrieveCD) { - AttachedMTableInfo nmtbl = new AttachedMTableInfo(); - MTable mtbl = null; - boolean commited = false; - Query query = null; - try { - openTransaction(); - catName = normalizeIdentifier(Optional.ofNullable(catName).orElse(getDefaultCatalog(conf))); - db = normalizeIdentifier(db); - table = normalizeIdentifier(table); - query = pm.newQuery(MTable.class, - "tableName == table && database.name == db && database.catalogName == catname"); - query.declareParameters( - "java.lang.String table, java.lang.String db, java.lang.String catname"); - query.setUnique(true); - if (LOG.isDebugEnabled()) { - LOG.debug("Executing getMTable for {}", - TableName.getQualified(catName, db, table)); - } - mtbl = (MTable) query.execute(table, db, catName); - pm.retrieve(mtbl); - // Retrieving CD can be expensive and unnecessary, so do it only when required. 
- if (mtbl != null && retrieveCD) { - pm.retrieve(mtbl.getSd()); - pm.retrieveAll(mtbl.getSd().getCD()); - nmtbl.mcd = mtbl.getSd().getCD(); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); - } - nmtbl.mtbl = mtbl; - return nmtbl; - } - - private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) { - boolean commited = false; - MCreationMetadata mcm = null; - Query query = null; - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - try { - openTransaction(); - query = pm.newQuery( - MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat"); - query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat"); - query.setUnique(true); - mcm = (MCreationMetadata) query.execute(tblName, dbName, catName); - pm.retrieve(mcm); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); - } - return mcm; - } - - private MTable getMTable(String catName, String db, String table) { - AttachedMTableInfo nmtbl = getMTable(catName, db, table, false); - return nmtbl.mtbl; - } - - @Override - public List
getTableObjectsByName(String catName, String db, List tbl_names, - GetProjectionsSpec projectionSpec, String tablePattern) throws MetaException, UnknownDBException { - List
tables = new ArrayList<>(); - boolean committed = false; - Query query = null; - List mtables = null; - - try { - openTransaction(); - catName = normalizeIdentifier(catName); - - List lowered_tbl_names = normalizeIdentifiers(tbl_names); - StringBuilder filterBuilder = new StringBuilder(); - List parameterVals = new ArrayList<>(); - appendPatternCondition(filterBuilder, "database.name", db, parameterVals); - appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); - if(tbl_names != null){ - appendSimpleCondition(filterBuilder, "tableName", lowered_tbl_names.toArray(new String[0]), parameterVals); - } - if(tablePattern != null){ - appendPatternCondition(filterBuilder, "tableName", tablePattern, parameterVals); - } - query = pm.newQuery(MTable.class, filterBuilder.toString()) ; - List projectionFields = null; - - // If a projection specification has been set, validate it and translate it to JDO columns. - if (projectionSpec != null) { - //Validate the projection fields for multi-valued fields. - projectionFields = TableFields.getMFieldNames(projectionSpec.getFieldList()); - } - - // If the JDO translation resulted in valid JDO columns names, use it to create a projection for the JDO query. - if (projectionFields != null) { - // fetch partially filled tables using result clause - query.setResult(Joiner.on(',').join(projectionFields)); - } - - if (projectionFields == null) { - mtables = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - } else { - if (projectionFields.size() > 1) { - // Execute the query to fetch the partial results. - List results = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - // Declare the tables array to return the list of tables - mtables = new ArrayList<>(results.size()); - // Iterate through each row of the result and create the MTable object. 
- for (Object[] row : results) { - MTable mtable = new MTable(); - int i = 0; - for (Object val : row) { - MetaStoreServerUtils.setNestedProperty(mtable, projectionFields.get(i), val, true); - i++; - } - mtables.add(mtable); - } - } else if (projectionFields.size() == 1) { - // Execute the query to fetch the partial results. - List results = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - // Iterate through each row of the result and create the MTable object. - mtables = new ArrayList<>(results.size()); - for (Object row : results) { - MTable mtable = new MTable(); - MetaStoreServerUtils.setNestedProperty(mtable, projectionFields.get(0), row, true); - mtables.add(mtable); - } - } - } - - if (mtables == null || mtables.isEmpty()) { - ensureGetDatabase(catName, db); - } else { - for (Iterator iter = mtables.iterator(); iter.hasNext(); ) { - Table tbl = convertToTable((MTable) iter.next()); - // Retrieve creation metadata if needed - if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { - tbl.setCreationMetadata( - convertToCreationMetadata( - getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName()))); - } - tables.add(tbl); - } - } - committed = commitTransaction(); - } finally { - rollbackAndCleanup(committed, query); - } - return tables; - } - - @Override - public List
getTableObjectsByName(String catName, String db, List tbl_names) - throws MetaException, UnknownDBException { - return getTableObjectsByName(catName, db, tbl_names, null, null); + private MTable getMTable(String catName, String db, String table) { + AttachedMTableInfo nmtbl = getMTable(catName, db, table, false); + return nmtbl.mtbl; } /** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */ - private List convertList(List dnList) { + private static List convertList(List dnList) { return (dnList == null) ? null : Lists.newArrayList(dnList); } /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */ - private Map convertMap(Map dnMap, GetPartitionsArgs... args) { + private static Map convertMap(Map dnMap, Configuration conf, GetPartitionsArgs... args) { Map parameters = MetaStoreServerUtils.trimMapNulls(dnMap, - MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); + MetastoreConf.getBoolVar(conf, ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); if (parameters != null && args != null && args.length == 1) { // Pattern matching in Java might be different from the one used by the metastore backends, // An underscore (_) in pattern stands for (matches) any single character; @@ -2262,7 +1641,7 @@ private Map convertMap(Map dnMap, GetPartitionsA return parameters; } - private Table convertToTable(MTable mtbl) throws MetaException { + public static Table convertToTable(MTable mtbl, Configuration conf) throws MetaException { if (mtbl == null) { return null; } @@ -2284,11 +1663,11 @@ private Table convertToTable(MTable mtbl) throws MetaException { viewExpandedText = mtbl.getViewExpandedText(); } } - Map parameters = convertMap(mtbl.getParameters()); + Map parameters = convertMap(mtbl.getParameters(), conf); boolean isAcidTable = TxnUtils.isAcidTable(parameters); final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase() != null ? 
mtbl.getDatabase().getName() : null, mtbl.getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl.getRetention(), - convertToStorageDescriptor(mtbl.getSd(), false, isAcidTable), + convertToStorageDescriptor(mtbl.getSd(), false, isAcidTable, conf), convertToFieldSchemas(mtbl.getPartitionKeys()), parameters, viewOriginalText, viewExpandedText, tableType); @@ -2307,7 +1686,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { return t; } - private MTable convertToMTable(Table tbl) throws InvalidObjectException, + public static MTable convertToMTable(Table tbl, RawStore base) throws InvalidObjectException, MetaException { // NOTE: we don't set writeId in this method. Write ID is only set after validating the // existing write ID against the caller's valid list. @@ -2315,9 +1694,9 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, return null; } MDatabase mdb = null; - String catName = tbl.isSetCatName() ? tbl.getCatName() : getDefaultCatalog(conf); + String catName = tbl.isSetCatName() ? 
tbl.getCatName() : getDefaultCatalog(base.getConf()); try { - mdb = getMDatabase(catName, tbl.getDbName()); + mdb = base.ensureGetMDatabase(catName, tbl.getDbName()); } catch (NoSuchObjectException e) { LOG.error("Could not convert to MTable", e); throw new InvalidObjectException("Database " + @@ -2351,7 +1730,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, tableType); } - private List convertToMFieldSchemas(List keys) { + private static List convertToMFieldSchemas(List keys) { List mkeys = null; if (keys != null) { mkeys = new ArrayList<>(keys.size()); @@ -2363,7 +1742,7 @@ private List convertToMFieldSchemas(List keys) { return mkeys; } - protected List convertToFieldSchemas(List mkeys) { + public static List convertToFieldSchemas(List mkeys) { List keys = null; if (mkeys != null) { keys = new ArrayList<>(); @@ -2375,7 +1754,7 @@ protected List convertToFieldSchemas(List mkeys) { return keys; } - private List convertToMOrders(List keys) { + private static List convertToMOrders(List keys) { List mkeys = null; if (keys != null) { mkeys = new ArrayList<>(); @@ -2386,7 +1765,7 @@ private List convertToMOrders(List keys) { return mkeys; } - private List convertToOrders(List mkeys) { + private static List convertToOrders(List mkeys) { List keys = null; if (mkeys != null) { keys = new ArrayList<>(); @@ -2397,7 +1776,8 @@ private List convertToOrders(List mkeys) { return keys; } - private SerDeInfo convertToSerDeInfo(MSerDeInfo ms, boolean allowNull) throws MetaException { + private static SerDeInfo convertToSerDeInfo(MSerDeInfo ms, Configuration conf, boolean allowNull) + throws MetaException { if (ms == null) { if (allowNull) { return null; @@ -2405,7 +1785,7 @@ private SerDeInfo convertToSerDeInfo(MSerDeInfo ms, boolean allowNull) throws Me throw new MetaException("Invalid SerDeInfo object"); } SerDeInfo serde = - new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters())); + new SerDeInfo(ms.getName(), 
ms.getSerializationLib(), convertMap(ms.getParameters(), conf)); if (ms.getDescription() != null) { serde.setDescription(ms.getDescription()); } @@ -2421,7 +1801,7 @@ private SerDeInfo convertToSerDeInfo(MSerDeInfo ms, boolean allowNull) throws Me return serde; } - private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException { + private static MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException { if (ms == null) { throw new MetaException("Invalid SerDeInfo object"); } @@ -2435,15 +1815,15 @@ private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException { * @param cols the columns the column descriptor contains * @return a new column descriptor db-backed object */ - private MColumnDescriptor createNewMColumnDescriptor(List cols) { + private static MColumnDescriptor createNewMColumnDescriptor(List cols) { if (cols == null) { return null; } return new MColumnDescriptor(cols); } - private StorageDescriptor convertToStorageDescriptor( - MStorageDescriptor msd, boolean noFS, boolean isAcidTable) throws MetaException { + private static StorageDescriptor convertToStorageDescriptor( + MStorageDescriptor msd, boolean noFS, boolean isAcidTable, Configuration conf) throws MetaException { if (msd == null) { return null; } @@ -2457,11 +1837,11 @@ private StorageDescriptor convertToStorageDescriptor( List bucList = convertList(msd.getBucketCols()); SkewedInfo skewedInfo = null; - Map sdParams = isAcidTable ? Collections.emptyMap() : convertMap(msd.getParameters()); + Map sdParams = isAcidTable ? Collections.emptyMap() : convertMap(msd.getParameters(), conf); StorageDescriptor sd = new StorageDescriptor(convertToFieldSchemas(mFieldSchemas), msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd .isCompressed(), msd.getNumBuckets(), - (!isAcidTable) ? convertToSerDeInfo(msd.getSerDeInfo(), true) + (!isAcidTable) ? 
convertToSerDeInfo(msd.getSerDeInfo(), conf, true) : new SerDeInfo(msd.getSerDeInfo().getName(), msd.getSerDeInfo().getSerializationLib(), Collections.emptyMap()), bucList , orderList, sdParams); if (!isAcidTable) { @@ -2480,7 +1860,7 @@ private StorageDescriptor convertToStorageDescriptor( /** * Convert a list of MStringList to a list of list string */ - private List> convertToSkewedValues(List mLists) { + private static List> convertToSkewedValues(List mLists) { List> lists = null; if (mLists != null) { lists = new ArrayList<>(); @@ -2491,7 +1871,7 @@ private List> convertToSkewedValues(List mLists) { return lists; } - private List convertToMStringLists(List> mLists) { + private static List convertToMStringLists(List> mLists) { List lists = null ; if (null != mLists) { lists = new ArrayList<>(); @@ -2505,7 +1885,7 @@ private List convertToMStringLists(List> mLists) { /** * Convert a MStringList Map to a Map */ - private Map, String> covertToSkewedMap(Map mMap) { + private static Map, String> covertToSkewedMap(Map mMap) { Map, String> map = null; if (mMap != null) { map = new HashMap<>(); @@ -2520,7 +1900,7 @@ private Map, String> covertToSkewedMap(Map mMa /** * Covert a Map to a MStringList Map */ - private Map covertToMapMStringList(Map, String> mMap) { + private static Map covertToMapMStringList(Map, String> mMap) { Map map = null; if (mMap != null) { map = new HashMap<>(); @@ -2538,7 +1918,7 @@ private Map covertToMapMStringList(Map, String * @param sd the storage descriptor to wrap in a db-backed object * @return the storage descriptor db-backed object */ - private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) + private static MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) throws MetaException { if (sd == null) { return null; @@ -2555,7 +1935,7 @@ private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) * @param mcd the db-backed column descriptor * @return the db-backed storage descriptor 
object */ - private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd, + private static MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd, MColumnDescriptor mcd) throws MetaException { if (sd == null) { return null; @@ -2573,29 +1953,34 @@ private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd, .getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories()); } - private MCreationMetadata convertToMCreationMetadata(CreationMetadata m) { + public static MCreationMetadata convertToMCreationMetadata(CreationMetadata m, RawStore base) + throws MetaException { if (m == null) { return null; } assert !m.isSetMaterializationTime(); - Set tablesUsed = new HashSet<>(); - if (m.isSetSourceTables()) { - for (SourceTable sourceTable : m.getSourceTables()) { - tablesUsed.add(convertToSourceTable(m.getCatName(), sourceTable)); - } - } else { - for (String fullyQualifiedName : m.getTablesUsed()) { - tablesUsed.add(convertToSourceTable(m.getCatName(), fullyQualifiedName)); + try { + Set tablesUsed = new HashSet<>(); + if (m.isSetSourceTables()) { + for (SourceTable sourceTable : m.getSourceTables()) { + tablesUsed.add(convertToSourceTable(m.getCatName(), sourceTable, base)); + } + } else { + for (String fullyQualifiedName : m.getTablesUsed()) { + tablesUsed.add(convertToSourceTable(m.getCatName(), fullyQualifiedName, base)); + } } + return new MCreationMetadata(normalizeIdentifier(m.getCatName()), normalizeIdentifier(m.getDbName()), + normalizeIdentifier(m.getTblName()), tablesUsed, m.getValidTxnList(), System.currentTimeMillis()); + } catch (NoSuchObjectException nse) { + throw new MetaException(nse.getMessage()); } - return new MCreationMetadata(normalizeIdentifier(m.getCatName()), - normalizeIdentifier(m.getDbName()), normalizeIdentifier(m.getTblName()), - tablesUsed, m.getValidTxnList(), System.currentTimeMillis()); } - private MMVSource convertToSourceTable(String catalog, SourceTable sourceTable) { + public static 
MMVSource convertToSourceTable(String catalog, SourceTable sourceTable, RawStore base) + throws NoSuchObjectException { Table table = sourceTable.getTable(); - MTable mtbl = getMTable(catalog, table.getDbName(), table.getTableName(), false).mtbl; + MTable mtbl = base.ensureGetMTable(catalog, table.getDbName(), table.getTableName()); MMVSource source = new MMVSource(); source.setTable(mtbl); source.setInsertedCount(sourceTable.getInsertedCount()); @@ -2608,16 +1993,17 @@ private MMVSource convertToSourceTable(String catalog, SourceTable sourceTable) * This method resets the stats to 0 and supports only backward compatibility with clients does not * send {@link SourceTable} instances. * - * Use {@link ObjectStore#convertToSourceTable(String, SourceTable)} instead. + * Use {@link ObjectStore#convertToSourceTable(String, SourceTable, RawStore)} instead. * * @param catalog Catalog name where source table is located * @param fullyQualifiedTableName fully qualified name of source table * @return {@link MMVSource} instance represents this source table. 
*/ @Deprecated - private MMVSource convertToSourceTable(String catalog, String fullyQualifiedTableName) { + private static MMVSource convertToSourceTable(String catalog, String fullyQualifiedTableName, RawStore base) + throws NoSuchObjectException { String[] names = fullyQualifiedTableName.split("\\."); - MTable mtbl = getMTable(catalog, names[0], names[1], false).mtbl; + MTable mtbl = base.ensureGetMTable(catalog, names[0], names[1]); MMVSource source = new MMVSource(); source.setTable(mtbl); source.setInsertedCount(0L); @@ -2626,30 +2012,38 @@ private MMVSource convertToSourceTable(String catalog, String fullyQualifiedTabl return source; } - private CreationMetadata convertToCreationMetadata(MCreationMetadata s) throws MetaException { + public static CreationMetadata convertToCreationMetadata(MCreationMetadata s, RawStore base) + throws MetaException { if (s == null) { return null; } - Set tablesUsed = new HashSet<>(); - List sourceTables = new ArrayList<>(s.getTables().size()); - for (MMVSource mtbl : s.getTables()) { - tablesUsed.add(Warehouse.getQualifiedName(mtbl.getTable().getDatabase().getName(), mtbl.getTable().getTableName())); - sourceTables.add(convertToSourceTable(mtbl, s.getCatalogName())); - } - CreationMetadata r = new CreationMetadata(s.getCatalogName(), - s.getDbName(), s.getTblName(), tablesUsed); - r.setMaterializationTime(s.getMaterializationTime()); - if (s.getTxnList() != null) { - r.setValidTxnList(s.getTxnList()); + try { + Set tablesUsed = new HashSet<>(); + List sourceTables = new ArrayList<>(s.getTables().size()); + for (MMVSource mtbl : s.getTables()) { + tablesUsed.add( + Warehouse.getQualifiedName(mtbl.getTable().getDatabase().getName(), mtbl.getTable().getTableName())); + sourceTables.add(convertToSourceTable(mtbl, s.getCatalogName(), base)); + } + CreationMetadata r = new CreationMetadata(s.getCatalogName(), s.getDbName(), s.getTblName(), tablesUsed); + r.setMaterializationTime(s.getMaterializationTime()); + if (s.getTxnList() != 
null) { + r.setValidTxnList(s.getTxnList()); + } + r.setSourceTables(sourceTables); + return r; + } catch (NoSuchObjectException nse) { + throw new MetaException(nse.getMessage()); } - r.setSourceTables(sourceTables); - return r; } - private SourceTable convertToSourceTable(MMVSource mmvSource, String catalogName) throws MetaException { + private static SourceTable convertToSourceTable(MMVSource mmvSource, String catalogName, RawStore base) + throws MetaException, NoSuchObjectException { SourceTable sourceTable = new SourceTable(); MTable mTable = mmvSource.getTable(); - Table table = getTable(catalogName, mTable.getDatabase().getName(), mTable.getTableName()); + Table table = + convertToTable(base.ensureGetMTable(catalogName, mTable.getDatabase().getName(), mTable.getTableName()), + base.getConf()); sourceTable.setTable(table); sourceTable.setInsertedCount(mmvSource.getInsertedCount()); sourceTable.setUpdatedCount(mmvSource.getUpdatedCount()); @@ -2657,302 +2051,49 @@ private SourceTable convertToSourceTable(MMVSource mmvSource, String catalogName return sourceTable; } - @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + /** + * Convert a Partition object into an MPartition, which is an object backed by the db + * If the Partition's set of columns is the same as the parent table's AND useTableCD + * is true, then this partition's storage descriptor's column descriptor will point + * to the same one as the table's storage descriptor. + * @param part the partition to convert + * @param mt the parent table object + * @return the model partition object, and null if the input partition is null. 
+ */ + public static MPartition convertToMPart(Partition part, MTable mt) throws InvalidObjectException, MetaException { - boolean success = false; - openTransaction(); - try { - addPartitionsInternal(catName, dbName, tblName, parts); - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); + // NOTE: we don't set writeId in this method. Write ID is only set after validating the + // existing write ID against the caller's valid list. + if (part == null) { + return null; + } + if (mt == null) { + throw new InvalidObjectException( + "Partition doesn't have a valid table or database name"); } - return success; - } - - private void addPartitionsInternal(String catName, String dbName, - String tblName, List parts) - throws MetaException, InvalidObjectException { - List tabGrants = null; - List tabColumnGrants = null; - MTable table = this.getMTable(catName, dbName, tblName); - if (table == null) { - throw new InvalidObjectException("Unable to add partitions because " - + TableName.getQualified(catName, dbName, tblName) + - " does not exist"); - } - if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(catName, dbName, tblName); - tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName); - } - List mParts = new ArrayList<>(); - List> mPartPrivilegesList = new ArrayList<>(); - List> mPartColPrivilegesList = new ArrayList<>(); - for (Partition part : parts) { - if (!part.getTableName().equalsIgnoreCase(tblName) || !part.getDbName().equalsIgnoreCase(dbName)) { - throw new MetaException("Partition does not belong to target table " - + dbName + "." 
+ tblName + ": " + part); - } - MPartition mpart = convertToMPart(part, table); - mParts.add(mpart); - int now = (int) (System.currentTimeMillis() / 1000); - List mPartPrivileges = new ArrayList<>(); - if (tabGrants != null) { - for (MTablePrivilege tab: tabGrants) { - MPartitionPrivilege mPartPrivilege = new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(), - mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption(), - tab.getAuthorizer()); - mPartPrivileges.add(mPartPrivilege); - } - } - List mPartColumnPrivileges = new ArrayList<>(); - if (tabColumnGrants != null) { - for (MTableColumnPrivilege col : tabColumnGrants) { - MPartitionColumnPrivilege mPartColumnPrivilege = new MPartitionColumnPrivilege(col.getPrincipalName(), - col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now, col.getGrantor(), - col.getGrantorType(), col.getGrantOption(), col.getAuthorizer()); - mPartColumnPrivileges.add(mPartColumnPrivilege); - } - } - mPartPrivilegesList.add(mPartPrivileges); - mPartColPrivilegesList.add(mPartColumnPrivileges); + // If this partition's set of columns is the same as the parent table's, + // use the parent table's, so we do not create a duplicate column descriptor, + // thereby saving space + MStorageDescriptor msd; + if (mt.getSd() != null && mt.getSd().getCD() != null && + mt.getSd().getCD().getCols() != null && + part.getSd() != null && + convertToFieldSchemas(mt.getSd().getCD().getCols()). 
+ equals(part.getSd().getCols())) { + msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD()); + } else { + msd = convertToMStorageDescriptor(part.getSd()); } - if (CollectionUtils.isNotEmpty(mParts)) { - GetHelper helper = new GetHelper(null, null, null, true, true) { - @Override - protected Void getSqlResult(GetHelper ctx) throws MetaException { - directSql.addPartitions(mParts, mPartPrivilegesList, mPartColPrivilegesList); - return null; - } - @Override - protected Void getJdoResult(GetHelper ctx) { - List toPersist = new ArrayList<>(mParts); - mPartPrivilegesList.forEach(toPersist::addAll); - mPartColPrivilegesList.forEach(toPersist::addAll); - pm.makePersistentAll(toPersist); - pm.flush(); - return null; - } + return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt + .getPartitionKeys()), part.getValues()), mt, part.getValues(), part + .getCreateTime(), part.getLastAccessTime(), + msd, part.getParameters()); + } - @Override - protected String describeResult() { - return "add partitions"; - } - }; - try { - helper.run(false); - } catch (NoSuchObjectException e) { - throw newMetaException(e); - } - } - } - - @Override - public boolean addPartition(Partition part) throws InvalidObjectException, - MetaException { - boolean committed = false; - try { - openTransaction(); - String catName = part.isSetCatName() ? 
part.getCatName() : getDefaultCatalog(conf); - addPartitionsInternal(catName, part.getDbName(), part.getTableName(), Arrays.asList(part)); - committed = commitTransaction(); - } finally { - rollbackAndCleanup(committed, null); - } - return committed; - } - - @Override - public Partition getPartition(String catName, String dbName, String tableName, - List part_vals) throws NoSuchObjectException, MetaException { - return getPartition(catName, dbName, tableName, part_vals, null); - } - - @Override - public Partition getPartition(String catName, String dbName, String tableName, - List part_vals, - String validWriteIds) - throws NoSuchObjectException, MetaException { - Partition part = null; - boolean committed = false; - try { - openTransaction(); - MTable table = this.getMTable(catName, dbName, tableName); - if (table == null) { - throw new NoSuchObjectException("Unable to get partition because " - + TableName.getQualified(catName, dbName, tableName) + - " does not exist"); - } - MPartition mpart = getMPartition(catName, dbName, tableName, part_vals, table); - part = convertToPart(catName, dbName, tableName, mpart, - TxnUtils.isAcidTable(table.getParameters())); - committed = commitTransaction(); - if (part == null) { - throw new NoSuchObjectException("partition values=" - + part_vals.toString()); - } - - part.setValues(part_vals); - // If transactional table partition, check whether the current version partition - // statistics in the metastore comply with the client query's snapshot isolation. - long statsWriteId = mpart.getWriteId(); - if (TxnUtils.isTransactionalTable(table.getParameters())) { - if (!areTxnStatsSupported) { - // Do not make persistent the following state since it is query specific (not global). 
- StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); - } else if (validWriteIds != null) { - if (isCurrentStatsValidForTheQuery(part, statsWriteId, validWriteIds, false)) { - part.setIsStatsCompliant(true); - } else { - part.setIsStatsCompliant(false); - // Do not make persistent the following state since it is query specific (not global). - StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); - } - } - } - } finally { - rollbackAndCleanup(committed, (Query)null); - } - return part; - } - - /** - * Getting MPartition object. Use this method only if the partition name is not available, - * since then the table will be queried to get the partition keys. - * @param catName The catalogue - * @param dbName The database - * @param tableName The table - * @param part_vals The values defining the partition - * @return The MPartition object in the backend database - */ - private MPartition getMPartition(String catName, String dbName, String tableName, List part_vals, MTable mtbl) - throws MetaException { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - boolean committed = false; - MPartition result = null; - try { - openTransaction(); - if (mtbl == null) { - mtbl = getMTable(catName, dbName, tableName); - if (mtbl == null) { - return null; - } - } - // Change the query to use part_vals instead of the name which is - // redundant TODO: callers of this often get part_vals out of name for no reason... 
- String name = - Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); - result = getMPartition(catName, dbName, tableName, name); - committed = commitTransaction(); - } finally { - rollbackAndCleanup(committed, (Query)null); - } - return result; - } - - /** - * Getting MPartition object. Use this method if the partition name is available, so we do not - * query the table object again. - * @param catName The catalogue - * @param dbName The database - * @param tableName The table - * @param name The partition name - * @return The MPartition object in the backend database - */ - private MPartition getMPartition(String catName, String dbName, String tableName, - String name) throws MetaException { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - List mparts = null; - MPartition ret = null; - boolean commited = false; - Query query = null; - try { - openTransaction(); - query = - pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && partitionName == t3 " + - " && table.database.catalogName == t4"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " - + "java.lang.String t4"); - mparts = (List) query.executeWithArray(tableName, dbName, name, catName); - pm.retrieveAll(mparts); - commited = commitTransaction(); - // We need to compare partition name with requested name since some DBs - // (like MySQL, Derby) considers 'a' = 'a ' whereas others like (Postgres, - // Oracle) doesn't exhibit this problem. 
- if (CollectionUtils.isNotEmpty(mparts)) { - if (mparts.size() > 1) { - throw new MetaException( - "Expecting only one partition but more than one partitions are found."); - } else { - MPartition mpart = mparts.get(0); - if (name.equals(mpart.getPartitionName())) { - ret = mpart; - } else { - throw new MetaException("Expecting a partition with name " + name - + ", but metastore is returning a partition with name " + mpart.getPartitionName() - + "."); - } - } - } - } finally { - rollbackAndCleanup(commited, query); - } - return ret; - } - - /** - * Convert a Partition object into an MPartition, which is an object backed by the db - * If the Partition's set of columns is the same as the parent table's AND useTableCD - * is true, then this partition's storage descriptor's column descriptor will point - * to the same one as the table's storage descriptor. - * @param part the partition to convert - * @param mt the parent table object - * @return the model partition object, and null if the input partition is null. - */ - private MPartition convertToMPart(Partition part, MTable mt) - throws InvalidObjectException, MetaException { - // NOTE: we don't set writeId in this method. Write ID is only set after validating the - // existing write ID against the caller's valid list. - if (part == null) { - return null; - } - if (mt == null) { - throw new InvalidObjectException( - "Partition doesn't have a valid table or database name"); - } - - // If this partition's set of columns is the same as the parent table's, - // use the parent table's, so we do not create a duplicate column descriptor, - // thereby saving space - MStorageDescriptor msd; - if (mt.getSd() != null && mt.getSd().getCD() != null && - mt.getSd().getCD().getCols() != null && - part.getSd() != null && - convertToFieldSchemas(mt.getSd().getCD().getCols()). 
- equals(part.getSd().getCols())) { - msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD()); - } else { - msd = convertToMStorageDescriptor(part.getSd()); - } - - return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt - .getPartitionKeys()), part.getValues()), mt, part.getValues(), part - .getCreateTime(), part.getLastAccessTime(), - msd, part.getParameters()); - } - - private Partition convertToPart(String catName, String dbName, String tblName, - MPartition mpart, boolean isAcidTable, GetPartitionsArgs... args) + public static Partition convertToPart(String catName, String dbName, String tblName, + MPartition mpart, boolean isAcidTable, Configuration conf, GetPartitionsArgs... args) throws MetaException { if (mpart == null) { return null; @@ -2960,11 +2101,11 @@ private Partition convertToPart(String catName, String dbName, String tblName, catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); - Map params = convertMap(mpart.getParameters(), args); + Map params = convertMap(mpart.getParameters(), conf, args); boolean noFS = args != null && args.length == 1 && args[0].isSkipColumnSchemaForPartition(); Partition p = new Partition(convertList(mpart.getValues()), dbName, tblName, mpart.getCreateTime(), mpart.getLastAccessTime(), - convertToStorageDescriptor(mpart.getSd(), noFS, isAcidTable), params); + convertToStorageDescriptor(mpart.getSd(), noFS, isAcidTable, conf), params); p.setCatName(catName); if(mpart.getWriteId()>0) { p.setWriteId(mpart.getWriteId()); @@ -2974,6192 +2115,1269 @@ private Partition convertToPart(String catName, String dbName, String tblName, return p; } - @Override - public boolean dropPartition(String catName, String dbName, String tableName, String partName) - throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean success = false; - try { - openTransaction(); - dropPartitionsInternal(catName, dbName, 
tableName, Arrays.asList(partName), true, true); - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); + public static List convertToParts(String catName, String dbName, String tblName, + List mparts, boolean isAcidTable, Configuration conf, GetPartitionsArgs args) + throws MetaException { + List parts = new ArrayList<>(mparts.size()); + for (MPartition mp : mparts) { + parts.add(convertToPart(catName, dbName, tblName, mp, isAcidTable, conf, args)); + Deadline.checkTimeout(); } - return success; + return parts; } - @Override - public void dropPartitions(String catName, String dbName, String tblName, List partNames) - throws MetaException, NoSuchObjectException { - dropPartitionsInternal(catName, dbName, tblName, partNames, true, true); + public static Pair> getPartQueryWithParams( + PersistenceManager pm, + String catName, String dbName, String tblName, + List partNames) { + Query query = pm.newQuery(); + Map params = new HashMap<>(); + String filterStr = getJDOFilterStrForPartitionNames(catName, dbName, tblName, partNames, params); + query.setFilter(filterStr); + LOG.debug(" JDOQL filter is {}", filterStr); + query.declareParameters(makeParameterDeclarationString(params)); + return Pair.of(query, params); } - @VisibleForTesting - void dropPartitionsInternal(String catName, String dbName, String tblName, - List partNames, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { - if (CollectionUtils.isEmpty(partNames)) { - return; + public static String getJDOFilterStrForPartitionNames(String catName, String dbName, String tblName, + List partNames, Map params) { + StringBuilder sb = new StringBuilder( + "table.tableName == t1 && table.database.name == t2 &&" + " table.database.catalogName == t3 && ("); + params.put("t1", normalizeIdentifier(tblName)); + params.put("t2", normalizeIdentifier(dbName)); + params.put("t3", normalizeIdentifier(catName)); + int n = 0; + for (Iterator itr = partNames.iterator(); 
itr.hasNext(); ) { + String pn = "p" + n; + n++; + String part = itr.next(); + params.put(pn, part); + sb.append("partitionName == ").append(pn); + sb.append(" || "); } - new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - directSql.dropPartitionsViaSqlFilter(catName, dbName, tblName, partNames); - return Collections.emptyList(); - } - @Override - protected List getJdoResult(GetHelper> ctx) throws MetaException { - dropPartitionsViaJdo(catName, dbName, tblName, partNames, new AtomicReference<>()); - return Collections.emptyList(); - } - }.run(false); + sb.setLength(sb.length() - 4); // remove the last " || " + sb.append(')'); + return sb.toString(); } - private void dropPartitionsViaJdo(String catName, String dbName, String tblName, - List partNames, AtomicReference message) throws MetaException { - boolean success = false; - - if (partNames.isEmpty()) { - return; - } - openTransaction(); - - int batch = batchSize == NO_BATCHING ? 1 : (partNames.size() + batchSize) / batchSize; - AtomicLong batchIdx = new AtomicLong(1); - AtomicLong timeSpent = new AtomicLong(0); - try { - Batchable.runBatched(batchSize, partNames, new Batchable() { - @Override - public List run(List input) throws MetaException { - StringBuilder progress = new StringBuilder("Dropping partitions, batch: "); - long start = System.currentTimeMillis(); - progress.append(batchIdx.get()).append("/").append(batch); - if (batchIdx.get() > 1) { - long leftTime = (batch - batchIdx.get()) * timeSpent.get() / batchIdx.get(); - progress.append(", time left: ").append(leftTime).append("ms"); - } - message.set(progress.toString()); - // Delete all things. 
- dropPartitionGrantsNoTxn(catName, dbName, tblName, input); - dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, input); - dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, input); - - // CDs are reused; go try partition SDs, detach all CDs from SDs, then remove unused CDs. - for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, input)) { - removeUnusedColumnDescriptor(mcd); - } - dropPartitionsNoTxn(catName, dbName, tblName, input); - timeSpent.addAndGet(System.currentTimeMillis() - start); - batchIdx.incrementAndGet(); - return Collections.emptyList(); - } - }); - - if (!(success = commitTransaction())) { - throw new MetaException("Failed to drop partitions"); - } - } finally { - rollbackAndCleanup(success, null); + public static String makeParameterDeclarationString(Map params) { + //Create the parameter declaration string + StringBuilder paramDecl = new StringBuilder(); + for (String key : params.keySet()) { + paramDecl.append(", java.lang.String ").append(key); } + return paramDecl.toString(); } - @Override - public List getPartitions(String catName, String dbName, String tableName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - List results = Collections.emptyList(); - boolean success = false; - - LOG.debug("Executing getPartitions"); + /** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. 
*/ + @VisibleForTesting + public abstract class GetHelper { + private final boolean isInTxn, doTrace, allowJdo; + private boolean doUseDirectSql; + private long start; + private Table table; + protected final List partitionFields; + protected final String catName, dbName, tblName; + private boolean success = false; + protected T results = null; - try { - openTransaction(); - results = getPartitionsInternal(catName, dbName, tableName, true, true, args); - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); + public GetHelper(String catalogName, String dbName, String tblName, + boolean allowSql, boolean allowJdo) throws MetaException { + this(catalogName, dbName, tblName, null, allowSql, allowJdo); } - return results; - } - @Override - public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); + public GetHelper(String catalogName, String dbName, String tblName, + List fields, boolean allowSql, boolean allowJdo) throws MetaException { + assert allowSql || allowJdo; + this.allowJdo = allowJdo; + this.catName = (catalogName != null) ? normalizeIdentifier(catalogName) : null; + this.dbName = (dbName != null) ? 
normalizeIdentifier(dbName) : null; + this.partitionFields = fields; + if (tblName != null) { + this.tblName = normalizeIdentifier(tblName); + } else { + // tblName can be null in cases of Helper being used at a higher + // abstraction level, such as with datbases + this.tblName = null; + this.table = null; + } + this.doTrace = LOG.isDebugEnabled(); + this.isInTxn = isActiveTransaction(); - boolean success = false; - Query query = null; - Map partLocations = new HashMap<>(); - try { - openTransaction(); - LOG.debug("Executing getPartitionLocations"); - - query = pm.newQuery(MPartition.class); - query.setFilter("this.table.database.catalogName == t1 && this.table.database.name == t2 " - + "&& this.table.tableName == t3"); - query.declareParameters("String t1, String t2, String t3"); - query.setResult("this.partitionName, this.sd.location"); - if (max >= 0) { - //Row limit specified, set it on the Query - query.setRange(0, max); - } - - List result = (List)query.execute(catName, dbName, tblName); - for(Object[] row:result) { - String location = (String)row[1]; - if (baseLocationToNotShow != null && location != null - && FileUtils.isSubdirectory(baseLocationToNotShow, location)) { - location = null; - } - partLocations.put((String)row[0], location); + boolean isConfigEnabled = MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL); + if (isConfigEnabled && directSql == null) { + directSql = new MetaStoreDirectSql(pm, getConf(), ""); } - LOG.debug("Done executing query for getPartitionLocations"); - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, query); - } - return partLocations; - } - protected List getPartitionsInternal(String catName, String dbName, String tblName, - boolean allowSql, boolean allowJdo, GetPartitionsArgs args) - throws MetaException, NoSuchObjectException { - return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - 
return directSql.getPartitions(catName, dbName, tblName, args); + if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { + throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken. } - @Override - protected List getJdoResult(GetHelper> ctx) throws MetaException { - try { - return convertToParts(catName, dbName, tblName, - listMPartitions(catName, dbName, tblName, args.getMax()), false, args); - } catch (Exception e) { - LOG.error("Failed to convert to parts", e); - throw new MetaException(e.getMessage()); - } - } - }.run(false); - } - - @Override - public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String user_name, List group_names) - throws NoSuchObjectException, MetaException, InvalidObjectException { - boolean success = false; - try { - openTransaction(); - MPartition mpart = getMPartition(catName, dbName, tblName, partVals, null); - if (mpart == null) { - commitTransaction(); - throw new NoSuchObjectException("partition values=" - + partVals.toString()); - } - MTable mtbl = mpart.getTable(); - - Partition part = convertToPart(catName, dbName, tblName, mpart, TxnUtils.isAcidTable(mtbl.getParameters())); - if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl - .getPartitionKeys()), partVals); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, - tblName, partName, user_name, group_names); - part.setPrivileges(partAuth); - } - - success = commitTransaction(); - return part; - } finally { - rollbackAndCleanup(success, null); - } - } - - private List convertToParts(String catName, String dbName, String tblName, - List mparts, boolean isAcidTable, GetPartitionsArgs args) - throws MetaException { - List parts = new ArrayList<>(mparts.size()); - for (MPartition mp : mparts) { - parts.add(convertToPart(catName, dbName, tblName, mp, 
isAcidTable, args)); - Deadline.checkTimeout(); + this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore(); } - return parts; - } - // TODO:pc implement max - @Override - public List listPartitionNames(String catName, String dbName, String tableName, - short max) throws MetaException { - List pns = null; - boolean success = false; - try { - openTransaction(); - LOG.debug("Executing getPartitionNames"); - pns = getPartitionNamesNoTxn(catName, dbName, tableName, max); - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); + protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { + return true; // By default, assume we can user directSQL - that's kind of the point. } - return pns; - } + protected abstract String describeResult(); + protected abstract T getSqlResult(GetHelper ctx) throws MetaException; + protected abstract T getJdoResult( + GetHelper ctx) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - @Override - public List listPartitionNames(final String catName, final String dbName, final String tblName, - final String defaultPartName, final byte[] exprBytes, - final String order, final int maxParts) throws MetaException, NoSuchObjectException { - final String defaultPartitionName = getDefaultPartitionName(defaultPartName); - final boolean isEmptyFilter = exprBytes.length == 1 && exprBytes[0] == -1; - ExpressionTree tmp = null; - if (!isEmptyFilter) { - tmp = PartFilterExprUtil.makeExpressionTree(expressionProxy, exprBytes, - getDefaultPartitionName(defaultPartName), conf); - } - final ExpressionTree exprTree = tmp; - return new GetListHelper(catName, dbName, tblName, true, true) { - private List getPartNamesPrunedByExpr(Table table, boolean isJdoQuery) throws MetaException { - int max = isEmptyFilter ? 
maxParts : -1; - List result; - if (isJdoQuery) { - result = getPartitionNamesViaOrm(catName, dbName, tblName, ExpressionTree.EMPTY_TREE, - order, max, true, table.getPartitionKeys()); - } else { - SqlFilterForPushdown filter = new SqlFilterForPushdown(table, false); - result = directSql.getPartitionNamesViaSql(filter, table.getPartitionKeys(), - defaultPartitionName, order, max); - } - if (!isEmptyFilter) { - prunePartitionNamesByExpr(catName, dbName, tblName, result, - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .expr(exprBytes).defaultPartName(defaultPartName).max(maxParts).build()); - } - return result; - } - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - SqlFilterForPushdown filter = new SqlFilterForPushdown(ctx.getTable(), false); - List partNames = null; - Table table = ctx.getTable(); - if (exprTree != null) { - if (directSql.generateSqlFilterForPushdown(table.getCatName(), table.getDbName(), table.getTableName(), - ctx.getTable().getPartitionKeys(), exprTree, defaultPartitionName, filter)) { - partNames = directSql.getPartitionNamesViaSql(filter, table.getPartitionKeys(), - defaultPartitionName, order, (int)maxParts); - } - } - if (partNames == null) { - partNames = getPartNamesPrunedByExpr(table, false); - } - return partNames; - } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - List result = null; - if (exprTree != null) { + public T run(boolean initTable) throws MetaException, NoSuchObjectException { + try { + start(initTable); + String savePoint = isInTxn && allowJdo ? 
"rollback_" + System.nanoTime() : null; + if (doUseDirectSql) { try { - result = getPartitionNamesViaOrm(catName, dbName, tblName, exprTree, order, - maxParts, true, ctx.getTable().getPartitionKeys()); - } catch (MetaException e) { - result = null; + directSql.prepareTxn(); + setTransactionSavePoint(savePoint); + this.results = getSqlResult(this); + LOG.debug("Using direct SQL optimization."); + } catch (Exception ex) { + handleDirectSqlError(ex, savePoint); } } - if (result == null) { - result = getPartNamesPrunedByExpr(ctx.getTable(), true); + // Note that this will be invoked in 2 cases: + // 1) DirectSQL was disabled to start with; + // 2) DirectSQL threw and was disabled in handleDirectSqlError. + if (!doUseDirectSql) { + this.results = getJdoResult(this); + LOG.debug("Not using direct SQL optimization."); } - return result; + return commit(); + } catch (NoSuchObjectException | MetaException ex) { + throw ex; + } catch (Exception ex) { + LOG.error("", ex); + throw new MetaException(ex.getMessage()); + } finally { + close(); } - }.run(true); - } - - @Override - public List listPartitionNamesByFilter(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - - MTable mTable = ensureGetMTable(catName, dbName, tblName); - List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); - String filter = args.getFilter(); - final ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? 
PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; - return new GetListHelper(catName, dbName, tblName, true, true) { - private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); + } - @Override - protected boolean canUseDirectSql(GetHelper> ctx) throws MetaException { - return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, - partitionKeys, tree, null, filter); + private void start(boolean initTable) throws MetaException, NoSuchObjectException { + start = doTrace ? System.nanoTime() : 0; + openTransaction(); + if (initTable && (tblName != null)) { + table = ensureGetTable(catName, dbName, tblName); } + doUseDirectSql = doUseDirectSql && canUseDirectSql(this); + } - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionNamesViaSql(filter, partitionKeys, - getDefaultPartitionName(args.getDefaultPartName()), null, args.getMax()); + private void handleDirectSqlError(Exception ex, String savePoint) throws MetaException, NoSuchObjectException { + String message = null; + try { + message = generateShorterMessage(ex); + } catch (Throwable t) { + message = ex.toString() + "; error building a better message: " + t.getMessage(); } + LOG.warn(message); // Don't log the exception, people just get confused. 
+ LOG.debug("Full DirectSQL callstack for debugging (not an error)", ex); - @Override - protected List getJdoResult(GetHelper> ctx) - throws MetaException, NoSuchObjectException, InvalidObjectException { - return getPartitionNamesViaOrm(catName, dbName, tblName, tree, null, - args.getMax(), true, partitionKeys); + if (!allowJdo || !DatabaseProduct.isRecoverableException(ex)) { + throw ExceptionHandler.newMetaException(ex); } - }.run(false); - } + + if (!isInTxn) { + JDOException rollbackEx = null; + try { + rollbackTransaction(); + } catch (JDOException jex) { + rollbackEx = jex; + } + if (rollbackEx != null) { + // Datanucleus propagates some pointless exceptions and rolls back in the finally. + if (currentTransaction != null && currentTransaction.isActive()) { + throw rollbackEx; // Throw if the tx wasn't rolled back. + } + LOG.info("Ignoring exception, rollback succeeded: " + rollbackEx.getMessage()); + } - private List getPartitionNamesViaOrm(String catName, String dbName, String tblName, - ExpressionTree tree, String order, Integer maxParts, boolean isValidatedFilter, - List partitionKeys) throws MetaException { - Map params = new HashMap(); - String jdoFilter = makeQueryFilterString(catName, dbName, tblName, tree, - params, isValidatedFilter, partitionKeys); - if (jdoFilter == null) { - assert !isValidatedFilter; - throw new MetaException("Failed to generate filter."); - } - - try (QueryWrapper query = new QueryWrapper(pm.newQuery( - "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition"))) { - query.setFilter(jdoFilter); - List orderSpecs = MetaStoreUtils.makeOrderSpecs(order); - StringBuilder builder = new StringBuilder(); - for (Object[] spec : orderSpecs) { - // TODO: order by casted value if the type of partition key is not string - builder.append("values.get(").append(spec[0]).append(") ").append(spec[1]).append(","); - } - if (builder.length() > 0) { - builder.setLength(builder.length() - 1); - 
query.setOrdering(builder.toString()); + start = doTrace ? System.nanoTime() : 0; + openTransaction(); + if (table != null) { + table = ensureGetTable(catName, dbName, tblName); + } } else { - query.setOrdering("partitionName ascending"); + rollbackTransactionToSavePoint(savePoint); + start = doTrace ? System.nanoTime() : 0; } - if (maxParts > -1) { - query.setRange(0, maxParts); + if (directSqlErrors != null) { + directSqlErrors.inc(); } - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - Collection jdoRes = (Collection) query.executeWithMap(params); - List result = new LinkedList(); - for (Object partName : jdoRes) { - result.add((String) partName); - } - return result; + doUseDirectSql = false; } - } - - private String extractPartitionKey(FieldSchema key, List pkeys) { - StringBuilder buffer = new StringBuilder(256); - - assert pkeys.size() >= 1; - - String partKey = "/" + key.getName() + "="; - // Table is partitioned by single key - if (pkeys.size() == 1 && (pkeys.get(0).getName().matches(key.getName()))) { - buffer.append("partitionName.substring(partitionName.indexOf(\"") - .append(key.getName()).append("=\") + ").append(key.getName().length() + 1) - .append(")"); - - // First partition key - anything between key= and first / - } else if ((pkeys.get(0).getName().matches(key.getName()))) { - - buffer.append("partitionName.substring(partitionName.indexOf(\"") - .append(key.getName()).append("=\") + ").append(key.getName().length() + 1).append(", ") - .append("partitionName.indexOf(\"/\")") - .append(")"); - - // Last partition key - anything between /key= and end - } else if ((pkeys.get(pkeys.size() - 1).getName().matches(key.getName()))) { - buffer.append("partitionName.substring(partitionName.indexOf(\"") - .append(partKey).append("\") + ").append(partKey.length()) - .append(")"); + private String generateShorterMessage(Exception ex) { + StringBuilder message = new StringBuilder( + 
"Falling back to ORM path due to direct SQL failure (this is not an error): "); + Throwable t = ex; + StackTraceElement[] prevStack = null; + while (t != null) { + message.append(t.getMessage()); + StackTraceElement[] stack = t.getStackTrace(); + int uniqueFrames = stack.length - 1; + if (prevStack != null) { + int n = prevStack.length - 1; + while (uniqueFrames >= 0 && n >= 0 && stack[uniqueFrames].equals(prevStack[n])) { + uniqueFrames--; n--; + } + } + for (int i = 0; i <= uniqueFrames; ++i) { + StackTraceElement ste = stack[i]; + message.append(" at ").append(ste); + if (ste.getMethodName().contains("getSqlResult") + && (ste.getFileName() == null || ste.getFileName().contains("ObjectStore"))) { + break; + } + } + prevStack = stack; + t = t.getCause(); + if (t != null) { + message.append(";\n Caused by: "); + } + } + return message.toString(); + } - // Intermediate key - anything between /key= and the following / - } else { + private T commit() { + success = commitTransaction(); + if (doTrace) { + double time = ((System.nanoTime() - start) / 1000000.0); + String result = describeResult(); + String retrieveType = doUseDirectSql ? 
"SQL" : "ORM"; - buffer.append("partitionName.substring(partitionName.indexOf(\"") - .append(partKey).append("\") + ").append(partKey.length()).append(", ") - .append("partitionName.indexOf(\"/\", partitionName.indexOf(\"").append(partKey) - .append("\") + 1))"); + LOG.debug("{} retrieved using {} in {}ms", result, retrieveType, time); + } + return results; } - LOG.info("Query for Key:" + key.getName() + " is :" + buffer); - return buffer.toString(); - } - - @Override - public PartitionValuesResponse listPartitionValues(String catName, String dbName, - String tableName, List cols, - boolean applyDistinct, String filter, - boolean ascending, List order, - long maxParts) throws MetaException { - catName = normalizeIdentifier(catName); - dbName = dbName.toLowerCase().trim(); - tableName = tableName.toLowerCase().trim(); - try { - if (filter == null || filter.isEmpty()) { - PartitionValuesResponse response = getDistinctValuesForPartitionsNoTxn(catName, dbName, - tableName, cols, applyDistinct, maxParts); - LOG.info("Number of records fetched: {}", response.getPartitionValues().size()); - return response; - } else { - PartitionValuesResponse response = - extractPartitionNamesByFilter(catName, dbName, tableName, filter, cols, ascending, maxParts); - if (response.getPartitionValues() != null) { - LOG.info("Number of records fetched with filter: {}", response.getPartitionValues().size()); - } - return response; + private void close() { + if (!success) { + rollbackTransaction(); } - } catch (Exception t) { - LOG.error("Exception in ORM", t); - throw new MetaException("Error retrieving partition values: " + t); + } + + public Table getTable() { + return table; } } - private PartitionValuesResponse extractPartitionNamesByFilter( - String catName, String dbName, String tableName, String filter, List cols, - boolean ascending, long maxParts) - throws MetaException, NoSuchObjectException { + private abstract class GetListHelper extends GetHelper> { + public GetListHelper(String 
catName, String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(catName, dbName, tblName, null, allowSql, allowJdo); + } - LOG.info("Table: {} filter: \"{}\" cols: {}", - TableName.getQualified(catName, dbName, tableName), filter, cols); - List partitionNames = null; - List partitions = null; - Table tbl = getTable(catName, dbName, tableName, null); - try { - // Get partitions by name - ascending or descending - partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending, - maxParts); - } catch (MetaException e) { - LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter); + public GetListHelper(String catName, String dbName, String tblName, List fields, + boolean allowSql, boolean allowJdo) throws MetaException { + super(catName, dbName, tblName, fields, allowSql, allowJdo); } - if (partitionNames == null) { - partitions = getPartitionsByFilter(catName, dbName, tableName, - new GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max((short) maxParts).build()); + @Override + protected String describeResult() { + return results.size() + " entries"; } + } - if (partitions != null) { - partitionNames = new ArrayList<>(partitions.size()); - for (Partition partition : partitions) { - // Check for NULL's just to be safe - if (tbl.getPartitionKeys() != null && partition.getValues() != null) { - partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), partition.getValues())); - } - } + @VisibleForTesting + public abstract class GetDbHelper extends GetHelper { + /** + * GetHelper for returning db info using directSql/JDO. + * @param dbName The Database Name + * @param allowSql Whether or not we allow DirectSQL to perform this query. + * @param allowJdo Whether or not we allow ORM to perform this query. 
+ */ + public GetDbHelper(String catalogName, String dbName,boolean allowSql, boolean allowJdo) + throws MetaException { + super(catalogName, dbName,null,allowSql,allowJdo); } - if (partitionNames == null) { - throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter + - "\" for " + TableName.getQualified(catName, dbName, tableName)); + @Override + protected String describeResult() { + return "db details for db ".concat(dbName); } + } - if (!ascending) { - partitionNames.sort(Collections.reverseOrder()); + private abstract class GetStatHelper extends GetHelper { + public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, + boolean allowJdo, String writeIdList) throws MetaException { + super(catalogName, dbName, tblName, allowSql, allowJdo); } - // Return proper response - PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList<>(partitionNames.size())); - LOG.info("Converting responses to Partition values for items: {}", partitionNames.size()); - for (String partName : partitionNames) { - ArrayList vals = new ArrayList<>(Collections.nCopies(tbl.getPartitionKeys().size(), null)); - PartitionValuesRow row = new PartitionValuesRow(); - Warehouse.makeValsFromName(partName, vals); - for (String value : vals) { - row.addToRow(value); - } - response.addToPartitionValues(row); + @Override + protected String describeResult() { + return "statistics for " + (results == null ? 
0 : results.getStatsObjSize()) + " columns"; } - return response; } - private List getPartitionNamesByFilter(String catName, String dbName, String tableName, - String filter, boolean ascending, long maxParts) - throws MetaException { - - boolean success = false; - List partNames = new ArrayList<>(); - Query query = null; - try { - openTransaction(); - LOG.debug("Executing getPartitionNamesByFilter"); - catName = normalizeIdentifier(catName); - dbName = dbName.toLowerCase(); - tableName = tableName.toLowerCase(); + private Table ensureGetTable(String catName, String dbName, String tblName) + throws NoSuchObjectException, MetaException { + return convertToTable(ensureGetMTable(catName, dbName, tblName), conf); + } - MTable mtable = getMTable(catName, dbName, tableName); - if( mtable == null ) { - // To be consistent with the behavior of listPartitionNames, if the - // table or db does not exist, we return an empty list - return partNames; - } - Map params = new HashMap<>(); - String queryFilterString = makeQueryFilterString(catName, dbName, mtable, filter, params); - query = pm.newQuery( - "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where " + queryFilterString); + @Override + public MDatabase ensureGetMDatabase(String catName, String dbName) + throws NoSuchObjectException { + return getMDatabase(catName, dbName); + } - if (maxParts >= 0) { - //User specified a row limit, set it on the Query - query.setRange(0, maxParts); - } - - LOG.debug("Filter specified is {}, JDOQL filter is {}", filter, - queryFilterString); + /** + * Verifies that the stats JSON string is unchanged for alter table (txn stats). + * @return Error message with the details of the change, or null if the value has not changed. 
+ */ + public static String verifyStatsChangeCtx(String fullTableName, Map oldP, Map newP, + long writeId, String validWriteIds, boolean isColStatsChange) { + if (validWriteIds != null && writeId > 0) { + return null; // We have txn context. + } - LOG.debug("Parms is {}", params); + if (!StatsSetupConst.areBasicStatsUptoDate(newP)) { + // The validWriteIds can be absent, for example, in case of Impala alter. + // If the new value is invalid, then we don't care, let the alter operation go ahead. + return null; + } - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - if (ascending) { - query.setOrdering("partitionName ascending"); - } else { - query.setOrdering("partitionName descending"); + String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); + String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); + if (StringUtils.equalsIgnoreCase(oldVal, newVal)) { + if (!isColStatsChange) { + return null; // No change in col stats or parameters => assume no change. } - query.setResult("partitionName"); + } - Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList<>(names); + // Some change to the stats state is being made; it can only be made with a write ID. 
+ return "Cannot change stats state for a transactional table " + fullTableName + " without " + + "providing the transactional write state for verification (new write ID " + + writeId + ", valid write IDs " + validWriteIds + "; current state " + oldVal + "; new" + + " state " + newVal; + } - LOG.debug("Done executing query for getPartitionNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for getPartitionNamesByFilter, size: {}", partNames.size()); - } finally { - rollbackAndCleanup(success, query); + private static MFieldSchema getColumnFromTableColumns(List cols, String col) { + if (cols == null) { + return null; + } + for (MFieldSchema mfs : cols) { + if (mfs.getName().equalsIgnoreCase(col)) { + return mfs; + } } - return partNames; + return null; } - private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn( - String catName, String dbName, String tableName, List cols, - boolean applyDistinct, long maxParts) - throws MetaException { - try (QueryWrapper q = new QueryWrapper( - pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.database.catalogName == t2 && " - + "table.tableName == t3 "))) { - openTransaction(); - q.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - - // TODO: Ordering seems to affect the distinctness, needs checking, disabling. 
-/* - if (ascending) { - q.setOrdering("partitionName ascending"); - } else { - q.setOrdering("partitionName descending"); - } -*/ - if (maxParts > 0) { - q.setRange(0, maxParts); - } - StringBuilder partValuesSelect = new StringBuilder(256); - if (applyDistinct) { - partValuesSelect.append("DISTINCT "); - } - List partitionKeys = - getTable(catName, dbName, tableName, null).getPartitionKeys(); - for (FieldSchema key : cols) { - partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", "); - } - partValuesSelect.setLength(partValuesSelect.length() - 2); - LOG.info("Columns to be selected from Partitions: {}", partValuesSelect); - q.setResult(partValuesSelect.toString()); - - PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList<>()); - if (cols.size() > 1) { - List results = (List) q.execute(dbName, catName, tableName); - for (Object[] row : results) { - PartitionValuesRow rowResponse = new PartitionValuesRow(); - for (Object columnValue : row) { - rowResponse.addToRow((String) columnValue); - } - response.addToPartitionValues(rowResponse); - } - } else { - List results = (List) q.execute(dbName, catName, tableName); - for (Object row : results) { - PartitionValuesRow rowResponse = new PartitionValuesRow(); - rowResponse.addToRow((String) row); - response.addToPartitionValues(rowResponse); - } + private static int getColumnIndexFromTableColumns(List cols, String col) { + if (cols == null) { + return -1; + } + for (int i = 0; i < cols.size(); i++) { + MFieldSchema mfs = cols.get(i); + if (mfs.getName().equalsIgnoreCase(col)) { + return i; } - return response; - } finally { - commitTransaction(); } + return -1; } - private List getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) { - List pns = new ArrayList<>(); - if (max == 0) { - return pns; + private boolean constraintNameAlreadyExists(MTable table, String constraintName) { + boolean commited = false; + Query 
constraintExistsQuery = null; + String constraintNameIfExists = null; + try { + openTransaction(); + constraintName = normalizeIdentifier(constraintName); + constraintExistsQuery = pm.newQuery(MConstraint.class, + "parentTable == parentTableP && constraintName == constraintNameP"); + constraintExistsQuery.declareParameters("MTable parentTableP, java.lang.String constraintNameP"); + constraintExistsQuery.setUnique(true); + constraintExistsQuery.setResult("constraintName"); + constraintNameIfExists = (String) constraintExistsQuery.executeWithArray(table, constraintName); + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, constraintExistsQuery); } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - try (QueryWrapper query = new QueryWrapper( - pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 " - + "order by partitionName asc"))) { - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setResult("partitionName"); + return constraintNameIfExists != null && !constraintNameIfExists.isEmpty(); + } - if (max > 0) { - query.setRange(0, max); + private String generateConstraintName(MTable table, String... parameters) throws MetaException { + int hashcode = ArrayUtils.toString(parameters).hashCode() & 0xfffffff; + int counter = 0; + final int MAX_RETRIES = 10; + while (counter < MAX_RETRIES) { + String currName = (parameters.length == 0 ? 
"constraint_" : parameters[parameters.length-1]) + + "_" + hashcode + "_" + System.currentTimeMillis() + "_" + (counter++); + if (!constraintNameAlreadyExists(table, currName)) { + return currName; } - Collection names = (Collection) query.execute(dbName, tableName, catName); - pns.addAll(names); - - return pns; } + throw new MetaException("Error while trying to generate the constraint name for " + ArrayUtils.toString(parameters)); } @Override - public int getNumPartitionsByPs(String catName, String dbName, String tblName, List partVals) - throws MetaException, NoSuchObjectException { - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - - return new GetHelper(catName, dbName, tblName, true, true) { - - @Override - protected String describeResult() { - return "Partition count by partial values"; - } - - @Override - protected Integer getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getNumPartitionsViaSqlPs(ctx.getTable(), partVals); - } - - @Override - protected Integer getJdoResult(GetHelper ctx) - throws MetaException, NoSuchObjectException, InvalidObjectException { - // size is known since it contains dbName, catName, tblName and partialRegex pattern - Map params = new HashMap<>(4); - String filter = getJDOFilterStrForPartitionVals(ctx.getTable(), partVals, params); - try (QueryWrapper query = new QueryWrapper(pm.newQuery( - "select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"))) { - query.setFilter(filter); - query.declareParameters(makeParameterDeclarationString(params)); - Long result = (Long) query.executeWithMap(params); - - return result.intValue(); - } - } - }.run(true); + public List addForeignKeys( + List fks) throws InvalidObjectException, MetaException { + return addForeignKeys(fks, true, null, null); } - /** - * Retrieves a Collection of partition-related results from the database that match - * the partial specification given 
for a specific table. - * @param dbName the name of the database - * @param tableName the name of the table - * @param part_vals the partial specification values - * @param max_parts the maximum number of partitions to return - * @param resultsCol the metadata column of the data to return, e.g. partitionName, etc. - * if resultsCol is empty or null, a collection of MPartition objects is returned - * @return A Collection of partition-related items from the db that match the partial spec - * for a table. The type of each item in the collection corresponds to the column - * you want results for. E.g., if resultsCol is partitionName, the Collection - * has types of String, and if resultsCol is null, the types are MPartition. - */ - private Collection getPartitionPsQueryResults(String catName, String dbName, - String tableName, List part_vals, - int max_parts, String resultsCol) - throws MetaException, NoSuchObjectException { - - Preconditions.checkState(this.currentTransaction.isActive()); - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - Table table = getTable(catName, dbName, tableName, null); - if (table == null) { - throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName) + " table not found"); - } - // size is known since it contains dbName, catName, tblName and partialRegex - // pattern - Map params = new HashMap<>(4); - String filter = getJDOFilterStrForPartitionVals(table, part_vals, params); - try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class))) { - query.setFilter(filter); - query.setOrdering("partitionName ascending"); - query.declareParameters(makeParameterDeclarationString(params)); - if (max_parts >= 0) { - // User specified a row limit, set it on the Query - query.setRange(0, max_parts); - } - if (resultsCol != null && !resultsCol.isEmpty()) { - query.setResult(resultsCol); - } - - Collection result = (Collection) 
query.executeWithMap(params); - - return Collections.unmodifiableCollection(new ArrayList<>(result)); + @Override + public String getMetastoreDbUuid() throws MetaException { + String ret = getGuidFromDB(); + if(ret != null) { + return ret; } + return createDbGuidAndPersist(); } - @Override - public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - GetPartitionsArgs args) throws MetaException, InvalidObjectException, NoSuchObjectException { - List partitions = new ArrayList<>(); + private String createDbGuidAndPersist() throws MetaException { boolean success = false; - + Query query = null; try { openTransaction(); - LOG.debug("executing listPartitionNamesPsWithAuth"); - MTable mtbl = getMTable(catName, db_name, tbl_name); - if (mtbl == null) { - throw new NoSuchObjectException( - TableName.getQualified(catName, db_name, tbl_name) + " table not found"); - } - String userName = args.getUserName(); - List groupNames = args.getGroupNames(); - List part_vals = args.getPart_vals(); - List partNames = args.getPartNames(); - boolean getauth = null != userName && null != groupNames && - "TRUE".equalsIgnoreCase( - mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE")); - if (MetaStoreUtils.arePartValsEmpty(part_vals) && partNames == null) { - partitions = getPartitions(catName, db_name, tbl_name, args); - } else if (partNames != null) { - partitions = getPartitionsByNames(catName, db_name, tbl_name, args); - } else { - partitions = getPartitionsByPs(catName, db_name, tbl_name, args); - } - if (getauth) { - for (Partition part : partitions) { - String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl - .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name, - tbl_name, partName, userName, groupNames); - part.setPrivileges(partAuth); - } - } + MMetastoreDBProperties prop = new MMetastoreDBProperties(); + prop.setPropertykey("guid"); + final String guid = 
UUID.randomUUID().toString(); + LOG.debug("Attempting to add a guid {} for the metastore db", guid); + prop.setPropertyValue(guid); + prop.setDescription("Metastore DB GUID generated on " + + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); + pm.makePersistent(prop); success = commitTransaction(); - } catch (InvalidObjectException | NoSuchObjectException | MetaException e) { - throw e; + if (success) { + LOG.info("Metastore db guid {} created successfully", guid); + return guid; + } } catch (Exception e) { - throw new MetaException(e.getMessage()); + LOG.warn("Metastore db guid creation failed", e); } finally { - rollbackAndCleanup(success, null); + rollbackAndCleanup(success, query); } - return partitions; - } - - private List getPartitionsByPs(String catName, String dbName, - String tblName, GetPartitionsArgs args) - throws MetaException, NoSuchObjectException { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - - return new GetListHelper(catName, dbName, tblName, true, true) { - - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlPs(ctx.getTable(), args); - } - - @Override - protected List getJdoResult(GetHelper> ctx) - throws MetaException, NoSuchObjectException { - List result = new ArrayList<>(); - Collection parts = getPartitionPsQueryResults(catName, dbName, tblName, - args.getPart_vals(), args.getMax(), null); - boolean isAcidTable = TxnUtils.isAcidTable(ctx.getTable()); - for (MPartition o : parts) { - Partition part = convertToPart(catName, dbName, tblName, o, isAcidTable, args); - result.add(part); - } - return result; - } - }.run(true); + // it possible that some other HMS instance could have created the guid + // at the same time due which this instance could not create a guid above + // in such case return the guid already generated + final String guid = getGuidFromDB(); + if 
(guid == null) { + throw new MetaException("Unable to create or fetch the metastore database uuid"); + } + return guid; } - @Override - public List listPartitionNamesPs(String catName, String dbName, String tableName, - List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List partitionNames = new ArrayList<>(); + private String getGuidFromDB() throws MetaException { boolean success = false; - + Query query = null; try { openTransaction(); - LOG.debug("Executing listPartitionNamesPs"); - Collection names = getPartitionPsQueryResults(catName, dbName, tableName, - part_vals, max_parts, "partitionName"); - partitionNames.addAll(names); + query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); + query.declareParameters(PTYPARAM_STR_KEY); + Collection names = (Collection) query.execute("guid"); + List uuids = new ArrayList<>(); + for (Iterator i = names.iterator(); i.hasNext();) { + String uuid = i.next().getPropertyValue(); + LOG.debug("Found guid {}", uuid); + uuids.add(uuid); + } success = commitTransaction(); - } catch (NoSuchObjectException | MetaException e) { - throw e; - } catch (Exception e) { - throw new MetaException(e.getMessage()); + if(uuids.size() > 1) { + throw new MetaException("Multiple uuids found"); + } + if(!uuids.isEmpty()) { + LOG.debug("Returning guid of metastore db : {}", uuids.get(0)); + return uuids.get(0); + } } finally { - rollbackAndCleanup(success, null); + rollbackAndCleanup(success, query); } - return partitionNames; + LOG.warn("Guid for metastore db not found"); + return null; } - private List listMPartitions(String catName, String dbName, String tableName, int max) throws Exception { - LOG.debug("Executing listMPartitions"); - - Preconditions.checkState(this.currentTransaction.isActive()); - - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - - try (Query query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && 
table.database.catalogName == t3")) { - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setOrdering("partitionName ascending"); - if (max >= 0) { - query.setRange(0, max); + public boolean runInTransaction(Runnable exec) { + boolean success = false; + try { + if (openTransaction()) { + exec.run(); + success = commitTransaction(); } - final List mparts = (List) query.execute(tableName, dbName, catName); - LOG.debug("Done executing query for listMPartitions"); - - pm.retrieveAll(mparts); - pm.makeTransientAll(mparts); - - LOG.debug("Done retrieving all objects for listMPartitions {}", mparts); - - return Collections.unmodifiableList(new ArrayList<>(mparts)); + } catch (Exception e) { + LOG.warn("Metastore operation failed", e); + } finally { + rollbackAndCleanup(success, null); } + return success; } - // This code is only executed in JDO code path, not from direct SQL code path. - private List listMPartitionsWithProjection(List fieldNames, String jdoFilter, - Map params) throws Exception { + public boolean dropProperties(String key) { boolean success = false; - List mparts = null; + Query query = null; try { - openTransaction(); - LOG.debug("Executing listMPartitionsWithProjection"); - try (Query query = pm.newQuery(MPartition.class, jdoFilter)) { - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - if (fieldNames == null || fieldNames.isEmpty()) { - // full fetch of partitions - mparts = (List) query.executeWithMap(params); - pm.retrieveAll(mparts); - pm.makeTransientAll(mparts); - mparts = new ArrayList<>(mparts); - } else { - // fetch partially filled partitions using result clause - query.setResult(Joiner.on(',').join(fieldNames)); - // if more than one fields are in the result class the return type is - // List - if (fieldNames.size() > 1) { - List results = (List) query.executeWithMap(params); 
- mparts = new ArrayList<>(results.size()); - for (Object[] row : results) { - MPartition mpart = new MPartition(); - int i = 0; - for (Object val : row) { - MetaStoreServerUtils.setNestedProperty(mpart, fieldNames.get(i), val, true); - i++; - } - mparts.add(mpart); - } - } else { - // only one field is requested, return type is List - List results = (List) query.executeWithMap(params); - mparts = new ArrayList<>(results.size()); - for (Object row : results) { - MPartition mpart = new MPartition(); - MetaStoreServerUtils.setNestedProperty(mpart, fieldNames.get(0), row, true); - mparts.add(mpart); - } - } + if (openTransaction()) { + query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); + query.declareParameters(PTYPARAM_STR_KEY); + @SuppressWarnings("unchecked") + Collection properties = (Collection) query.execute(key); + if (!properties.isEmpty()) { + pm.deletePersistentAll(properties); } + success = commitTransaction(); } - success = commitTransaction(); - LOG.debug("Done retrieving {} objects for listMPartitionsWithProjection", mparts.size()); + } catch (Exception e) { + LOG.warn("Metastore property drop failed", e); } finally { - rollbackAndCleanup(success, null); + rollbackAndCleanup(success, query); } - return mparts; + return success; } - @Override - public List getPartitionsByNames(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - return getPartitionsByNamesInternal(catName, dbName, tblName, true, true, args); - } - protected List getPartitionsByNamesInternal(String catName, String dbName, - String tblName, boolean allowSql, boolean allowJdo, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaPartNames(catName, dbName, tblName, args); - } - @Override - protected List 
getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(catName, dbName, tblName, false, args); + public MMetastoreDBProperties putProperties(String key, String value, String description, byte[] content) { + boolean success = false; + try { + if (openTransaction()) { + //pm.currentTransaction().setOptimistic(false); + // fetch first to determine new vs update + MMetastoreDBProperties properties = doFetchProperties(key, null); + final boolean newInstance; + if (properties == null) { + newInstance = true; + properties = new MMetastoreDBProperties(); + properties.setPropertykey(key); + } else { + newInstance = false; + } + properties.setDescription(description); + properties.setPropertyValue(value); + properties.setPropertyContent(content); + LOG.debug("Attempting to add property {} for the metastore db", key); + properties.setDescription("Metastore property " + + (newInstance ? "created" : "updated") + + " " + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); + if (newInstance) { + pm.makePersistent(properties); + } + success = commitTransaction(); + if (success) { + LOG.info("Metastore property {} created successfully", key); + return properties; + } } - }.run(false); - } - - @Override - public boolean getPartitionsByExpr(String catName, String dbName, String tblName, - List result, GetPartitionsArgs args) throws TException { - return getPartitionsByExprInternal(catName, dbName, tblName, result, true, true, args); - } - - private boolean prunePartitionNamesByExpr(String catName, String dbName, String tblName, - List result, GetPartitionsArgs args) throws MetaException { - MTable mTable = getMTable(catName, dbName, tblName); - List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); - boolean hasUnknownPartitions = expressionProxy.filterPartitionsByExpr( - partitionKeys, - args.getExpr(), - getDefaultPartitionName(args.getDefaultPartName()), - result); - if 
(args.getMax() >= 0 && result.size() > args.getMax()) { - result = result.subList(0, args.getMax()); + } finally { + rollbackAndCleanup(success, null); } - return hasUnknownPartitions; + return null; } - protected boolean getPartitionsByExprInternal(String catName, String dbName, String tblName, - List result, boolean allowSql, boolean allowJdo, GetPartitionsArgs args) throws TException { - assert result != null; - - byte[] expr = args.getExpr(); - final ExpressionTree exprTree = expr.length != 0 ? PartFilterExprUtil.makeExpressionTree( - expressionProxy, expr, getDefaultPartitionName(args.getDefaultPartName()), conf) : ExpressionTree.EMPTY_TREE; - final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - MTable mTable = ensureGetMTable(catName, dbName, tblName); - List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); - boolean isAcidTable = TxnUtils.isAcidTable(mTable.getParameters()); - result.addAll(new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - // If we have some sort of expression tree, try SQL filter pushdown. - if (exprTree != null) { - SqlFilterForPushdown filter = new SqlFilterForPushdown(); - if (directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, - exprTree, args.getDefaultPartName(), filter)) { - String catalogName = (catName != null) ? 
catName : getDefaultCatalog(conf); - return directSql.getPartitionsViaSqlFilter(catalogName, dbName, tblName, filter, - isAcidTable, args); - } + public boolean renameProperties(String mapKey, String newKey) { + boolean success = false; + Query query = null; + try { + LOG.debug("Attempting to rename property {} to {} for the metastore db", mapKey, newKey); + if (openTransaction()) { + // ensure the target is clear; + // query is cleaned up in finally block + query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); + query.declareParameters(PTYPARAM_STR_KEY); + query.setUnique(true); + MMetastoreDBProperties properties = (MMetastoreDBProperties) query.execute(newKey); + if (properties != null) { + return false; } - // We couldn't do SQL filter pushdown. Get names via normal means. - List partNames = new LinkedList<>(); - hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( - catName, dbName, tblName, partitionKeys, expr, args.getDefaultPartName(), (short) args.getMax(), partNames)); - GetPartitionsArgs newArgs = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build(); - return directSql.getPartitionsViaPartNames(catName, dbName, tblName, newArgs); - } - - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - // If we have some sort of expression tree, try JDOQL filter pushdown. - List result = null; - if (exprTree != null) { - result = getPartitionsViaOrmFilter(catName, dbName, tblName, exprTree, - false, partitionKeys, isAcidTable, args); + // ensure we got a source + properties = (MMetastoreDBProperties) query.execute(mapKey); + if (properties == null) { + return false; } - if (result == null) { - // We couldn't do JDOQL filter pushdown. Get names via normal means. 
- List partNames = new ArrayList<>(); - hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( - catName, dbName, tblName, partitionKeys, expr, args.getDefaultPartName(), (short) args.getMax(), partNames)); - GetPartitionsArgs newArgs = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build(); - result = getPartitionsViaOrmFilter(catName, dbName, tblName, isAcidTable, newArgs); + byte[] content = properties.getPropertyContent(); + String value = properties.getPropertyValue(); + // remove source from persistent storage + pm.deletePersistent(properties); + // make it persist with new key + MMetastoreDBProperties newProperties = new MMetastoreDBProperties(); + // update description + newProperties.setDescription("Metastore property renamed from " + mapKey + " to " + newKey + + " " + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); + // change key + newProperties.setPropertykey(newKey); + newProperties.setPropertyValue(value); + newProperties.setPropertyContent(content); + pm.makePersistent(newProperties); + // commit + success = commitTransaction(); + if (success) { + LOG.info("Metastore property {} renamed {} successfully", mapKey, newKey); + return true; } - return result; } - }.run(false)); - return hasUnknownPartitions.get(); - } - - /** - * Gets the default partition name. - * @param inputDefaultPartName Incoming default partition name. - * @return Valid default partition name - */ - private String getDefaultPartitionName(String inputDefaultPartName) { - return (((inputDefaultPartName == null) || (inputDefaultPartName.isEmpty())) - ? MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME) - : inputDefaultPartName); - } - - /** - * Gets the partition names from a table, pruned using an expression. - * @param catName - * @param dbName - * @param tblName - * @param expr Expression. - * @param defaultPartName Default partition name from job config, if any. 
- * @param maxParts Maximum number of partition names to return. - * @param result The resulting names. - * @return Whether the result contains any unknown partitions. - */ - private boolean getPartitionNamesPrunedByExprNoTxn(String catName, String dbName, String tblName, List partColumns, byte[] expr, - String defaultPartName, short maxParts, List result) throws MetaException { - result.addAll(getPartitionNamesNoTxn(catName, dbName, tblName, (short) -1)); - return prunePartitionNamesByExpr(catName, dbName, tblName, result, - new GetPartitionsArgs.GetPartitionsArgsBuilder() - .expr(expr).defaultPartName(defaultPartName).max(maxParts).build()); - } - - /** - * Gets partition names from the table via ORM (JDOQL) filter pushdown. - * @param tblName The table. - * @param tree The expression tree from which JDOQL filter will be made. - * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client - * (old hive client or non-hive one); if it was and we fail to create a filter, we will throw. - * @param args additional arguments for getting partitions - * @return Resulting partitions. Can be null if isValidatedFilter is false, and - * there was error deriving the JDO filter. 
- */ - private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree, - boolean isValidatedFilter, List partitionKeys, boolean isAcidTable, - GetPartitionsArgs args) throws MetaException { - Map params = new HashMap<>(); - String jdoFilter = - makeQueryFilterString(catName, dbName, tblName, tree, params, isValidatedFilter, partitionKeys); - if (jdoFilter == null) { - assert !isValidatedFilter; - return null; - } - try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class, jdoFilter))) { - if (args.getMax() >= 0) { - // User specified a row limit, set it on the Query - query.setRange(0, args.getMax()); - } - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - List mparts = (List) query.executeWithMap(params); - LOG.debug("Done executing query for getPartitionsViaOrmFilter"); - pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names? 
- LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter"); - List results = - convertToParts(catName, dbName, tblName, mparts, isAcidTable, args); - return results; + } finally { + rollbackAndCleanup(success, query); } + return false; } - private Integer getNumPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree, boolean isValidatedFilter, List partitionKeys) - throws MetaException { - Map params = new HashMap<>(); - String jdoFilter = makeQueryFilterString(catName, dbName, tblName, tree, - params, isValidatedFilter, partitionKeys); - if (jdoFilter == null) { - assert !isValidatedFilter; - return null; - } - - try (QueryWrapper query = new QueryWrapper(pm.newQuery( - "select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"))) { - query.setFilter(jdoFilter); - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - Long result = (Long) query.executeWithMap(params); - - return result.intValue(); + private T doFetchProperties(String key, java.util.function.Function transform) { + try(QueryWrapper query = new QueryWrapper(pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY))) { + query.declareParameters(PTYPARAM_STR_KEY); + query.setUnique(true); + MMetastoreDBProperties properties = (MMetastoreDBProperties) query.execute(key); + if (properties != null) { + return (T) (transform != null? transform.apply(properties) : properties); + } } + return null; } - /** - * Gets partition names from the table via ORM (JDOQL) name filter. - * @param dbName Database name. - * @param tblName Table name. - * @param isAcidTable True if the table is ACID - * @param args additional arguments for getting partitions - * @return Resulting partitions. 
- */ - private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, - boolean isAcidTable, GetPartitionsArgs args) throws MetaException { - List partNames = args.getPartNames(); - if (partNames.isEmpty()) { - return Collections.emptyList(); - } - return Batchable.runBatched(batchSize, partNames, new Batchable() { - @Override - public List run(List input) throws MetaException { - Pair> queryWithParams = - getPartQueryWithParams(catName, dbName, tblName, input); - - try (QueryWrapper query = new QueryWrapper(queryWithParams.getLeft())) { - query.setResultClass(MPartition.class); - query.setClass(MPartition.class); - query.setOrdering("partitionName ascending"); - - List mparts = (List) query.executeWithMap(queryWithParams.getRight()); - List partitions = convertToParts(catName, dbName, tblName, mparts, - isAcidTable, args); - return partitions; - } + public T fetchProperties(String key, java.util.function.Function transform) { + boolean success = false; + T properties = null; + try { + if (openTransaction()) { + properties = doFetchProperties(key, transform); + success = commitTransaction(); } - }); - } - - private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List partNames) { - Pair> queryWithParams = - getPartQueryWithParams(catName, dbName, tblName, partNames); - try (QueryWrapper query = new QueryWrapper(queryWithParams.getLeft())) { - query.setClass(MPartition.class); - long deleted = query.deletePersistentAll(queryWithParams.getRight()); - LOG.debug("Deleted {} partition from store", deleted); + } finally { + rollbackAndCleanup(success, null); } + return properties; } - /** - * Detaches column descriptors from storage descriptors; returns the set of unique CDs - * thus detached. This is done before dropping partitions because CDs are reused between - * SDs; so, we remove the links to delete SDs and then check the returned CDs to see if - * they are referenced by other SDs. 
- */ - private Set detachCdsFromSdsNoTxn( - String catName, String dbName, String tblName, List partNames) { - Pair> queryWithParams = - getPartQueryWithParams(catName, dbName, tblName, partNames); - try (QueryWrapper query = new QueryWrapper(queryWithParams.getLeft())) { - query.setClass(MPartition.class); - query.setResult("sd"); - List sds = (List) query.executeWithMap( - queryWithParams.getRight()); - HashSet candidateCds = new HashSet<>(); - for (MStorageDescriptor sd : sds) { - if (sd != null && sd.getCD() != null) { - candidateCds.add(sd.getCD()); - sd.setCD(null); + public Map selectProperties(String key, java.util.function.Function transform) { + boolean success = false; + Query query = null; + Map results = null; + try { + if (openTransaction()) { + Collection properties; + if (key == null || key.isEmpty()) { + query = pm.newQuery(MMetastoreDBProperties.class); + properties = (Collection) query.execute(); + } else { + query = pm.newQuery(MMetastoreDBProperties.class, "this.propertyKey.startsWith(key)"); + query.declareParameters(PTYPARAM_STR_KEY); + properties = (Collection) query.execute(key); + } + pm.retrieveAll(properties); + if (!properties.isEmpty()) { + results = new TreeMap(); + for(MMetastoreDBProperties ptys : properties) { + T t = (T) (transform != null? 
transform.apply(ptys) : ptys); + if (t != null) { + results.put(ptys.getPropertykey(), t); + } + } } + success = commitTransaction(); } - return candidateCds; - } - } - - private String getJDOFilterStrForPartitionNames(String catName, String dbName, String tblName, - List partNames, Map params) { - StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 &&" + - " table.database.catalogName == t3 && ("); - params.put("t1", normalizeIdentifier(tblName)); - params.put("t2", normalizeIdentifier(dbName)); - params.put("t3", normalizeIdentifier(catName)); - int n = 0; - for (Iterator itr = partNames.iterator(); itr.hasNext();) { - String pn = "p" + n; - n++; - String part = itr.next(); - params.put(pn, part); - sb.append("partitionName == ").append(pn); - sb.append(" || "); + } finally { + rollbackAndCleanup(success, query); } - sb.setLength(sb.length() - 4); // remove the last " || " - sb.append(')'); - return sb.toString(); - } - - private String getJDOFilterStrForPartitionVals(Table table, List vals, - Map params) throws MetaException { - String partNameMatcher = MetaStoreUtils.makePartNameMatcher(table, vals, ".*"); - params.put("dbName", table.getDbName()); - params.put("catName", table.getCatName()); - params.put("tableName", table.getTableName()); - params.put("partialRegex", partNameMatcher); - return "table.database.name == dbName" + " && table.database.catalogName == catName" - + " && table.tableName == tableName" + " && partitionName.matches(partialRegex)"; - } - - private Pair> getPartQueryWithParams( - String catName, String dbName, String tblName, List partNames) { - Query query = pm.newQuery(); - Map params = new HashMap<>(); - String filterStr = getJDOFilterStrForPartitionNames(catName, dbName, tblName, partNames, params); - query.setFilter(filterStr); - LOG.debug(" JDOQL filter is {}", filterStr); - query.declareParameters(makeParameterDeclarationString(params)); - return Pair.of(query, params); - } - - @Override - public 
List getPartitionsByFilter(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - return getPartitionsByFilterInternal(catName, dbName, tblName, true, true, args); + return results; } - /** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. */ - @VisibleForTesting - public abstract class GetHelper { - private final boolean isInTxn, doTrace, allowJdo; - private boolean doUseDirectSql; - private long start; - private Table table; - protected final List partitionFields; - protected final String catName, dbName, tblName; - private boolean success = false; - protected T results = null; + //TODO: clean up this method + private List addForeignKeys(List foreignKeys, boolean retrieveCD, + List primaryKeys, List uniqueConstraints) + throws InvalidObjectException, MetaException { + if (CollectionUtils.isNotEmpty(foreignKeys)) { + List mpkfks = new ArrayList<>(); + String currentConstraintName = null; + String catName = null; + // We start iterating through the foreign keys. This list might contain more than a single + // foreign key, and each foreign key might contain multiple columns. The outer loop retrieves + // the information that is common for a single key (table information) while the inner loop + // checks / adds information about each column. + for (int i = 0; i < foreignKeys.size(); i++) { + if (catName == null) { + catName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? foreignKeys.get(i).getCatName() : + getDefaultCatalog(conf)); + } else { + String tmpCatName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? 
+ foreignKeys.get(i).getCatName() : getDefaultCatalog(conf)); + if (!catName.equals(tmpCatName)) { + throw new InvalidObjectException("Foreign keys cannot span catalogs"); + } + } + final String fkTableDB = normalizeIdentifier(foreignKeys.get(i).getFktable_db()); + final String fkTableName = normalizeIdentifier(foreignKeys.get(i).getFktable_name()); + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. + final AttachedMTableInfo nChildTable = getMTable(catName, fkTableDB, fkTableName, retrieveCD); + final MTable childTable = nChildTable.mtbl; + if (childTable == null) { + throw new InvalidObjectException("Child table not found: " + fkTableName); + } + MColumnDescriptor childCD = retrieveCD ? nChildTable.mcd : childTable.getSd().getCD(); + final List childCols = childCD == null || childCD.getCols() == null ? + new ArrayList<>() : new ArrayList<>(childCD.getCols()); + if (childTable.getPartitionKeys() != null) { + childCols.addAll(childTable.getPartitionKeys()); + } - public GetHelper(String catalogName, String dbName, String tblName, - boolean allowSql, boolean allowJdo) throws MetaException { - this(catalogName, dbName, tblName, null, allowSql, allowJdo); - } - - public GetHelper(String catalogName, String dbName, String tblName, - List fields, boolean allowSql, boolean allowJdo) throws MetaException { - assert allowSql || allowJdo; - this.allowJdo = allowJdo; - this.catName = (catalogName != null) ? normalizeIdentifier(catalogName) : null; - this.dbName = (dbName != null) ? 
normalizeIdentifier(dbName) : null; - this.partitionFields = fields; - if (tblName != null) { - this.tblName = normalizeIdentifier(tblName); - } else { - // tblName can be null in cases of Helper being used at a higher - // abstraction level, such as with datbases - this.tblName = null; - this.table = null; - } - this.doTrace = LOG.isDebugEnabled(); - this.isInTxn = isActiveTransaction(); - - boolean isConfigEnabled = MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL); - if (isConfigEnabled && directSql == null) { - directSql = new MetaStoreDirectSql(pm, getConf(), ""); - } - - if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { - throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken. - } - this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore(); - } - - protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { - return true; // By default, assume we can user directSQL - that's kind of the point. - } - protected abstract String describeResult(); - protected abstract T getSqlResult(GetHelper ctx) throws MetaException; - protected abstract T getJdoResult( - GetHelper ctx) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - - public T run(boolean initTable) throws MetaException, NoSuchObjectException { - try { - start(initTable); - String savePoint = isInTxn && allowJdo ? "rollback_" + System.nanoTime() : null; - if (doUseDirectSql) { - try { - directSql.prepareTxn(); - setTransactionSavePoint(savePoint); - this.results = getSqlResult(this); - LOG.debug("Using direct SQL optimization."); - } catch (Exception ex) { - handleDirectSqlError(ex, savePoint); - } - } - // Note that this will be invoked in 2 cases: - // 1) DirectSQL was disabled to start with; - // 2) DirectSQL threw and was disabled in handleDirectSqlError. 
- if (!doUseDirectSql) { - this.results = getJdoResult(this); - LOG.debug("Not using direct SQL optimization."); - } - return commit(); - } catch (NoSuchObjectException | MetaException ex) { - throw ex; - } catch (Exception ex) { - LOG.error("", ex); - throw new MetaException(ex.getMessage()); - } finally { - close(); - } - } - - private void start(boolean initTable) throws MetaException, NoSuchObjectException { - start = doTrace ? System.nanoTime() : 0; - openTransaction(); - if (initTable && (tblName != null)) { - table = ensureGetTable(catName, dbName, tblName); - } - doUseDirectSql = doUseDirectSql && canUseDirectSql(this); - } - - private void handleDirectSqlError(Exception ex, String savePoint) throws MetaException, NoSuchObjectException { - String message = null; - try { - message = generateShorterMessage(ex); - } catch (Throwable t) { - message = ex.toString() + "; error building a better message: " + t.getMessage(); - } - LOG.warn(message); // Don't log the exception, people just get confused. - LOG.debug("Full DirectSQL callstack for debugging (not an error)", ex); - - if (!allowJdo || !DatabaseProduct.isRecoverableException(ex)) { - throw ExceptionHandler.newMetaException(ex); - } - - if (!isInTxn) { - JDOException rollbackEx = null; - try { - rollbackTransaction(); - } catch (JDOException jex) { - rollbackEx = jex; - } - if (rollbackEx != null) { - // Datanucleus propagates some pointless exceptions and rolls back in the finally. - if (currentTransaction != null && currentTransaction.isActive()) { - throw rollbackEx; // Throw if the tx wasn't rolled back. - } - LOG.info("Ignoring exception, rollback succeeded: " + rollbackEx.getMessage()); - } - - start = doTrace ? System.nanoTime() : 0; - openTransaction(); - if (table != null) { - table = ensureGetTable(catName, dbName, tblName); - } - } else { - rollbackTransactionToSavePoint(savePoint); - start = doTrace ? 
System.nanoTime() : 0; - } - - if (directSqlErrors != null) { - directSqlErrors.inc(); - } - - doUseDirectSql = false; - } - - private String generateShorterMessage(Exception ex) { - StringBuilder message = new StringBuilder( - "Falling back to ORM path due to direct SQL failure (this is not an error): "); - Throwable t = ex; - StackTraceElement[] prevStack = null; - while (t != null) { - message.append(t.getMessage()); - StackTraceElement[] stack = t.getStackTrace(); - int uniqueFrames = stack.length - 1; - if (prevStack != null) { - int n = prevStack.length - 1; - while (uniqueFrames >= 0 && n >= 0 && stack[uniqueFrames].equals(prevStack[n])) { - uniqueFrames--; n--; + final String pkTableDB = normalizeIdentifier(foreignKeys.get(i).getPktable_db()); + final String pkTableName = normalizeIdentifier(foreignKeys.get(i).getPktable_name()); + // For primary keys, we retrieve the column descriptors if retrieveCD is true (which means + // it is an alter table statement) or if it is a create table statement but we are + // referencing another table instead of self for the primary key. 
+ final AttachedMTableInfo nParentTable; + final MTable parentTable; + MColumnDescriptor parentCD; + final List parentCols; + final List existingTablePrimaryKeys; + final List existingTableUniqueConstraints; + final boolean sameTable = fkTableDB.equals(pkTableDB) && fkTableName.equals(pkTableName); + if (sameTable) { + nParentTable = nChildTable; + parentTable = childTable; + parentCD = childCD; + parentCols = childCols; + existingTablePrimaryKeys = primaryKeys; + existingTableUniqueConstraints = uniqueConstraints; + } else { + nParentTable = getMTable(catName, pkTableDB, pkTableName, true); + parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + pkTableName); } - } - for (int i = 0; i <= uniqueFrames; ++i) { - StackTraceElement ste = stack[i]; - message.append(" at ").append(ste); - if (ste.getMethodName().contains("getSqlResult") - && (ste.getFileName() == null || ste.getFileName().contains("ObjectStore"))) { - break; + parentCD = nParentTable.mcd; + parentCols = parentCD == null || parentCD.getCols() == null ? + new ArrayList<>() : new ArrayList<>(parentCD.getCols()); + if (parentTable.getPartitionKeys() != null) { + parentCols.addAll(parentTable.getPartitionKeys()); } + PrimaryKeysRequest primaryKeysRequest = new PrimaryKeysRequest(pkTableDB, pkTableName); + primaryKeysRequest.setCatName(catName); + existingTablePrimaryKeys = getPrimaryKeys(primaryKeysRequest); + existingTableUniqueConstraints = + getUniqueConstraints(new UniqueConstraintsRequest(catName, pkTableDB, pkTableName)); } - prevStack = stack; - t = t.getCause(); - if (t != null) { - message.append(";\n Caused by: "); - } - } - return message.toString(); - } - - private T commit() { - success = commitTransaction(); - if (doTrace) { - double time = ((System.nanoTime() - start) / 1000000.0); - String result = describeResult(); - String retrieveType = doUseDirectSql ? 
"SQL" : "ORM"; - - LOG.debug("{} retrieved using {} in {}ms", result, retrieveType, time); - } - return results; - } - - private void close() { - if (!success) { - rollbackTransaction(); - } - } - - public Table getTable() { - return table; - } - } - - private abstract class GetListHelper extends GetHelper> { - public GetListHelper(String catName, String dbName, String tblName, boolean allowSql, - boolean allowJdo) throws MetaException { - super(catName, dbName, tblName, null, allowSql, allowJdo); - } - - public GetListHelper(String catName, String dbName, String tblName, List fields, - boolean allowSql, boolean allowJdo) throws MetaException { - super(catName, dbName, tblName, fields, allowSql, allowJdo); - } - @Override - protected String describeResult() { - return results.size() + " entries"; - } - } - - @VisibleForTesting - public abstract class GetDbHelper extends GetHelper { - /** - * GetHelper for returning db info using directSql/JDO. - * @param dbName The Database Name - * @param allowSql Whether or not we allow DirectSQL to perform this query. - * @param allowJdo Whether or not we allow ORM to perform this query. - */ - public GetDbHelper(String catalogName, String dbName,boolean allowSql, boolean allowJdo) - throws MetaException { - super(catalogName, dbName,null,allowSql,allowJdo); - } - - @Override - protected String describeResult() { - return "db details for db ".concat(dbName); - } - } - - private abstract class GetStatHelper extends GetHelper { - public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, - boolean allowJdo, String writeIdList) throws MetaException { - super(catalogName, dbName, tblName, allowSql, allowJdo); - } - - @Override - protected String describeResult() { - return "statistics for " + (results == null ? 
0 : results.getStatsObjSize()) + " columns"; - } - } - - @Override - public int getNumPartitionsByFilter(String catName, String dbName, String tblName, - String filter) throws MetaException, NoSuchObjectException { - final ExpressionTree exprTree = org.apache.commons.lang3.StringUtils.isNotEmpty(filter) - ? PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - MTable mTable = ensureGetMTable(catName, dbName, tblName); - List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); - - return new GetHelper(catName, dbName, tblName, true, true) { - private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); - - @Override - protected String describeResult() { - return "Partition count"; - } - - @Override - protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { - return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, exprTree, null, filter); - } - - @Override - protected Integer getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getNumPartitionsViaSqlFilter(filter); - } - @Override - protected Integer getJdoResult( - GetHelper ctx) throws MetaException, NoSuchObjectException { - return getNumPartitionsViaOrmFilter(catName ,dbName, tblName, exprTree, true, partitionKeys); - } - }.run(false); - } - - protected List getPartitionsByFilterInternal( - String catName, String dbName, String tblName, - boolean allowSql, boolean allowJdo, GetPartitionsArgs args) - throws MetaException, NoSuchObjectException { - - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - - MTable mTable = ensureGetMTable(catName, dbName, tblName); - List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); - boolean isAcidTable = TxnUtils.isAcidTable(mTable.getParameters()); - String 
filter = args.getFilter(); - final ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; - return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); - - @Override - protected boolean canUseDirectSql(GetHelper> ctx) throws MetaException { - return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, tree, null, filter); - } - - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, filter, isAcidTable, args); - } - - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(catName, dbName, tblName, tree, true, - partitionKeys, isAcidTable, args); - } - }.run(false); - } - - @Override - public List getPartitionSpecsByFilterAndProjection(final Table table, - GetProjectionsSpec partitionsProjectSpec, - final GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException { - List fieldList = null; - String inputIncludePattern = null; - String inputExcludePattern = null; - if (partitionsProjectSpec != null) { - fieldList = partitionsProjectSpec.getFieldList(); - if (partitionsProjectSpec.isSetIncludeParamKeyPattern()) { - inputIncludePattern = partitionsProjectSpec.getIncludeParamKeyPattern(); - } - if (partitionsProjectSpec.isSetExcludeParamKeyPattern()) { - inputExcludePattern = partitionsProjectSpec.getExcludeParamKeyPattern(); - } - } - if (fieldList == null || fieldList.isEmpty()) { - // no fields are requested. 
Fallback to regular getPartitions implementation to return all the fields - GetPartitionsArgs.GetPartitionsArgsBuilder argsBuilder = new GetPartitionsArgs.GetPartitionsArgsBuilder() - .excludeParamKeyPattern(inputExcludePattern) - .includeParamKeyPattern(inputIncludePattern); - return getPartitionsInternal(table.getCatName(), table.getDbName(), table.getTableName(), - true, true, argsBuilder.build()); - } - - // anonymous class below requires final String objects - final String includeParamKeyPattern = inputIncludePattern; - final String excludeParamKeyPattern = inputExcludePattern; - - return new GetListHelper(table.getCatName(), table.getDbName(), table.getTableName(), - fieldList, true, true) { - private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); - private ExpressionTree tree; - - @Override - protected boolean canUseDirectSql(GetHelper> ctx) throws MetaException { - if (filterSpec.isSetFilterMode() && filterSpec.getFilterMode().equals(PartitionFilterMode.BY_EXPR)) { - // if the filter mode is BY_EXPR initialize the filter and generate the expression tree - // if there are more than one filter string we AND them together - initExpressionTree(); - return directSql.generateSqlFilterForPushdown(table.getCatName(), table.getDbName(), table.getTableName(), - table.getPartitionKeys(), tree, null, filter); - } - // BY_VALUES and BY_NAMES are always supported - return true; - } - - private void initExpressionTree() throws MetaException { - StringBuilder filterBuilder = new StringBuilder(); - int len = filterSpec.getFilters().size(); - List filters = filterSpec.getFilters(); - for (int i = 0; i < len; i++) { - filterBuilder.append('('); - filterBuilder.append(filters.get(i)); - filterBuilder.append(')'); - if (i + 1 < len) { - filterBuilder.append(" AND "); - } - } - String filterStr = filterBuilder.toString(); - tree = PartFilterExprUtil.parseFilterTree(filterStr); - } - - @Override - protected List getSqlResult(GetHelper> ctx) throws MetaException { 
- return directSql - .getPartitionsUsingProjectionAndFilterSpec(ctx.getTable(), ctx.partitionFields, - includeParamKeyPattern, excludeParamKeyPattern, filterSpec, filter); - } - - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException { - // For single-valued fields we can use setResult() to implement projection of fields but - // JDO doesn't support multi-valued fields in setResult() so currently JDO implementation - // fallbacks to full-partition fetch if the requested fields contain multi-valued fields - List fieldNames = PartitionProjectionEvaluator.getMPartitionFieldNames(ctx.partitionFields); - Map params = new HashMap<>(); - String jdoFilter = null; - if (filterSpec.isSetFilterMode()) { - // generate the JDO filter string - switch(filterSpec.getFilterMode()) { - case BY_EXPR: - if (tree == null) { - // tree could be null when directSQL is disabled - initExpressionTree(); - } - jdoFilter = - makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, - true); - if (jdoFilter == null) { - throw new MetaException("Could not generate JDO filter from given expression"); - } - break; - case BY_NAMES: - jdoFilter = getJDOFilterStrForPartitionNames(table.getCatName(), table.getDbName(), - table.getTableName(), filterSpec.getFilters(), params); - break; - case BY_VALUES: - jdoFilter = getJDOFilterStrForPartitionVals(table, filterSpec.getFilters(), params); - break; - default: - throw new MetaException("Unsupported filter mode " + filterSpec.getFilterMode()); - } - } else { - // filter mode is not set create simple JDOFilterStr and params - jdoFilter = "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"; - params.put("t1", normalizeIdentifier(tblName)); - params.put("t2", normalizeIdentifier(dbName)); - params.put("t3", normalizeIdentifier(catName)); - } - try { - List mparts = listMPartitionsWithProjection(fieldNames, jdoFilter, params); - return convertToParts(catName, dbName, 
tblName, mparts, false, new GetPartitionsArgs.GetPartitionsArgsBuilder() - .excludeParamKeyPattern(excludeParamKeyPattern) - .includeParamKeyPattern(includeParamKeyPattern) - .build()); - } catch (MetaException me) { - throw me; - } catch (Exception e) { - throw new MetaException(e.getMessage()); - } - } - }.run(true); - - } - - /** - * Gets the table object for a given table, throws if anything goes wrong. - * @param dbName Database name. - * @param tblName Table name. - * @return Table object. - */ - @Override - public MTable ensureGetMTable(String catName, String dbName, String tblName) - throws NoSuchObjectException { - MTable mtable = getMTable(catName, dbName, tblName); - if (mtable == null) { - throw new NoSuchObjectException("Specified catalog.database.table does not exist : " - + TableName.getQualified(catName, dbName, tblName)); - } - return mtable; - } - - private Table ensureGetTable(String catName, String dbName, String tblName) - throws NoSuchObjectException, MetaException { - return convertToTable(ensureGetMTable(catName, dbName, tblName)); - } - - private Database ensureGetDatabase(String catName, String dbName) throws UnknownDBException { - try { - return getDatabase(catName, dbName); - } catch (NoSuchObjectException nsoe) { - throw new UnknownDBException("Could not find database " + DatabaseName.getQualified(catName, dbName)); - } - } - - /** - * Makes a JDO query filter string. - * Makes a JDO query filter string for tables or partitions. - * @param dbName Database name. - * @param mtable Table. If null, the query returned is over tables in a database. - * If not null, the query returned is over partitions in a table. - * @param filter The filter from which JDOQL filter will be made. - * @param params Parameters for the filter. Some parameters may be added here. - * @return Resulting filter. 
- */ - private String makeQueryFilterString(String catName, String dbName, MTable mtable, String filter, - Map params) throws MetaException { - ExpressionTree tree = (filter != null && !filter.isEmpty()) - ? PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; - return makeQueryFilterString(catName, dbName, convertToTable(mtable), tree, params, true); - } - - /** - * Makes a JDO query filter string for tables or partitions. - * @param dbName Database name. - * @param table Table. If null, the query returned is over tables in a database. - * If not null, the query returned is over partitions in a table. - * @param tree The expression tree from which JDOQL filter will be made. - * @param params Parameters for the filter. Some parameters may be added here. - * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown - * by the client; if it was and we fail to create a filter, we will throw. - * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error. - */ - private String makeQueryFilterString(String catName, String dbName, Table table, - ExpressionTree tree, Map params, - boolean isValidatedFilter) throws MetaException { - assert tree != null; - FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); - if (table != null) { - queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); - params.put("t1", table.getTableName()); - params.put("t2", table.getDbName()); - params.put("t3", table.getCatName()); - } else { - queryBuilder.append("database.name == dbName && database.catalogName == catName"); - params.put("dbName", dbName); - params.put("catName", catName); - } - - tree.accept(new ExpressionTree.JDOFilterGenerator(getConf(), - table != null ? 
table.getPartitionKeys() : null, queryBuilder, params)); - if (queryBuilder.hasError()) { - assert !isValidatedFilter; - LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); - return null; - } - String jdoFilter = queryBuilder.getFilter(); - LOG.debug("jdoFilter = {}", jdoFilter); - return jdoFilter; - } - - private String makeQueryFilterString(String catName, String dbName, String tblName, - ExpressionTree tree, Map params, - boolean isValidatedFilter, List partitionKeys) throws MetaException { - assert tree != null; - FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); - queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); - params.put("t1", tblName); - params.put("t2", dbName); - params.put("t3", catName); - tree.accept(new ExpressionTree.JDOFilterGenerator(getConf(), partitionKeys, queryBuilder, params)); - if (queryBuilder.hasError()) { - assert !isValidatedFilter; - LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); - return null; - } - String jdoFilter = queryBuilder.getFilter(); - LOG.debug("jdoFilter = {}", jdoFilter); - return jdoFilter; - } - - private String makeParameterDeclarationString(Map params) { - //Create the parameter declaration string - StringBuilder paramDecl = new StringBuilder(); - for (String key : params.keySet()) { - paramDecl.append(", java.lang.String ") - .append(key); - } - return paramDecl.toString(); - } - - private String makeParameterDeclarationStringObj(Map params) { - //Create the parameter declaration string - StringBuilder paramDecl = new StringBuilder(); - for (Entry entry : params.entrySet()) { - paramDecl.append(", "); - paramDecl.append(entry.getValue().getClass().getName()); - paramDecl.append(' '); - paramDecl.append(entry.getKey()); - } - return paramDecl.toString(); - } - - @Override - public List listTableNamesByFilter(String catName, String dbName, String filter, - short 
maxTables) throws MetaException, UnknownDBException { - boolean success = false; - Query query = null; - List tableNames = new ArrayList<>(); - try { - openTransaction(); - LOG.debug("Executing listTableNamesByFilter"); - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - - ensureGetDatabase(catName, dbName); - - Map params = new HashMap<>(); - String queryFilterString = makeQueryFilterString(catName, dbName, null, filter, params); - query = pm.newQuery(MTable.class); - query.declareImports("import java.lang.String"); - query.setResult("tableName"); - query.setResultClass(java.lang.String.class); - if (maxTables >= 0) { - query.setRange(0, maxTables); - } - LOG.debug("filter specified is {}, JDOQL filter is {}", filter, queryFilterString); - if (LOG.isDebugEnabled()) { - for (Entry entry : params.entrySet()) { - LOG.debug("key: {} value: {} class: {}", entry.getKey(), entry.getValue(), - entry.getValue().getClass().getName()); - } - } - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setFilter(queryFilterString); - Collection names = (Collection)query.executeWithMap(params); - // have to emulate "distinct", otherwise tables with the same name may be returned - tableNames = new ArrayList<>(new HashSet<>(names)); - LOG.debug("Done executing query for listTableNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listTableNamesByFilter"); - } finally { - rollbackAndCleanup(success, query); - } - return tableNames; - } - - @Override - public Table alterTable(String catName, String dbname, String name, Table newTable, - String queryValidWriteIds) throws InvalidObjectException, MetaException { - boolean success = false; - try { - openTransaction(); - name = normalizeIdentifier(name); - dbname = normalizeIdentifier(dbname); - catName = normalizeIdentifier(catName); - MTable newt = convertToMTable(newTable); - if (newt == 
null) { - throw new InvalidObjectException("new table is invalid"); - } - - MTable oldt = getMTable(catName, dbname, name); - if (oldt == null) { - throw new MetaException("table " + dbname + "." + name + " doesn't exist"); - } - - // For now only alter name, owner, parameters, cols, bucketcols are allowed - oldt.setDatabase(newt.getDatabase()); - oldt.setTableName(normalizeIdentifier(newt.getTableName())); - boolean isTxn = TxnUtils.isTransactionalTable(newTable); - boolean isToTxn = isTxn && !TxnUtils.isTransactionalTable(oldt.getParameters()); - if (!isToTxn && isTxn && areTxnStatsSupported) { - // Transactional table is altered without a txn. Make sure there are no changes to the flag. - String errorMsg = verifyStatsChangeCtx(TableName.getDbTable(name, dbname), oldt.getParameters(), - newTable.getParameters(), newTable.getWriteId(), queryValidWriteIds, false); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } - } - oldt.setParameters(newt.getParameters()); - oldt.setOwner(newt.getOwner()); - oldt.setOwnerType(newt.getOwnerType()); - // Fully copy over the contents of the new SD into the old SD, - // so we don't create an extra SD in the metastore db that has no references. - MColumnDescriptor oldCD = null; - MStorageDescriptor oldSD = oldt.getSd(); - if (oldSD != null) { - oldCD = oldSD.getCD(); - } - copyMSD(newt.getSd(), oldt.getSd()); - removeUnusedColumnDescriptor(oldCD); - oldt.setRetention(newt.getRetention()); - oldt.setPartitionKeys(newt.getPartitionKeys()); - oldt.setTableType(newt.getTableType()); - oldt.setLastAccessTime(newt.getLastAccessTime()); - oldt.setViewOriginalText(newt.getViewOriginalText()); - oldt.setViewExpandedText(newt.getViewExpandedText()); - oldt.setRewriteEnabled(newt.isRewriteEnabled()); - - // If transactional, update the stats state for the current Stats updater query. - // Set stats invalid for ACID conversion; it doesn't pass in the write ID. 
- if (isTxn) { - if (!areTxnStatsSupported || isToTxn) { - StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); - } else if (queryValidWriteIds != null && newTable.getWriteId() > 0) { - // Check concurrent INSERT case and set false to the flag. - if (!isCurrentStatsValidForTheQuery(oldt, queryValidWriteIds, true)) { - StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + - dbname + "." + name + ". will be made persistent."); - } - assert newTable.getWriteId() > 0; - oldt.setWriteId(newTable.getWriteId()); - } - } - newTable = convertToTable(oldt); - - // commit the changes - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); - } - return newTable; - } - - /** - * Verifies that the stats JSON string is unchanged for alter table (txn stats). - * @return Error message with the details of the change, or null if the value has not changed. - */ - public static String verifyStatsChangeCtx(String fullTableName, Map oldP, Map newP, - long writeId, String validWriteIds, boolean isColStatsChange) { - if (validWriteIds != null && writeId > 0) { - return null; // We have txn context. - } - - if (!StatsSetupConst.areBasicStatsUptoDate(newP)) { - // The validWriteIds can be absent, for example, in case of Impala alter. - // If the new value is invalid, then we don't care, let the alter operation go ahead. - return null; - } - - String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - if (StringUtils.equalsIgnoreCase(oldVal, newVal)) { - if (!isColStatsChange) { - return null; // No change in col stats or parameters => assume no change. - } - } - - // Some change to the stats state is being made; it can only be made with a write ID. 
- return "Cannot change stats state for a transactional table " + fullTableName + " without " + - "providing the transactional write state for verification (new write ID " + - writeId + ", valid write IDs " + validWriteIds + "; current state " + oldVal + "; new" + - " state " + newVal; - } - - @Override - public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) - throws MetaException { - boolean success = false; - try { - openTransaction(); - catName = normalizeIdentifier(catName); - dbname = normalizeIdentifier(dbname); - tablename = normalizeIdentifier(tablename); - // Update creation metadata - MCreationMetadata newMcm = convertToMCreationMetadata(cm); - MCreationMetadata mcm = getCreationMetadata(catName, dbname, tablename); - mcm.setTables(newMcm.getTables()); - mcm.setMaterializationTime(newMcm.getMaterializationTime()); - mcm.setTxnList(newMcm.getTxnList()); - // commit the changes - success = commitTransaction(); - cm.setMaterializationTime(newMcm.getMaterializationTime()); - } finally { - rollbackAndCleanup(success, null); - } - } - - private static final class Ref { - public T t; - } - - /** - * Alters an existing partition. Initiates copy of SD. Returns the old CD. 
- * @param part_vals Partition values (of the original partition instance) - * @param newPart Partition object containing new information - */ - private Partition alterPartitionNoTxn(String catName, String dbname, String name, - List part_vals, Partition newPart, String validWriteIds, Ref oldCd) - throws InvalidObjectException, MetaException { - MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName()); - MPartition oldp = getMPartition(catName, dbname, name, part_vals, table); - return alterPartitionNoTxn(catName, dbname, name, oldp, newPart, - validWriteIds, oldCd, table); - } - - private Partition alterPartitionNoTxn(String catName, String dbname, - String name, MPartition oldp, Partition newPart, - String validWriteIds, - Ref oldCd, MTable table) - throws InvalidObjectException, MetaException { - catName = normalizeIdentifier(catName); - name = normalizeIdentifier(name); - dbname = normalizeIdentifier(dbname); - MPartition newp = convertToMPart(newPart, table); - MColumnDescriptor oldCD = null; - MStorageDescriptor oldSD = oldp.getSd(); - if (oldSD != null) { - oldCD = oldSD.getCD(); - } - if (newp == null) { - throw new InvalidObjectException("partition does not exist."); - } - oldp.setValues(newp.getValues()); - oldp.setPartitionName(newp.getPartitionName()); - boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); - if (isTxn && areTxnStatsSupported) { - // Transactional table is altered without a txn. Make sure there are no changes to the flag. 
- String errorMsg = verifyStatsChangeCtx(TableName.getDbTable(dbname, name), - oldp.getParameters(), - newPart.getParameters(), newPart.getWriteId(), validWriteIds, false); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } - } - oldp.setParameters(newPart.getParameters()); - if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) { - copyMSD(newp.getSd(), oldp.getSd()); - } - if (newp.getCreateTime() != oldp.getCreateTime()) { - oldp.setCreateTime(newp.getCreateTime()); - } - if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { - oldp.setLastAccessTime(newp.getLastAccessTime()); - } - - // If transactional, add/update the MUPdaterTransaction - // for the current updater query. - if (isTxn) { - if (!areTxnStatsSupported) { - StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); - } else if (validWriteIds != null && newPart.getWriteId() > 0) { - // Check concurrent INSERT case and set false to the flag. - if (!isCurrentStatsValidForTheQuery(oldp, validWriteIds, true)) { - StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + - dbname + "." + name + "." 
+ oldp.getPartitionName() + " will be made persistent."); - } - oldp.setWriteId(newPart.getWriteId()); - } - } - - oldCd.t = oldCD; - return convertToPart(catName, dbname, name, oldp, TxnUtils.isAcidTable(table.getParameters())); - } - - @Override - public Partition alterPartition(String catName, String dbname, String name, List part_vals, - Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException { - boolean success = false; - Throwable e = null; - Partition result = null; - try { - openTransaction(); - Ref oldCd = new Ref<>(); - result = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart, validWriteIds, oldCd); - removeUnusedColumnDescriptor(oldCd.t); - // commit the changes - success = commitTransaction(); - } catch (Throwable exception) { - LOG.error("alterPartition failed", exception); - e = exception; - } finally { - if (!success) { - rollbackTransaction(); - MetaException metaException = new MetaException( - "The transaction for alter partition did not commit successfully."); - if (e != null) { - metaException.initCause(e); - } - throw metaException; - } - } - return result; - } - - @Override - public List alterPartitions(String catName, String dbName, String tblName, - List> part_vals, List newParts, - long writeId, String queryWriteIdList) - throws InvalidObjectException, MetaException { - List results = new ArrayList<>(newParts.size()); - if (newParts.isEmpty()) { - return results; - } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - - boolean success = false; - try { - openTransaction(); - MTable table = ensureGetMTable(catName, dbName, tblName); - if (writeId > 0) { - newParts.forEach(newPart -> newPart.setWriteId(writeId)); - } - List partCols = convertToFieldSchemas(table.getPartitionKeys()); - List partNames = new ArrayList<>(); - for (List partVal : part_vals) { - partNames.add(Warehouse.makePartName(partCols, partVal)); - } - results 
= alterPartitionsInternal(table, partNames, newParts, queryWriteIdList, true, true); - // commit the changes - success = commitTransaction(); - } catch (Exception exception) { - LOG.error("Alter failed", exception); - throw new MetaException(exception.getMessage()); - } finally { - rollbackAndCleanup(success, null); - } - return results; - } - - protected List alterPartitionsInternal(MTable table, - List partNames, List newParts, String queryWriteIdList, - boolean allowSql, boolean allowJdo) - throws InvalidObjectException, MetaException, NoSuchObjectException { - // Validate new parts: StorageDescriptor and SerDeInfo must be set in Partition. - if (!TableType.VIRTUAL_VIEW.name().equals(table.getTableType())) { - for (Partition newPart : newParts) { - if (!newPart.isSetSd() || !newPart.getSd().isSetSerdeInfo()) { - throw new InvalidObjectException("Partition does not set storageDescriptor or serdeInfo."); - } - } - } - String catName = table.getDatabase().getCatalogName(); - String dbName = table.getDatabase().getName(); - String tblName = table.getTableName(); - for (Partition tmpPart : newParts) { - if (!tmpPart.getDbName().equalsIgnoreCase(dbName)) { - throw new MetaException("Invalid DB name : " + tmpPart.getDbName()); - } - if (!tmpPart.getTableName().equalsIgnoreCase(tblName)) { - throw new MetaException("Invalid table name : " + tmpPart.getDbName()); - } - } - return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { - @Override - protected List getSqlResult(GetHelper> ctx) - throws MetaException { - return directSql.alterPartitions(table, partNames, newParts, queryWriteIdList); - } - - @Override - protected List getJdoResult(GetHelper> ctx) - throws MetaException, InvalidObjectException { - return alterPartitionsViaJdo(table, partNames, newParts, queryWriteIdList); - } - }.run(false); - } - - private List alterPartitionsViaJdo(MTable table, List partNames, - List newParts, String queryWriteIdList) - throws MetaException, 
InvalidObjectException { - String catName = table.getDatabase().getCatalogName(); - String dbName = table.getDatabase().getName(); - String tblName = table.getTableName(); - List results = new ArrayList<>(newParts.size()); - List mPartitionList; - - try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && t3.contains(partitionName) " + - " && table.database.catalogName == t4"))) { - query.declareParameters("java.lang.String t1, java.lang.String t2, java.util.Collection t3, " - + "java.lang.String t4"); - mPartitionList = (List) query.executeWithArray(tblName, dbName, partNames, catName); - pm.retrieveAll(mPartitionList); - - if (mPartitionList.size() > newParts.size()) { - throw new MetaException("Expecting only one partition but more than one partitions are found."); - } - - Map, MPartition> mPartsMap = new HashMap(); - for (MPartition mPartition : mPartitionList) { - mPartsMap.put(mPartition.getValues(), mPartition); - } - - Set oldCds = new HashSet<>(); - Ref oldCdRef = new Ref<>(); - for (Partition tmpPart : newParts) { - oldCdRef.t = null; - Partition result = alterPartitionNoTxn(catName, dbName, tblName, - mPartsMap.get(tmpPart.getValues()), tmpPart, queryWriteIdList, oldCdRef, table); - results.add(result); - if (oldCdRef.t != null) { - oldCds.add(oldCdRef.t); - } - } - for (MColumnDescriptor oldCd : oldCds) { - removeUnusedColumnDescriptor(oldCd); - } - } - - return results; - } - - private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) { - oldSd.setLocation(newSd.getLocation()); - // If the columns of the old column descriptor != the columns of the new one, - // then change the old storage descriptor's column descriptor. - // Convert the MFieldSchema's to their thrift object counterparts, because we maintain - // datastore identity (i.e., identity of the model objects are managed by JDO, - // not the application). 
- List oldCols = oldSd.getCD() != null && oldSd.getCD().getCols() != null ? - convertToFieldSchemas(oldSd.getCD().getCols()) : null; - List newCols = newSd.getCD() != null && newSd.getCD().getCols() != null ? - convertToFieldSchemas(newSd.getCD().getCols()) : null; - if (oldCols == null || !oldCols.equals(newCols)) { - // First replace any constraints that may be associated with this CD - // Create mapping from old col indexes to new col indexes - if (oldCols != null && newCols != null) { - Map mapping = new HashMap<>(); - for (int i = 0; i < oldCols.size(); i++) { - FieldSchema oldCol = oldCols.get(i); - //TODO: replace for loop with list.indexOf() - for (int j = 0; j < newCols.size(); j++) { - FieldSchema newCol = newCols.get(j); - if (oldCol.equals(newCol)) { - mapping.put(i, j); - break; - } - } - } - // If we find it, we will change the reference for the CD. - // If we do not find it, i.e., the column will be deleted, we do not change it - // and we let the logic in removeUnusedColumnDescriptor take care of it - try (QueryWrapper query = new QueryWrapper(pm.newQuery(MConstraint.class, "parentColumn == inCD || childColumn == inCD"))) { - query.declareParameters("MColumnDescriptor inCD"); - List mConstraintsList = (List) query.execute(oldSd.getCD()); - pm.retrieveAll(mConstraintsList); - for (MConstraint mConstraint : mConstraintsList) { - if (oldSd.getCD().equals(mConstraint.getParentColumn())) { - Integer newIdx = mapping.get(mConstraint.getParentIntegerIndex()); - if (newIdx != null) { - mConstraint.setParentColumn(newSd.getCD()); - mConstraint.setParentIntegerIndex(newIdx); - } - } - if (oldSd.getCD().equals(mConstraint.getChildColumn())) { - Integer newIdx = mapping.get(mConstraint.getChildIntegerIndex()); - if (newIdx != null) { - mConstraint.setChildColumn(newSd.getCD()); - mConstraint.setChildIntegerIndex(newIdx); - } - } - } - pm.makePersistentAll(mConstraintsList); - } - // Finally replace CD - oldSd.setCD(newSd.getCD()); - } - } - - 
oldSd.setBucketCols(newSd.getBucketCols()); - oldSd.setIsCompressed(newSd.isCompressed()); - oldSd.setInputFormat(newSd.getInputFormat()); - oldSd.setOutputFormat(newSd.getOutputFormat()); - oldSd.setNumBuckets(newSd.getNumBuckets()); - oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName()); - oldSd.getSerDeInfo().setSerializationLib( - newSd.getSerDeInfo().getSerializationLib()); - oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters()); - oldSd.getSerDeInfo().setDescription(newSd.getSerDeInfo().getDescription()); - oldSd.setSkewedColNames(newSd.getSkewedColNames()); - oldSd.setSkewedColValues(newSd.getSkewedColValues()); - oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps()); - oldSd.setSortCols(newSd.getSortCols()); - oldSd.setParameters(newSd.getParameters()); - oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories()); - } - - /** - * Checks if a column descriptor has any remaining references by storage descriptors - * in the db. - * @param oldCD the column descriptor to check if it has references or not - * @return true if has references - */ - private boolean hasRemainingCDReference(MColumnDescriptor oldCD) { - assert oldCD != null; - Query query = null; - - /** - * In order to workaround oracle not supporting limit statement caused performance issue, HIVE-9447 makes - * all the backend DB run select count(1) from SDS where SDS.CD_ID=? to check if the specific CD_ID is - * referenced in SDS table before drop a partition. This select count(1) statement does not scale well in - * Postgres, and there is no index for CD_ID column in SDS table. - * For a SDS table with with 1.5 million rows, select count(1) has average 700ms without index, while in - * 10-20ms with index. But the statement before - * HIVE-9447( SELECT * FROM "SDS" "A0" WHERE "A0"."CD_ID" = $1 limit 1) uses less than 10ms . 
- */ - try { - // HIVE-21075: Fix Postgres performance regression caused by HIVE-9447 - LOG.debug("The dbType is {} ", dbType.getHiveSchemaPostfix()); - if (dbType.isPOSTGRES() || dbType.isMYSQL()) { - query = pm.newQuery(MStorageDescriptor.class, "this.cd == inCD"); - query.declareParameters("MColumnDescriptor inCD"); - List referencedSDs = null; - LOG.debug("Executing listStorageDescriptorsWithCD"); - // User specified a row limit, set it on the Query - query.setRange(0L, 1L); - referencedSDs = (List) query.execute(oldCD); - LOG.debug("Done executing query for listStorageDescriptorsWithCD"); - pm.retrieveAll(referencedSDs); - LOG.debug("Done retrieving all objects for listStorageDescriptorsWithCD"); - //if no other SD references this CD, we can throw it out. - return referencedSDs != null && !referencedSDs.isEmpty(); - } else { - query = pm.newQuery( - "select count(1) from org.apache.hadoop.hive.metastore.model.MStorageDescriptor where (this.cd == inCD)"); - query.declareParameters("MColumnDescriptor inCD"); - long count = (Long) query.execute(oldCD); - //if no other SD references this CD, we can throw it out. - return count != 0; - } - } finally { - if (query != null) { - query.closeAll(); - } - } - } - - /** - * Checks if a column descriptor has any remaining references by storage descriptors - * in the db. If it does not, then delete the CD. If it does, then do nothing. 
- * @param oldCD the column descriptor to delete if it is no longer referenced anywhere - */ - private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) { - if (oldCD == null) { - return; - } - Query query = null; - boolean success = false; - LOG.debug("execute removeUnusedColumnDescriptor"); - - try { - openTransaction(); - if (!hasRemainingCDReference(oldCD)) { - // First remove any constraints that may be associated with this CD - query = pm.newQuery(MConstraint.class, "parentColumn == inCD || childColumn == inCD"); - query.declareParameters("MColumnDescriptor inCD"); - List mConstraintsList = (List) query.execute(oldCD); - if (CollectionUtils.isNotEmpty(mConstraintsList)) { - pm.deletePersistentAll(mConstraintsList); - } - // Finally remove CD - pm.retrieve(oldCD); - pm.deletePersistent(oldCD); - LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor"); - - } - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, query); - } - } - - /** - * Called right before an action that would drop a storage descriptor. - * This function makes the SD's reference to a CD null, and then deletes the CD - * if it no longer is referenced in the table. - * @param msd the storage descriptor to drop - */ - private void preDropStorageDescriptor(MStorageDescriptor msd) { - if (msd == null || msd.getCD() == null) { - return; - } - - MColumnDescriptor mcd = msd.getCD(); - // Because there is a 1-N relationship between CDs and SDs, - // we must set the SD's CD to null first before dropping the storage descriptor - // to satisfy foreign key constraints. 
- msd.setCD(null); - removeUnusedColumnDescriptor(mcd); - } - - private static MFieldSchema getColumnFromTableColumns(List cols, String col) { - if (cols == null) { - return null; - } - for (MFieldSchema mfs : cols) { - if (mfs.getName().equalsIgnoreCase(col)) { - return mfs; - } - } - return null; - } - - private static int getColumnIndexFromTableColumns(List cols, String col) { - if (cols == null) { - return -1; - } - for (int i = 0; i < cols.size(); i++) { - MFieldSchema mfs = cols.get(i); - if (mfs.getName().equalsIgnoreCase(col)) { - return i; - } - } - return -1; - } - - private boolean constraintNameAlreadyExists(MTable table, String constraintName) { - boolean commited = false; - Query constraintExistsQuery = null; - String constraintNameIfExists = null; - try { - openTransaction(); - constraintName = normalizeIdentifier(constraintName); - constraintExistsQuery = pm.newQuery(MConstraint.class, - "parentTable == parentTableP && constraintName == constraintNameP"); - constraintExistsQuery.declareParameters("MTable parentTableP, java.lang.String constraintNameP"); - constraintExistsQuery.setUnique(true); - constraintExistsQuery.setResult("constraintName"); - constraintNameIfExists = (String) constraintExistsQuery.executeWithArray(table, constraintName); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, constraintExistsQuery); - } - return constraintNameIfExists != null && !constraintNameIfExists.isEmpty(); - } - - private String generateConstraintName(MTable table, String... parameters) throws MetaException { - int hashcode = ArrayUtils.toString(parameters).hashCode() & 0xfffffff; - int counter = 0; - final int MAX_RETRIES = 10; - while (counter < MAX_RETRIES) { - String currName = (parameters.length == 0 ? 
"constraint_" : parameters[parameters.length-1]) + - "_" + hashcode + "_" + System.currentTimeMillis() + "_" + (counter++); - if (!constraintNameAlreadyExists(table, currName)) { - return currName; - } - } - throw new MetaException("Error while trying to generate the constraint name for " + ArrayUtils.toString(parameters)); - } - - @Override - public List addForeignKeys( - List fks) throws InvalidObjectException, MetaException { - return addForeignKeys(fks, true, null, null); - } - - @Override - public String getMetastoreDbUuid() throws MetaException { - String ret = getGuidFromDB(); - if(ret != null) { - return ret; - } - return createDbGuidAndPersist(); - } - - private String createDbGuidAndPersist() throws MetaException { - boolean success = false; - Query query = null; - try { - openTransaction(); - MMetastoreDBProperties prop = new MMetastoreDBProperties(); - prop.setPropertykey("guid"); - final String guid = UUID.randomUUID().toString(); - LOG.debug("Attempting to add a guid {} for the metastore db", guid); - prop.setPropertyValue(guid); - prop.setDescription("Metastore DB GUID generated on " - + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); - pm.makePersistent(prop); - success = commitTransaction(); - if (success) { - LOG.info("Metastore db guid {} created successfully", guid); - return guid; - } - } catch (Exception e) { - LOG.warn("Metastore db guid creation failed", e); - } finally { - rollbackAndCleanup(success, query); - } - // it possible that some other HMS instance could have created the guid - // at the same time due which this instance could not create a guid above - // in such case return the guid already generated - final String guid = getGuidFromDB(); - if (guid == null) { - throw new MetaException("Unable to create or fetch the metastore database uuid"); - } - return guid; - } - - private String getGuidFromDB() throws MetaException { - boolean success = false; - Query query = null; - try { - 
openTransaction(); - query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); - query.declareParameters(PTYPARAM_STR_KEY); - Collection names = (Collection) query.execute("guid"); - List uuids = new ArrayList<>(); - for (Iterator i = names.iterator(); i.hasNext();) { - String uuid = i.next().getPropertyValue(); - LOG.debug("Found guid {}", uuid); - uuids.add(uuid); - } - success = commitTransaction(); - if(uuids.size() > 1) { - throw new MetaException("Multiple uuids found"); - } - if(!uuids.isEmpty()) { - LOG.debug("Returning guid of metastore db : {}", uuids.get(0)); - return uuids.get(0); - } - } finally { - rollbackAndCleanup(success, query); - } - LOG.warn("Guid for metastore db not found"); - return null; - } - - public boolean runInTransaction(Runnable exec) { - boolean success = false; - try { - if (openTransaction()) { - exec.run(); - success = commitTransaction(); - } - } catch (Exception e) { - LOG.warn("Metastore operation failed", e); - } finally { - rollbackAndCleanup(success, null); - } - return success; - } - - public boolean dropProperties(String key) { - boolean success = false; - Query query = null; - try { - if (openTransaction()) { - query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); - query.declareParameters(PTYPARAM_STR_KEY); - @SuppressWarnings("unchecked") - Collection properties = (Collection) query.execute(key); - if (!properties.isEmpty()) { - pm.deletePersistentAll(properties); - } - success = commitTransaction(); - } - } catch (Exception e) { - LOG.warn("Metastore property drop failed", e); - } finally { - rollbackAndCleanup(success, query); - } - return success; - } - - - public MMetastoreDBProperties putProperties(String key, String value, String description, byte[] content) { - boolean success = false; - try { - if (openTransaction()) { - //pm.currentTransaction().setOptimistic(false); - // fetch first to determine new vs update - MMetastoreDBProperties properties = doFetchProperties(key, null); - final 
boolean newInstance; - if (properties == null) { - newInstance = true; - properties = new MMetastoreDBProperties(); - properties.setPropertykey(key); - } else { - newInstance = false; - } - properties.setDescription(description); - properties.setPropertyValue(value); - properties.setPropertyContent(content); - LOG.debug("Attempting to add property {} for the metastore db", key); - properties.setDescription("Metastore property " - + (newInstance ? "created" : "updated") - + " " + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); - if (newInstance) { - pm.makePersistent(properties); - } - success = commitTransaction(); - if (success) { - LOG.info("Metastore property {} created successfully", key); - return properties; - } - } - } finally { - rollbackAndCleanup(success, null); - } - return null; - } - - - public boolean renameProperties(String mapKey, String newKey) { - boolean success = false; - Query query = null; - try { - LOG.debug("Attempting to rename property {} to {} for the metastore db", mapKey, newKey); - if (openTransaction()) { - // ensure the target is clear; - // query is cleaned up in finally block - query = pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY); - query.declareParameters(PTYPARAM_STR_KEY); - query.setUnique(true); - MMetastoreDBProperties properties = (MMetastoreDBProperties) query.execute(newKey); - if (properties != null) { - return false; - } - // ensure we got a source - properties = (MMetastoreDBProperties) query.execute(mapKey); - if (properties == null) { - return false; - } - byte[] content = properties.getPropertyContent(); - String value = properties.getPropertyValue(); - // remove source from persistent storage - pm.deletePersistent(properties); - // make it persist with new key - MMetastoreDBProperties newProperties = new MMetastoreDBProperties(); - // update description - newProperties.setDescription("Metastore property renamed from " + mapKey + " to " + newKey - + " " + 
LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"))); - // change key - newProperties.setPropertykey(newKey); - newProperties.setPropertyValue(value); - newProperties.setPropertyContent(content); - pm.makePersistent(newProperties); - // commit - success = commitTransaction(); - if (success) { - LOG.info("Metastore property {} renamed {} successfully", mapKey, newKey); - return true; - } - } - } finally { - rollbackAndCleanup(success, query); - } - return false; - } - - private T doFetchProperties(String key, java.util.function.Function transform) { - try(QueryWrapper query = new QueryWrapper(pm.newQuery(MMetastoreDBProperties.class, PTYARG_EQ_KEY))) { - query.declareParameters(PTYPARAM_STR_KEY); - query.setUnique(true); - MMetastoreDBProperties properties = (MMetastoreDBProperties) query.execute(key); - if (properties != null) { - return (T) (transform != null? transform.apply(properties) : properties); - } - } - return null; - } - - public T fetchProperties(String key, java.util.function.Function transform) { - boolean success = false; - T properties = null; - try { - if (openTransaction()) { - properties = doFetchProperties(key, transform); - success = commitTransaction(); - } - } finally { - rollbackAndCleanup(success, null); - } - return properties; - } - - public Map selectProperties(String key, java.util.function.Function transform) { - boolean success = false; - Query query = null; - Map results = null; - try { - if (openTransaction()) { - Collection properties; - if (key == null || key.isEmpty()) { - query = pm.newQuery(MMetastoreDBProperties.class); - properties = (Collection) query.execute(); - } else { - query = pm.newQuery(MMetastoreDBProperties.class, "this.propertyKey.startsWith(key)"); - query.declareParameters(PTYPARAM_STR_KEY); - properties = (Collection) query.execute(key); - } - pm.retrieveAll(properties); - if (!properties.isEmpty()) { - results = new TreeMap(); - for(MMetastoreDBProperties ptys : properties) { - T 
t = (T) (transform != null? transform.apply(ptys) : ptys); - if (t != null) { - results.put(ptys.getPropertykey(), t); - } - } - } - success = commitTransaction(); - } - } finally { - rollbackAndCleanup(success, query); - } - return results; - } - - //TODO: clean up this method - private List addForeignKeys(List foreignKeys, boolean retrieveCD, - List primaryKeys, List uniqueConstraints) - throws InvalidObjectException, MetaException { - if (CollectionUtils.isNotEmpty(foreignKeys)) { - List mpkfks = new ArrayList<>(); - String currentConstraintName = null; - String catName = null; - // We start iterating through the foreign keys. This list might contain more than a single - // foreign key, and each foreign key might contain multiple columns. The outer loop retrieves - // the information that is common for a single key (table information) while the inner loop - // checks / adds information about each column. - for (int i = 0; i < foreignKeys.size(); i++) { - if (catName == null) { - catName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? foreignKeys.get(i).getCatName() : - getDefaultCatalog(conf)); - } else { - String tmpCatName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? - foreignKeys.get(i).getCatName() : getDefaultCatalog(conf)); - if (!catName.equals(tmpCatName)) { - throw new InvalidObjectException("Foreign keys cannot span catalogs"); - } - } - final String fkTableDB = normalizeIdentifier(foreignKeys.get(i).getFktable_db()); - final String fkTableName = normalizeIdentifier(foreignKeys.get(i).getFktable_name()); - // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. - // For instance, this is the case when we are creating the table. 
- final AttachedMTableInfo nChildTable = getMTable(catName, fkTableDB, fkTableName, retrieveCD); - final MTable childTable = nChildTable.mtbl; - if (childTable == null) { - throw new InvalidObjectException("Child table not found: " + fkTableName); - } - MColumnDescriptor childCD = retrieveCD ? nChildTable.mcd : childTable.getSd().getCD(); - final List childCols = childCD == null || childCD.getCols() == null ? - new ArrayList<>() : new ArrayList<>(childCD.getCols()); - if (childTable.getPartitionKeys() != null) { - childCols.addAll(childTable.getPartitionKeys()); - } - - final String pkTableDB = normalizeIdentifier(foreignKeys.get(i).getPktable_db()); - final String pkTableName = normalizeIdentifier(foreignKeys.get(i).getPktable_name()); - // For primary keys, we retrieve the column descriptors if retrieveCD is true (which means - // it is an alter table statement) or if it is a create table statement but we are - // referencing another table instead of self for the primary key. - final AttachedMTableInfo nParentTable; - final MTable parentTable; - MColumnDescriptor parentCD; - final List parentCols; - final List existingTablePrimaryKeys; - final List existingTableUniqueConstraints; - final boolean sameTable = fkTableDB.equals(pkTableDB) && fkTableName.equals(pkTableName); - if (sameTable) { - nParentTable = nChildTable; - parentTable = childTable; - parentCD = childCD; - parentCols = childCols; - existingTablePrimaryKeys = primaryKeys; - existingTableUniqueConstraints = uniqueConstraints; - } else { - nParentTable = getMTable(catName, pkTableDB, pkTableName, true); - parentTable = nParentTable.mtbl; - if (parentTable == null) { - throw new InvalidObjectException("Parent table not found: " + pkTableName); - } - parentCD = nParentTable.mcd; - parentCols = parentCD == null || parentCD.getCols() == null ? 
- new ArrayList<>() : new ArrayList<>(parentCD.getCols()); - if (parentTable.getPartitionKeys() != null) { - parentCols.addAll(parentTable.getPartitionKeys()); - } - PrimaryKeysRequest primaryKeysRequest = new PrimaryKeysRequest(pkTableDB, pkTableName); - primaryKeysRequest.setCatName(catName); - existingTablePrimaryKeys = getPrimaryKeys(primaryKeysRequest); - existingTableUniqueConstraints = - getUniqueConstraints(new UniqueConstraintsRequest(catName, pkTableDB, pkTableName)); - } - - // Here we build an aux structure that is used to verify that the foreign key that is declared - // is actually referencing a valid primary key or unique key. We also check that the types of - // the columns correspond. - if (existingTablePrimaryKeys.isEmpty() && existingTableUniqueConstraints.isEmpty()) { - throw new MetaException( - "Trying to define foreign key but there are no primary keys or unique keys for referenced table"); - } - final Set validPKsOrUnique = generateValidPKsOrUniqueSignatures(parentCols, - existingTablePrimaryKeys, existingTableUniqueConstraints); - - StringBuilder fkSignature = new StringBuilder(); - StringBuilder referencedKSignature = new StringBuilder(); - for (; i < foreignKeys.size(); i++) { - SQLForeignKey foreignKey = foreignKeys.get(i); - final String fkColumnName = normalizeIdentifier(foreignKey.getFkcolumn_name()); - int childIntegerIndex = getColumnIndexFromTableColumns(childCD.getCols(), fkColumnName); - if (childIntegerIndex == -1) { - if (childTable.getPartitionKeys() != null) { - childCD = null; - childIntegerIndex = getColumnIndexFromTableColumns(childTable.getPartitionKeys(), fkColumnName); - } - if (childIntegerIndex == -1) { - throw new InvalidObjectException("Child column not found: " + fkColumnName); - } - } - - final String pkColumnName = normalizeIdentifier(foreignKey.getPkcolumn_name()); - int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD.getCols(), pkColumnName); - if (parentIntegerIndex == -1) { - if 
(parentTable.getPartitionKeys() != null) { - parentCD = null; - parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), pkColumnName); - } - if (parentIntegerIndex == -1) { - throw new InvalidObjectException("Parent column not found: " + pkColumnName); - } - } - - if (foreignKey.getFk_name() == null) { - // When there is no explicit foreign key name associated with the constraint and the key is composite, - // we expect the foreign keys to be send in order in the input list. - // Otherwise, the below code will break. - // If this is the first column of the FK constraint, generate the foreign key name - // NB: The below code can result in race condition where duplicate names can be generated (in theory). - // However, this scenario can be ignored for practical purposes because of - // the uniqueness of the generated constraint name. - if (foreignKey.getKey_seq() == 1) { - currentConstraintName = generateConstraintName(parentTable, fkTableDB, fkTableName, pkTableDB, - pkTableName, pkColumnName, fkColumnName, "fk"); - } - } else { - currentConstraintName = normalizeIdentifier(foreignKey.getFk_name()); - if (constraintNameAlreadyExists(parentTable, currentConstraintName)) { - String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), - parentTable.getTableName(), currentConstraintName); - throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); - } - } - // Update Column, keys, table, database, catalog name - foreignKey.setFk_name(currentConstraintName); - foreignKey.setCatName(catName); - foreignKey.setFktable_db(fkTableDB); - foreignKey.setFktable_name(fkTableName); - foreignKey.setPktable_db(pkTableDB); - foreignKey.setPktable_name(pkTableName); - foreignKey.setFkcolumn_name(fkColumnName); - foreignKey.setPkcolumn_name(pkColumnName); - - Integer updateRule = foreignKey.getUpdate_rule(); - Integer deleteRule = foreignKey.getDelete_rule(); - int enableValidateRely = 
(foreignKey.isEnable_cstr() ? 4 : 0) + - (foreignKey.isValidate_cstr() ? 2 : 0) + (foreignKey.isRely_cstr() ? 1 : 0); - - MConstraint mpkfk = new MConstraint( - currentConstraintName, - foreignKey.getKey_seq(), - MConstraint.FOREIGN_KEY_CONSTRAINT, - deleteRule, - updateRule, - enableValidateRely, - parentTable, - childTable, - parentCD, - childCD, - childIntegerIndex, - parentIntegerIndex - ); - mpkfks.add(mpkfk); - - final String fkColType = getColumnFromTableColumns(childCols, fkColumnName).getType(); - fkSignature.append( - generateColNameTypeSignature(fkColumnName, fkColType)); - referencedKSignature.append( - generateColNameTypeSignature(pkColumnName, fkColType)); - - if (i + 1 < foreignKeys.size() && foreignKeys.get(i + 1).getKey_seq() == 1) { - // Next one is a new key, we bail out from the inner loop - break; - } - } - String referenced = referencedKSignature.toString(); - if (!validPKsOrUnique.contains(referenced)) { - throw new MetaException( - "Foreign key references " + referenced + " but no corresponding " - + "primary key or unique key exists. 
Possible keys: " + validPKsOrUnique); - } - if (sameTable && fkSignature.toString().equals(referenced)) { - throw new MetaException( - "Cannot be both foreign key and primary/unique key on same table: " + referenced); - } - fkSignature = new StringBuilder(); - referencedKSignature = new StringBuilder(); - } - pm.makePersistentAll(mpkfks); - - } - return foreignKeys; - } - - private static Set generateValidPKsOrUniqueSignatures(List tableCols, - List refTablePrimaryKeys, List refTableUniqueConstraints) { - final Set validPKsOrUnique = new HashSet<>(); - if (!refTablePrimaryKeys.isEmpty()) { - refTablePrimaryKeys.sort((o1, o2) -> { - int keyNameComp = o1.getPk_name().compareTo(o2.getPk_name()); - if (keyNameComp == 0) { - return Integer.compare(o1.getKey_seq(), o2.getKey_seq()); - } - return keyNameComp; - }); - StringBuilder pkSignature = new StringBuilder(); - for (SQLPrimaryKey pk : refTablePrimaryKeys) { - pkSignature.append( - generateColNameTypeSignature( - pk.getColumn_name(), getColumnFromTableColumns(tableCols, pk.getColumn_name()).getType())); - } - validPKsOrUnique.add(pkSignature.toString()); - } - if (!refTableUniqueConstraints.isEmpty()) { - refTableUniqueConstraints.sort((o1, o2) -> { - int keyNameComp = o1.getUk_name().compareTo(o2.getUk_name()); - if (keyNameComp == 0) { - return Integer.compare(o1.getKey_seq(), o2.getKey_seq()); - } - return keyNameComp; - }); - StringBuilder ukSignature = new StringBuilder(); - for (int j = 0; j < refTableUniqueConstraints.size(); j++) { - SQLUniqueConstraint uk = refTableUniqueConstraints.get(j); - ukSignature.append( - generateColNameTypeSignature( - uk.getColumn_name(), getColumnFromTableColumns(tableCols, uk.getColumn_name()).getType())); - if (j + 1 < refTableUniqueConstraints.size()) { - if (!refTableUniqueConstraints.get(j + 1).getUk_name().equals( - refTableUniqueConstraints.get(j).getUk_name())) { - validPKsOrUnique.add(ukSignature.toString()); - ukSignature = new StringBuilder(); - } - } else { - 
validPKsOrUnique.add(ukSignature.toString()); - } - } - } - return validPKsOrUnique; - } - - private static String generateColNameTypeSignature(String colName, String colType) { - return colName + ":" + colType + ";"; - } - - @Override - public List addPrimaryKeys(List pks) throws InvalidObjectException, - MetaException { - return addPrimaryKeys(pks, true); - } - - private List addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, - MetaException { - List mpks = new ArrayList<>(); - String constraintName = null; - - for (SQLPrimaryKey pk : pks) { - final String catName = normalizeIdentifier(pk.getCatName()); - final String tableDB = normalizeIdentifier(pk.getTable_db()); - final String tableName = normalizeIdentifier(pk.getTable_name()); - final String columnName = normalizeIdentifier(pk.getColumn_name()); - - // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. - // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); - MTable parentTable = nParentTable.mtbl; - if (parentTable == null) { - throw new InvalidObjectException("Parent table not found: " + tableName); - } - - MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); - int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); - if (parentIntegerIndex == -1) { - if (parentTable.getPartitionKeys() != null) { - parentCD = null; - parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); - } - if (parentIntegerIndex == -1) { - throw new InvalidObjectException("Parent column not found: " + columnName); - } - } - if (getPrimaryKeyConstraintName(parentTable.getDatabase().getCatalogName(), - parentTable.getDatabase().getName(), parentTable.getTableName()) != null) { - throw new MetaException(" Primary key already exists for: " + - TableName.getQualified(catName, tableDB, tableName)); - } - if (pk.getPk_name() == null) { - if (pk.getKey_seq() == 1) { - constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "pk"); - } - } else { - constraintName = normalizeIdentifier(pk.getPk_name()); - if (constraintNameAlreadyExists(parentTable, constraintName)) { - String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), - parentTable.getTableName(), constraintName); - throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); - } - } - - int enableValidateRely = (pk.isEnable_cstr() ? 4 : 0) + - (pk.isValidate_cstr() ? 2 : 0) + (pk.isRely_cstr() ? 
1 : 0); - MConstraint mpk = new MConstraint( - constraintName, - pk.getKey_seq(), - MConstraint.PRIMARY_KEY_CONSTRAINT, - null, - null, - enableValidateRely, - parentTable, - null, - parentCD, - null, - null, - parentIntegerIndex); - mpks.add(mpk); - - // Add normalized identifier back to result - pk.setCatName(catName); - pk.setTable_db(tableDB); - pk.setTable_name(tableName); - pk.setColumn_name(columnName); - pk.setPk_name(constraintName); - } - pm.makePersistentAll(mpks); - return pks; - } - - @Override - public List addUniqueConstraints(List uks) - throws InvalidObjectException, MetaException { - return addUniqueConstraints(uks, true); - } - - private List addUniqueConstraints(List uks, boolean retrieveCD) - throws InvalidObjectException, MetaException { - - List cstrs = new ArrayList<>(); - String constraintName = null; - - for (SQLUniqueConstraint uk : uks) { - final String catName = normalizeIdentifier(uk.getCatName()); - final String tableDB = normalizeIdentifier(uk.getTable_db()); - final String tableName = normalizeIdentifier(uk.getTable_name()); - final String columnName = normalizeIdentifier(uk.getColumn_name()); - - // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. - // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); - MTable parentTable = nParentTable.mtbl; - if (parentTable == null) { - throw new InvalidObjectException("Parent table not found: " + tableName); - } - - MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); - int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); - if (parentIntegerIndex == -1) { - if (parentTable.getPartitionKeys() != null) { - parentCD = null; - parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); - } - if (parentIntegerIndex == -1) { - throw new InvalidObjectException("Parent column not found: " + columnName); - } - } - if (uk.getUk_name() == null) { - if (uk.getKey_seq() == 1) { - constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "uk"); - } - } else { - constraintName = normalizeIdentifier(uk.getUk_name()); - if (constraintNameAlreadyExists(parentTable, constraintName)) { - String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), - parentTable.getTableName(), constraintName); - throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); - } - } - - - int enableValidateRely = (uk.isEnable_cstr() ? 4 : 0) + - (uk.isValidate_cstr() ? 2 : 0) + (uk.isRely_cstr() ? 
1 : 0); - MConstraint muk = new MConstraint( - constraintName, - uk.getKey_seq(), - MConstraint.UNIQUE_CONSTRAINT, - null, - null, - enableValidateRely, - parentTable, - null, - parentCD, - null, - null, - parentIntegerIndex); - cstrs.add(muk); - - // Add normalized identifier back to result - uk.setCatName(catName); - uk.setTable_db(tableDB); - uk.setTable_name(tableName); - uk.setColumn_name(columnName); - uk.setUk_name(constraintName); - - } - pm.makePersistentAll(cstrs); - return uks; - } - - @Override - public List addNotNullConstraints(List nns) - throws InvalidObjectException, MetaException { - return addNotNullConstraints(nns, true); - } - - @Override - public List addDefaultConstraints(List nns) - throws InvalidObjectException, MetaException { - return addDefaultConstraints(nns, true); - } - - @Override - public List addCheckConstraints(List nns) - throws InvalidObjectException, MetaException { - return addCheckConstraints(nns, true); - } - - private List addCheckConstraints(List ccs, boolean retrieveCD) - throws InvalidObjectException, MetaException { - List cstrs = new ArrayList<>(); - - for (SQLCheckConstraint cc: ccs) { - final String catName = normalizeIdentifier(cc.getCatName()); - final String tableDB = normalizeIdentifier(cc.getTable_db()); - final String tableName = normalizeIdentifier(cc.getTable_name()); - final String columnName = cc.getColumn_name() == null? 
null - : normalizeIdentifier(cc.getColumn_name()); - final String ccName = cc.getDc_name(); - boolean isEnable = cc.isEnable_cstr(); - boolean isValidate = cc.isValidate_cstr(); - boolean isRely = cc.isRely_cstr(); - String constraintValue = cc.getCheck_expression(); - MConstraint muk = addConstraint(catName, tableDB, tableName, columnName, ccName, isEnable, isRely, isValidate, - MConstraint.CHECK_CONSTRAINT, constraintValue, retrieveCD); - cstrs.add(muk); - - // Add normalized identifier back to result - cc.setCatName(catName); - cc.setTable_db(tableDB); - cc.setTable_name(tableName); - cc.setColumn_name(columnName); - cc.setDc_name(muk.getConstraintName()); - } - pm.makePersistentAll(cstrs); - return ccs; - } - - private MConstraint addConstraint(String catName, String tableDB, String tableName, String columnName, String ccName, - boolean isEnable, boolean isRely, boolean isValidate, int constraintType, - String constraintValue, boolean retrieveCD) - throws InvalidObjectException, MetaException { - String constraintName = null; - // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. - // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); - MTable parentTable = nParentTable.mtbl; - if (parentTable == null) { - throw new InvalidObjectException("Parent table not found: " + tableName); - } - - MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); - int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); - if (parentIntegerIndex == -1) { - if (parentTable.getPartitionKeys() != null) { - parentCD = null; - parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); - } - } - if (ccName == null) { - constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "dc"); - } else { - constraintName = normalizeIdentifier(ccName); - if (constraintNameAlreadyExists(parentTable, constraintName)) { - String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), - parentTable.getTableName(), constraintName); - throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); - } - } - - int enableValidateRely = (isEnable ? 4 : 0) + - (isValidate ? 2 : 0) + (isRely ? 1 : 0); - MConstraint muk = new MConstraint( - constraintName, - 1, - constraintType, // Not null constraint should reference a single column - null, - null, - enableValidateRely, - parentTable, - null, - parentCD, - null, - null, - parentIntegerIndex, - constraintValue); - - return muk; - } - - private List addDefaultConstraints(List dcs, boolean retrieveCD) - throws InvalidObjectException, MetaException { - - List cstrs = new ArrayList<>(); - for (SQLDefaultConstraint dc : dcs) { - final String catName = normalizeIdentifier(dc.getCatName()); - final String tableDB = normalizeIdentifier(dc.getTable_db()); - final String tableName = normalizeIdentifier(dc.getTable_name()); - final String columnName = normalizeIdentifier(dc.getColumn_name()); - final String dcName = dc.getDc_name(); - boolean isEnable = dc.isEnable_cstr(); - boolean isValidate = dc.isValidate_cstr(); - boolean isRely = dc.isRely_cstr(); - String constraintValue = dc.getDefault_value(); - MConstraint muk = addConstraint(catName, tableDB, tableName, columnName, dcName, isEnable, isRely, isValidate, - MConstraint.DEFAULT_CONSTRAINT, constraintValue, retrieveCD); - cstrs.add(muk); - - // Add normalized 
identifier back to result - dc.setCatName(catName); - dc.setTable_db(tableDB); - dc.setTable_name(tableName); - dc.setColumn_name(columnName); - dc.setDc_name(muk.getConstraintName()); - } - pm.makePersistentAll(cstrs); - return dcs; - } - - private List addNotNullConstraints(List nns, boolean retrieveCD) - throws InvalidObjectException, MetaException { - - List cstrs = new ArrayList<>(); - String constraintName; - - for (SQLNotNullConstraint nn : nns) { - final String catName = normalizeIdentifier(nn.getCatName()); - final String tableDB = normalizeIdentifier(nn.getTable_db()); - final String tableName = normalizeIdentifier(nn.getTable_name()); - final String columnName = normalizeIdentifier(nn.getColumn_name()); - - // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. - // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); - MTable parentTable = nParentTable.mtbl; - if (parentTable == null) { - throw new InvalidObjectException("Parent table not found: " + tableName); - } - - MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); - int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); - if (parentIntegerIndex == -1) { - if (parentTable.getPartitionKeys() != null) { - parentCD = null; - parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); - } - if (parentIntegerIndex == -1) { - throw new InvalidObjectException("Parent column not found: " + columnName); - } - } - if (nn.getNn_name() == null) { - constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "nn"); - } else { - constraintName = normalizeIdentifier(nn.getNn_name()); - if (constraintNameAlreadyExists(parentTable, constraintName)) { - String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), - parentTable.getTableName(), constraintName); - throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); - } - } - - int enableValidateRely = (nn.isEnable_cstr() ? 4 : 0) + - (nn.isValidate_cstr() ? 2 : 0) + (nn.isRely_cstr() ? 1 : 0); - MConstraint muk = new MConstraint( - constraintName, - 1, - MConstraint.NOT_NULL_CONSTRAINT, // Not null constraint should reference a single column - null, - null, - enableValidateRely, - parentTable, - null, - parentCD, - null, - null, - parentIntegerIndex); - cstrs.add(muk); - // Add normalized identifier back to result - nn.setCatName(catName); - nn.setTable_db(tableDB); - nn.setTable_name(tableName); - nn.setColumn_name(columnName); - nn.setNn_name(constraintName); - } - pm.makePersistentAll(cstrs); - return nns; - } - - @Override - public boolean addRole(String roleName, String ownerName) - throws InvalidObjectException, MetaException, NoSuchObjectException { - boolean success = false; - boolean commited = false; - try { - openTransaction(); - MRole nameCheck = this.getMRole(roleName); - if (nameCheck != null) { - throw new InvalidObjectException("Role " + roleName + " already exists."); - } - int now = (int) (System.currentTimeMillis() / 1000); - MRole mRole = new MRole(roleName, now, 
ownerName); - pm.makePersistent(mRole); - commited = commitTransaction(); - success = true; - } finally { - rollbackAndCleanup(commited, null); - } - return success; - } - - @Override - public boolean grantRole(Role role, String userName, - PrincipalType principalType, String grantor, PrincipalType grantorType, - boolean grantOption) throws MetaException, NoSuchObjectException,InvalidObjectException { - boolean success = false; - boolean commited = false; - try { - openTransaction(); - MRoleMap roleMap = null; - try { - roleMap = this.getMSecurityUserRoleMap(userName, principalType, role - .getRoleName()); - } catch (Exception e) { - } - if (roleMap != null) { - throw new InvalidObjectException("Principal " + userName - + " already has the role " + role.getRoleName()); - } - if (principalType == PrincipalType.ROLE) { - validateRole(userName); - } - MRole mRole = getMRole(role.getRoleName()); - long now = System.currentTimeMillis()/1000; - MRoleMap roleMember = new MRoleMap(userName, principalType.toString(), - mRole, (int) now, grantor, grantorType.toString(), grantOption); - pm.makePersistent(roleMember); - commited = commitTransaction(); - success = true; - } finally { - rollbackAndCleanup(commited, null); - } - return success; - } - - /** - * Verify that role with given name exists, if not throw exception - */ - private void validateRole(String roleName) throws NoSuchObjectException { - // if grantee is a role, check if it exists - MRole granteeRole = getMRole(roleName); - if (granteeRole == null) { - throw new NoSuchObjectException("Role " + roleName + " does not exist"); - } - } - - @Override - public boolean revokeRole(Role role, String userName, PrincipalType principalType, - boolean grantOption) throws MetaException, NoSuchObjectException { - boolean success = false; - try { - openTransaction(); - MRoleMap roleMember = getMSecurityUserRoleMap(userName, principalType, - role.getRoleName()); - if (grantOption) { - // Revoke with grant option - only remove the 
grant option but keep the role. - if (roleMember.getGrantOption()) { - roleMember.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with role " + role.getRoleName()); - } - } else { - // No grant option in revoke, remove the whole role. - pm.deletePersistent(roleMember); - } - success = commitTransaction(); - } finally { - rollbackAndCleanup(success, null); - } - return success; - } - - private MRoleMap getMSecurityUserRoleMap(String userName, PrincipalType principalType, - String roleName) { - MRoleMap mRoleMember = null; - boolean commited = false; - Query query = null; - try { - openTransaction(); - query = - pm.newQuery(MRoleMap.class, - "principalName == t1 && principalType == t2 && role.roleName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - query.setUnique(true); - mRoleMember = (MRoleMap) query.executeWithArray(userName, principalType.toString(), roleName); - pm.retrieve(mRoleMember); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); - } - return mRoleMember; - } - - @Override - public boolean removeRole(String roleName) throws MetaException, - NoSuchObjectException { - boolean success = false; - try { - openTransaction(); - MRole mRol = getMRole(roleName); - pm.retrieve(mRol); - if (mRol != null) { - // first remove all the membership, the membership that this role has - // been granted - List roleMap = listMRoleMembers(mRol.getRoleName()); - if (CollectionUtils.isNotEmpty(roleMap)) { - pm.deletePersistentAll(roleMap); - } - List roleMember = listMSecurityPrincipalMembershipRole(mRol - .getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(roleMember)) { - pm.deletePersistentAll(roleMember); - } - - // then remove all the grants - List userGrants = listPrincipalMGlobalGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(userGrants)) { - 
pm.deletePersistentAll(userGrants); - } - - List dbGrants = listPrincipalAllDBGrant(mRol - .getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(dbGrants)) { - pm.deletePersistentAll(dbGrants); - } - - List dcGrants = listPrincipalAllDCGrant(mRol - .getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(dcGrants)) { - pm.deletePersistentAll(dcGrants); - } - - List tabPartGrants = listPrincipalAllTableGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(tabPartGrants)) { - pm.deletePersistentAll(tabPartGrants); - } - - List partGrants = listPrincipalAllPartitionGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(partGrants)) { - pm.deletePersistentAll(partGrants); - } - - List tblColumnGrants = listPrincipalAllTableColumnGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(tblColumnGrants)) { - pm.deletePersistentAll(tblColumnGrants); - } - - List partColumnGrants = listPrincipalAllPartitionColumnGrants( - mRol.getRoleName(), PrincipalType.ROLE); - if (CollectionUtils.isNotEmpty(partColumnGrants)) { - pm.deletePersistentAll(partColumnGrants); - } - - // finally remove the role - pm.deletePersistent(mRol); - } - success = commitTransaction(); - } catch (Exception e) { - throw new MetaException(e.getMessage()); - } finally { - rollbackAndCleanup(success, null); - } - return success; - } - - /** - * Get all the roles in the role hierarchy that this user and groupNames belongs to - */ - private Set listAllRolesInHierarchy(String userName, - List groupNames) { - List ret = new ArrayList<>(); - if(userName != null) { - ret.addAll(listMRoles(userName, PrincipalType.USER)); - } - if (groupNames != null) { - for (String groupName: groupNames) { - ret.addAll(listMRoles(groupName, PrincipalType.GROUP)); - } - } - // get names of these roles and its ancestors - Set roleNames = new HashSet<>(); - getAllRoleAncestors(roleNames, ret); - return roleNames; - } 
- - /** - * Add role names of parentRoles and its parents to processedRoles - */ - private void getAllRoleAncestors(Set processedRoleNames, List parentRoles) { - for (MRoleMap parentRole : parentRoles) { - String parentRoleName = parentRole.getRole().getRoleName(); - if (!processedRoleNames.contains(parentRoleName)) { - // unprocessed role: get its parents, add it to processed, and call this - // function recursively - List nextParentRoles = listMRoles(parentRoleName, PrincipalType.ROLE); - processedRoleNames.add(parentRoleName); - getAllRoleAncestors(processedRoleNames, nextParentRoles); - } - } - } - - public List listMRoles(String principalName, - PrincipalType principalType) { - boolean success = false; - Query query = null; - List mRoleMember = new ArrayList<>(); - - try { - LOG.debug("Executing listRoles"); - - openTransaction(); - query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - query.setUnique(false); - List mRoles = - (List) query.executeWithArray(principalName, principalType.toString()); - pm.retrieveAll(mRoles); - success = commitTransaction(); - - mRoleMember.addAll(mRoles); - - LOG.debug("Done retrieving all objects for listRoles"); - } finally { - rollbackAndCleanup(success, query); - } - - if (principalType == PrincipalType.USER) { - // All users belong to public role implicitly, add that role - // TODO MS-SPLIT Change this back to HMSHandler.PUBLIC once HiveMetaStore has moved to - // stand-alone metastore. 
- //MRole publicRole = new MRole(HMSHandler.PUBLIC, 0, HMSHandler.PUBLIC); - MRole publicRole = new MRole("public", 0, "public"); - mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, null, - null, false)); - } - - return mRoleMember; - } - - @Override - public List listRoles(String principalName, PrincipalType principalType) { - List result = new ArrayList<>(); - List roleMaps = listMRoles(principalName, principalType); - if (roleMaps != null) { - for (MRoleMap roleMap : roleMaps) { - MRole mrole = roleMap.getRole(); - Role role = new Role(mrole.getRoleName(), mrole.getCreateTime(), mrole.getOwnerName()); - result.add(role); - } - } - return result; - } - - @Override - public List listRolesWithGrants(String principalName, - PrincipalType principalType) { - List result = new ArrayList<>(); - List roleMaps = listMRoles(principalName, principalType); - if (roleMaps != null) { - for (MRoleMap roleMap : roleMaps) { - RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( - roleMap.getRole().getRoleName(), - roleMap.getPrincipalName(), - PrincipalType.valueOf(roleMap.getPrincipalType()), - roleMap.getGrantOption(), - roleMap.getAddTime(), - roleMap.getGrantor(), - // no grantor type for public role, hence the null check - roleMap.getGrantorType() == null ? 
null - : PrincipalType.valueOf(roleMap.getGrantorType()) - ); - result.add(rolePrinGrant); - } - } - return result; - } - - private List listMSecurityPrincipalMembershipRole(final String roleName, - final PrincipalType principalType) throws Exception { - LOG.debug("Executing listMSecurityPrincipalMembershipRole"); - - Preconditions.checkState(this.currentTransaction.isActive()); - - try (Query query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2")) { - query.declareParameters("java.lang.String t1, java.lang.String t2"); - final List mRoleMemebership = (List) query.execute(roleName, principalType.toString()); - - LOG.debug("Retrieving all objects for listMSecurityPrincipalMembershipRole"); - pm.retrieveAll(mRoleMemebership); - LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole: {}", mRoleMemebership); - - return Collections.unmodifiableList(new ArrayList<>(mRoleMemebership)); - } - } - - @Override - public Role getRole(String roleName) throws NoSuchObjectException { - MRole mRole = this.getMRole(roleName); - if (mRole == null) { - throw new NoSuchObjectException(roleName + " role can not be found."); - } - return new Role(mRole.getRoleName(), mRole.getCreateTime(), mRole - .getOwnerName()); - } - - private MRole getMRole(String roleName) { - MRole mrole = null; - boolean commited = false; - Query query = null; - try { - openTransaction(); - query = pm.newQuery(MRole.class, "roleName == t1"); - query.declareParameters("java.lang.String t1"); - query.setUnique(true); - mrole = (MRole) query.execute(roleName); - pm.retrieve(mrole); - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, query); - } - return mrole; - } - - @Override - public List listRoleNames() { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listAllRoleNames"); - query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole"); - 
query.setResult("roleName"); - Collection names = (Collection) query.execute(); - List roleNames = new ArrayList<>(); - for (Iterator i = names.iterator(); i.hasNext();) { - roleNames.add((String) i.next()); - } - success = commitTransaction(); - return roleNames; - } finally { - rollbackAndCleanup(success, query); - } - } - - @Override - public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, - List groupNames) throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - List user = this.listPrincipalMGlobalGrants(userName, PrincipalType.USER); - if(CollectionUtils.isNotEmpty(user)) { - Map> userPriv = new HashMap<>(); - List grantInfos = new ArrayList<>(user.size()); - for (int i = 0; i < user.size(); i++) { - MGlobalPrivilege item = user.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - userPriv.put(userName, grantInfos); - ret.setUserPrivileges(userPriv); - } - } - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> groupPriv = new HashMap<>(); - for(String groupName: groupNames) { - List group = - this.listPrincipalMGlobalGrants(groupName, PrincipalType.GROUP); - if(CollectionUtils.isNotEmpty(group)) { - List grantInfos = new ArrayList<>(group.size()); - for (int i = 0; i < group.size(); i++) { - MGlobalPrivilege item = group.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - groupPriv.put(groupName, grantInfos); - } - } - ret.setGroupPrivileges(groupPriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - return ret; - } - - private List getDBPrivilege(String catName, String dbName, - String 
principalName, PrincipalType principalType) { - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - - if (principalName != null) { - List userNameDbPriv = this.listPrincipalMDBGrants( - principalName, principalType, catName, dbName); - if (CollectionUtils.isNotEmpty(userNameDbPriv)) { - List grantInfos = new ArrayList<>( - userNameDbPriv.size()); - for (int i = 0; i < userNameDbPriv.size(); i++) { - MDBPrivilege item = userNameDbPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return Collections.emptyList(); - } - - - @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, - String userName, List groupNames) throws InvalidObjectException, - MetaException { - boolean commited = false; - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - Map> dbUserPriv = new HashMap<>(); - dbUserPriv.put(userName, getDBPrivilege(catName, dbName, userName, - PrincipalType.USER)); - ret.setUserPrivileges(dbUserPriv); - } - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> dbGroupPriv = new HashMap<>(); - for (String groupName : groupNames) { - dbGroupPriv.put(groupName, getDBPrivilege(catName, dbName, groupName, - PrincipalType.GROUP)); - } - ret.setGroupPrivileges(dbGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (CollectionUtils.isNotEmpty(roleNames)) { - Map> dbRolePriv = new HashMap<>(); - for (String roleName : roleNames) { - dbRolePriv - .put(roleName, getDBPrivilege(catName, dbName, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(dbRolePriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - 
return ret; - } - - private List getConnectorPrivilege(String catName, String connectorName, - String principalName, PrincipalType principalType) { - - // normalize string name - catName = normalizeIdentifier(catName); - connectorName = normalizeIdentifier(connectorName); - - if (principalName != null) { - // get all data connector granted privilege - List userNameDcPriv = this.listPrincipalMDCGrants( - principalName, principalType, catName, connectorName); - - // populate and return grantInfos - if (CollectionUtils.isNotEmpty(userNameDcPriv)) { - List grantInfos = new ArrayList<>( - userNameDcPriv.size()); - for (int i = 0; i < userNameDcPriv.size(); i++) { - MDCPrivilege item = userNameDcPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - - // return empty list if no principalName - return Collections.emptyList(); - } - - @Override - public PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connectorName, - String userName, List groupNames) throws InvalidObjectException, - MetaException { - - boolean commited = false; - catName = normalizeIdentifier(catName); - connectorName = normalizeIdentifier(connectorName); - - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - - // get user privileges - if (userName != null) { - Map> connectorUserPriv = new HashMap<>(); - connectorUserPriv.put(userName, getConnectorPrivilege(catName, connectorName, userName, - PrincipalType.USER)); - ret.setUserPrivileges(connectorUserPriv); - } - - // get group privileges - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> dbGroupPriv = new HashMap<>(); - for (String groupName : groupNames) { - dbGroupPriv.put(groupName, getConnectorPrivilege(catName, connectorName, groupName, - PrincipalType.GROUP)); - } - ret.setGroupPrivileges(dbGroupPriv); - } - - // 
get role privileges - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (CollectionUtils.isNotEmpty(roleNames)) { - Map> dbRolePriv = new HashMap<>(); - for (String roleName : roleNames) { - dbRolePriv.put(roleName, getConnectorPrivilege(catName, connectorName, roleName, - PrincipalType.ROLE)); - } - ret.setRolePrivileges(dbRolePriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - return ret; - - - } - - @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, - String tableName, String partition, String userName, - List groupNames) throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - - try { - openTransaction(); - if (userName != null) { - Map> partUserPriv = new HashMap<>(); - partUserPriv.put(userName, getPartitionPrivilege(catName, dbName, - tableName, partition, userName, PrincipalType.USER)); - ret.setUserPrivileges(partUserPriv); - } - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> partGroupPriv = new HashMap<>(); - for (String groupName : groupNames) { - partGroupPriv.put(groupName, getPartitionPrivilege(catName, dbName, tableName, - partition, groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(partGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (CollectionUtils.isNotEmpty(roleNames)) { - Map> partRolePriv = new HashMap<>(); - for (String roleName : roleNames) { - partRolePriv.put(roleName, getPartitionPrivilege(catName, dbName, tableName, - partition, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(partRolePriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - return ret; - } - - @Override - public PrincipalPrivilegeSet 
getTablePrivilegeSet(String catName, String dbName, - String tableName, String userName, List groupNames) - throws InvalidObjectException, MetaException { - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = normalizeIdentifier(tableName); - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - - try { - openTransaction(); - if (userName != null) { - Map> tableUserPriv = new HashMap<>(); - tableUserPriv.put(userName, getTablePrivilege(catName, dbName, - tableName, userName, PrincipalType.USER)); - ret.setUserPrivileges(tableUserPriv); - } - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> tableGroupPriv = new HashMap<>(); - for (String groupName : groupNames) { - tableGroupPriv.put(groupName, getTablePrivilege(catName, dbName, tableName, - groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(tableGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (CollectionUtils.isNotEmpty(roleNames)) { - Map> tableRolePriv = new HashMap<>(); - for (String roleName : roleNames) { - tableRolePriv.put(roleName, getTablePrivilege(catName, dbName, tableName, - roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(tableRolePriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - return ret; - } - - @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, - String tableName, String partitionName, String columnName, - String userName, List groupNames) throws InvalidObjectException, - MetaException { - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - columnName = normalizeIdentifier(columnName); - catName = normalizeIdentifier(catName); - - boolean commited = false; - PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - try { - openTransaction(); - if (userName != null) { - Map> columnUserPriv = new HashMap<>(); - 
columnUserPriv.put(userName, getColumnPrivilege(catName, dbName, tableName, - columnName, partitionName, userName, PrincipalType.USER)); - ret.setUserPrivileges(columnUserPriv); - } - if (CollectionUtils.isNotEmpty(groupNames)) { - Map> columnGroupPriv = new HashMap<>(); - for (String groupName : groupNames) { - columnGroupPriv.put(groupName, getColumnPrivilege(catName, dbName, tableName, - columnName, partitionName, groupName, PrincipalType.GROUP)); - } - ret.setGroupPrivileges(columnGroupPriv); - } - Set roleNames = listAllRolesInHierarchy(userName, groupNames); - if (CollectionUtils.isNotEmpty(roleNames)) { - Map> columnRolePriv = new HashMap<>(); - for (String roleName : roleNames) { - columnRolePriv.put(roleName, getColumnPrivilege(catName, dbName, tableName, - columnName, partitionName, roleName, PrincipalType.ROLE)); - } - ret.setRolePrivileges(columnRolePriv); - } - commited = commitTransaction(); - } finally { - rollbackAndCleanup(commited, null); - } - return ret; - } - - private List getPartitionPrivilege(String catName, String dbName, - String tableName, String partName, String principalName, - PrincipalType principalType) { - - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - - if (principalName != null) { - List userNameTabPartPriv = this - .listPrincipalMPartitionGrants(principalName, principalType, - catName, dbName, tableName, partName); - if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { - List grantInfos = new ArrayList<>( - userNameTabPartPriv.size()); - for (int i = 0; i < userNameTabPartPriv.size(); i++) { - MPartitionPrivilege item = userNameTabPartPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), - getPrincipalTypeFromStr(item.getGrantorType()), item.getGrantOption())); - - } - return grantInfos; - } - } - return new ArrayList<>(0); - } - - private PrincipalType getPrincipalTypeFromStr(String 
str) { - return str == null ? null : PrincipalType.valueOf(str); - } - - private List getTablePrivilege(String catName, String dbName, - String tableName, String principalName, PrincipalType principalType) { - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - - if (principalName != null) { - List userNameTabPartPriv = this - .listAllMTableGrants(principalName, principalType, - catName, dbName, tableName); - if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { - List grantInfos = new ArrayList<>( - userNameTabPartPriv.size()); - for (int i = 0; i < userNameTabPartPriv.size(); i++) { - MTablePrivilege item = userNameTabPartPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return Collections.emptyList(); - } - - private List getColumnPrivilege(String catName, String dbName, - String tableName, String columnName, String partitionName, - String principalName, PrincipalType principalType) { - - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - columnName = normalizeIdentifier(columnName); - catName = normalizeIdentifier(catName); - - if (partitionName == null) { - List userNameColumnPriv = this - .listPrincipalMTableColumnGrants(principalName, principalType, - catName, dbName, tableName, columnName); - if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { - List grantInfos = new ArrayList<>( - userNameColumnPriv.size()); - for (int i = 0; i < userNameColumnPriv.size(); i++) { - MTableColumnPrivilege item = userNameColumnPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } else { - List userNameColumnPriv = this 
- .listPrincipalMPartitionColumnGrants(principalName, - principalType, catName, dbName, tableName, partitionName, columnName); - if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { - List grantInfos = new ArrayList<>( - userNameColumnPriv.size()); - for (int i = 0; i < userNameColumnPriv.size(); i++) { - MPartitionColumnPrivilege item = userNameColumnPriv.get(i); - grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item - .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item - .getGrantorType()), item.getGrantOption())); - } - return grantInfos; - } - } - return Collections.emptyList(); - } - - @Override - public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, - MetaException, NoSuchObjectException { - boolean committed = false; - int now = (int) (System.currentTimeMillis() / 1000); - try { - openTransaction(); - List persistentObjs = new ArrayList<>(); - - List privilegeList = privileges.getPrivileges(); - - if (CollectionUtils.isNotEmpty(privilegeList)) { - Iterator privIter = privilegeList.iterator(); - Set privSet = new HashSet<>(); - while (privIter.hasNext()) { - HiveObjectPrivilege privDef = privIter.next(); - HiveObjectRef hiveObject = privDef.getHiveObject(); - String privilegeStr = privDef.getGrantInfo().getPrivilege(); - String[] privs = privilegeStr.split(","); - String userName = privDef.getPrincipalName(); - String authorizer = privDef.getAuthorizer(); - PrincipalType principalType = privDef.getPrincipalType(); - String grantor = privDef.getGrantInfo().getGrantor(); - String grantorType = privDef.getGrantInfo().getGrantorType().toString(); - boolean grantOption = privDef.getGrantInfo().isGrantOption(); - privSet.clear(); - - if(principalType == PrincipalType.ROLE){ - validateRole(userName); - } - - String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : - getDefaultCatalog(conf); - if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { - List globalPrivs = this - .listPrincipalMGlobalGrants(userName, principalType, authorizer); - for (MGlobalPrivilege priv : globalPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted by " + grantor); - } - MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege(userName, - principalType.toString(), privilege, now, grantor, grantorType, grantOption, - authorizer); - persistentObjs.add(mGlobalPrivs); - } - } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); - List dbPrivs = this.listPrincipalMDBGrants( - userName, principalType, catName, hiveObject.getDbName(), authorizer); - for (MDBPrivilege priv : dbPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on database " - + hiveObject.getDbName() + " by " + grantor); - } - MDBPrivilege mDb = new MDBPrivilege(userName, principalType - .toString(), dbObj, privilege, now, grantor, grantorType, grantOption, authorizer); - persistentObjs.add(mDb); - } - } else if (hiveObject.getObjectType() == HiveObjectType.DATACONNECTOR) { - MDataConnector dcObj = getMDataConnector(hiveObject.getObjectName()); - List dcPrivs = this.listPrincipalMDCGrants(userName, principalType, - hiveObject.getObjectName(), authorizer); - for (MDCPrivilege priv : dcPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new 
InvalidObjectException(privilege - + " is already granted on data connector " - + hiveObject.getDbName() + " by " + grantor); - } - MDCPrivilege mDc = new MDCPrivilege(userName, principalType - .toString(), dcObj, privilege, now, grantor, grantorType, grantOption, authorizer); - persistentObjs.add(mDc); - } - } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject - .getObjectName()); - if (tblObj != null) { - List tablePrivs = this - .listAllMTableGrants(userName, principalType, - catName, hiveObject.getDbName(), hiveObject.getObjectName(), authorizer); - for (MTablePrivilege priv : tablePrivs) { - if (priv.getGrantor() != null - && priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on table [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "] by " + grantor); - } - MTablePrivilege mTab = new MTablePrivilege( - userName, principalType.toString(), tblObj, - privilege, now, grantor, grantorType, grantOption, authorizer); - persistentObjs.add(mTab); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - MPartition partObj = this.getMPartition(catName, hiveObject.getDbName(), - hiveObject.getObjectName(), hiveObject.getPartValues(), null); - String partName = null; - if (partObj != null) { - partName = partObj.getPartitionName(); - List partPrivs = this - .listPrincipalMPartitionGrants(userName, - principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), partObj.getPartitionName(), authorizer); - for (MPartitionPrivilege priv : partPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new 
InvalidObjectException(privilege - + " is already granted on partition [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "," - + partName + "] by " + grantor); - } - MPartitionPrivilege mTab = new MPartitionPrivilege(userName, - principalType.toString(), partObj, privilege, now, grantor, - grantorType, grantOption, authorizer); - persistentObjs.add(mTab); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject - .getObjectName()); - if (tblObj != null) { - if (hiveObject.getPartValues() != null) { - MPartition partObj = null; - List colPrivs = null; - partObj = this.getMPartition(catName, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getPartValues(), tblObj); - if (partObj == null) { - continue; - } - colPrivs = this.listPrincipalMPartitionColumnGrants( - userName, principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), partObj.getPartitionName(), - hiveObject.getColumnName(), authorizer); - - for (MPartitionColumnPrivilege priv : colPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on column " - + hiveObject.getColumnName() + " [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "," - + partObj.getPartitionName() + "] by " + grantor); - } - MPartitionColumnPrivilege mCol = new MPartitionColumnPrivilege(userName, - principalType.toString(), partObj, hiveObject - .getColumnName(), privilege, now, grantor, grantorType, - grantOption, authorizer); - persistentObjs.add(mCol); - } - - } else { - List colPrivs = null; - colPrivs = this.listPrincipalMTableColumnGrants( - userName, principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getColumnName(), authorizer); - - for 
(MTableColumnPrivilege priv : colPrivs) { - if (priv.getGrantor().equalsIgnoreCase(grantor)) { - privSet.add(priv.getPrivilege()); - } - } - for (String privilege : privs) { - if (privSet.contains(privilege)) { - throw new InvalidObjectException(privilege - + " is already granted on column " - + hiveObject.getColumnName() + " [" - + hiveObject.getDbName() + "," - + hiveObject.getObjectName() + "] by " + grantor); - } - MTableColumnPrivilege mCol = new MTableColumnPrivilege(userName, - principalType.toString(), tblObj, hiveObject - .getColumnName(), privilege, now, grantor, grantorType, - grantOption, authorizer); - persistentObjs.add(mCol); - } - } - } - } - } - } - if (CollectionUtils.isNotEmpty(persistentObjs)) { - pm.makePersistentAll(persistentObjs); - } - committed = commitTransaction(); - } finally { - rollbackAndCleanup(committed, null); - } - return committed; - } - - @Override - public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) - throws InvalidObjectException, MetaException, NoSuchObjectException { - boolean committed = false; - try { - openTransaction(); - List persistentObjs = new ArrayList<>(); - - List privilegeList = privileges.getPrivileges(); - - - if (CollectionUtils.isNotEmpty(privilegeList)) { - Iterator privIter = privilegeList.iterator(); - - while (privIter.hasNext()) { - HiveObjectPrivilege privDef = privIter.next(); - HiveObjectRef hiveObject = privDef.getHiveObject(); - String privilegeStr = privDef.getGrantInfo().getPrivilege(); - if (privilegeStr == null || privilegeStr.trim().equals("")) { - continue; - } - String[] privs = privilegeStr.split(","); - String userName = privDef.getPrincipalName(); - PrincipalType principalType = privDef.getPrincipalType(); - - String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : - getDefaultCatalog(conf); - if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { - List mSecUser = this.listPrincipalMGlobalGrants( - userName, principalType); - boolean found = false; - for (String privilege : privs) { - for (MGlobalPrivilege userGrant : mSecUser) { - String userGrantPrivs = userGrant.getPrivilege(); - if (privilege.equals(userGrantPrivs)) { - found = true; - if (grantOption) { - if (userGrant.getGrantOption()) { - userGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(userGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException( - "No user grant found for privileges " + privilege); - } - } - - } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); - String db = hiveObject.getDbName(); - boolean found = false; - List dbGrants = this.listPrincipalMDBGrants( - userName, principalType, catName, db); - for (String privilege : privs) { - for (MDBPrivilege dbGrant : dbGrants) { - String dbGrantPriv = dbGrant.getPrivilege(); - if (privilege.equals(dbGrantPriv)) { - found = true; - if (grantOption) { - if (dbGrant.getGrantOption()) { - dbGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(dbGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException( - "No database grant found for privileges " + privilege - + " on database " + db); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.DATACONNECTOR) { - MDataConnector dCObj = getMDataConnector(hiveObject.getObjectName()); - String dc = hiveObject.getObjectName(); - boolean found = false; - List dcGrants = this.listPrincipalMDCGrants( - userName, principalType, catName, dc); - for (String privilege : 
privs) { - for (MDCPrivilege dcGrant : dcGrants) { - String dcGrantPriv = dcGrant.getPrivilege(); - if (privilege.equals(dcGrantPriv)) { - found = true; - if (grantOption) { - if (dcGrant.getGrantOption()) { - dcGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(dcGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException( - "No dataconnector grant found for privileges " + privilege - + " on data connector " + dc); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - boolean found = false; - List tableGrants = this - .listAllMTableGrants(userName, principalType, - catName, hiveObject.getDbName(), hiveObject.getObjectName()); - for (String privilege : privs) { - for (MTablePrivilege tabGrant : tableGrants) { - String tableGrantPriv = tabGrant.getPrivilege(); - if (privilege.equalsIgnoreCase(tableGrantPriv)) { - found = true; - if (grantOption) { - if (tabGrant.getGrantOption()) { - tabGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(tabGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + hiveObject.getObjectName() - + ", database is " + hiveObject.getDbName()); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - - boolean found = false; - Table tabObj = - this.getTable(catName, hiveObject.getDbName(), - hiveObject.getObjectName(), null); - String partName = null; - if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); - } - List partitionGrants = this - .listPrincipalMPartitionGrants(userName, principalType, - catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName); - for 
(String privilege : privs) { - for (MPartitionPrivilege partGrant : partitionGrants) { - String partPriv = partGrant.getPrivilege(); - if (partPriv.equalsIgnoreCase(privilege)) { - found = true; - if (grantOption) { - if (partGrant.getGrantOption()) { - partGrant.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(partGrant); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", partition is " + partName + ", database is " + tabObj.getDbName()); - } - } - } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - - Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject - .getObjectName(), null); - String partName = null; - if (hiveObject.getPartValues() != null) { - partName = Warehouse.makePartName(tabObj.getPartitionKeys(), - hiveObject.getPartValues()); - } - - if (partName != null) { - List mSecCol = listPrincipalMPartitionColumnGrants( - userName, principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), partName, hiveObject.getColumnName()); - boolean found = false; - for (String privilege : privs) { - for (MPartitionColumnPrivilege col : mSecCol) { - String colPriv = col.getPrivilege(); - if (colPriv.equalsIgnoreCase(privilege)) { - found = true; - if (grantOption) { - if (col.getGrantOption()) { - col.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(col); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", partition is " + partName + ", column name = " - + hiveObject.getColumnName() + ", database is " - + tabObj.getDbName()); - } - } - } else { - List mSecCol = 
listPrincipalMTableColumnGrants( - userName, principalType, catName, hiveObject.getDbName(), hiveObject - .getObjectName(), hiveObject.getColumnName()); - boolean found = false; - for (String privilege : privs) { - for (MTableColumnPrivilege col : mSecCol) { - String colPriv = col.getPrivilege(); - if (colPriv.equalsIgnoreCase(privilege)) { - found = true; - if (grantOption) { - if (col.getGrantOption()) { - col.setGrantOption(false); - } else { - throw new MetaException("User " + userName - + " does not have grant option with privilege " + privilege); - } - } - persistentObjs.add(col); - break; - } - } - if (!found) { - throw new InvalidObjectException("No grant (" + privilege - + ") found " + " on table " + tabObj.getTableName() - + ", column name = " - + hiveObject.getColumnName() + ", database is " - + tabObj.getDbName()); - } - } - } - - } - } - } - - if (CollectionUtils.isNotEmpty(persistentObjs)) { - if (grantOption) { - // If grant option specified, only update the privilege, don't remove it. 
          // Grant option has already been removed from the privileges in the section above
        } else {
          pm.deletePersistentAll(persistentObjs);
        }
      }
      committed = commitTransaction();
    } finally {
      rollbackAndCleanup(committed, null);
    }
    return committed;
  }

  /**
   * Compares two {@link HiveObjectPrivilege}s while ignoring their createTime.
   * It temporarily zeroes both createTimes, delegates to compareTo, then restores
   * the original values.
   * NOTE(review): this transiently mutates its arguments; safe only if the
   * privileges are not shared across threads while the compare runs — TODO confirm.
   */
  class PrivilegeWithoutCreateTimeComparator implements Comparator {
    @Override
    public int compare(HiveObjectPrivilege o1, HiveObjectPrivilege o2) {
      int createTime1 = o1.getGrantInfo().getCreateTime();
      int createTime2 = o2.getGrantInfo().getCreateTime();
      // Zero out createTime on both sides so it does not affect the ordering.
      o1.getGrantInfo().setCreateTime(0);
      o2.getGrantInfo().setCreateTime(0);
      int result = o1.compareTo(o2);
      // Restore the original timestamps before returning.
      o1.getGrantInfo().setCreateTime(createTime1);
      o2.getGrantInfo().setCreateTime(createTime2);
      return result;
    }
  }

  /**
   * Synchronizes the stored grants on {@code objToRefresh} with {@code grantPrivileges}:
   * existing grants (for the given authorizer) that are not in the new bag are revoked,
   * and new grants not already present are added. Comparison ignores createTime via
   * {@link PrivilegeWithoutCreateTimeComparator}.
   *
   * @param objToRefresh   database / data connector / table / column to refresh
   * @param authorizer     authorizer whose grants are being synced
   * @param grantPrivileges the desired set of privileges
   * @return true if the transaction committed
   */
  @Override
  public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
      throws InvalidObjectException, MetaException, NoSuchObjectException {
    boolean committed = false;
    try {
      openTransaction();
      // TreeSets ordered by the createTime-insensitive comparator so that
      // "same privilege, different timestamp" entries collapse to one.
      Set revokePrivilegeSet
          = new TreeSet<>(new PrivilegeWithoutCreateTimeComparator());
      Set grantPrivilegeSet
          = new TreeSet<>(new PrivilegeWithoutCreateTimeComparator());

      List grants = null;
      String catName = objToRefresh.isSetCatName() ?
          objToRefresh.getCatName() :
          getDefaultCatalog(conf);
      switch (objToRefresh.getObjectType()) {
        case DATABASE:
          try {
            grants = this.listDBGrantsAll(catName, objToRefresh.getDbName(), authorizer);
          } catch (Exception e) {
            // NOTE(review): wrapping with only the message drops the original cause.
            throw new MetaException(e.getMessage());
          }
          break;
        case DATACONNECTOR:
          try {
            grants = this.listDCGrantsAll(objToRefresh.getObjectName(), authorizer);
          } catch (Exception e) {
            // NOTE(review): wrapping with only the message drops the original cause.
            throw new MetaException(e.getMessage());
          }
          break;
        case TABLE:
          grants = listTableGrantsAll(catName, objToRefresh.getDbName(), objToRefresh.getObjectName(), authorizer);
          break;
        case COLUMN:
          // Column refresh operates on all columns of the table at once.
          Preconditions.checkArgument(objToRefresh.getColumnName()==null, "columnName must be null");
          grants = getTableAllColumnGrants(catName, objToRefresh.getDbName(),
              objToRefresh.getObjectName(), authorizer);
          break;
        default:
          throw new MetaException("Unexpected object type " + objToRefresh.getObjectType());
      }
      revokePrivilegeSet.addAll(grants);

      // Optimize revoke/grant list, remove the overlapping
      if (grantPrivileges.getPrivileges() != null) {
        for (HiveObjectPrivilege grantPrivilege : grantPrivileges.getPrivileges()) {
          if (revokePrivilegeSet.contains(grantPrivilege)) {
            // Already granted: keep it, no revoke and no re-grant needed.
            revokePrivilegeSet.remove(grantPrivilege);
          } else {
            grantPrivilegeSet.add(grantPrivilege);
          }
        }
      }
      if (!revokePrivilegeSet.isEmpty()) {
        LOG.debug("Found " + revokePrivilegeSet.size() + " new revoke privileges to be synced.");
        PrivilegeBag remainingRevokePrivileges = new PrivilegeBag();
        for (HiveObjectPrivilege revokePrivilege : revokePrivilegeSet) {
          remainingRevokePrivileges.addToPrivileges(revokePrivilege);
        }
        revokePrivileges(remainingRevokePrivileges, false);
      } else {
        LOG.debug("No new revoke privileges are required to be synced.");
      }
      if (!grantPrivilegeSet.isEmpty()) {
        LOG.debug("Found " + grantPrivilegeSet.size() + " new grant privileges to be synced.");
        PrivilegeBag remainingGrantPrivileges = new PrivilegeBag();
        for
            (HiveObjectPrivilege grantPrivilege : grantPrivilegeSet) {
          remainingGrantPrivileges.addToPrivileges(grantPrivilege);
        }
        grantPrivileges(remainingGrantPrivileges);
      } else {
        LOG.debug("No new grant privileges are required to be synced.");
      }
      committed = commitTransaction();
    } finally {
      rollbackAndCleanup(committed, null);
    }
    return committed;
  }

  /**
   * Fetches all column-level privileges on a table, preferring the direct-SQL
   * path and falling back to JDO (via GetListHelper's dual-path contract).
   * Identifiers are normalized before querying.
   */
  private List getTableAllColumnGrants(String catName, String dbName,
      String tableName, String authorizer)
      throws MetaException, NoSuchObjectException {
    return new GetListHelper(normalizeIdentifier(catName),
        normalizeIdentifier(dbName), normalizeIdentifier(tableName), true, true) {

      @Override
      protected String describeResult() {
        return "Table column privileges.";
      }

      @Override
      protected List getSqlResult(GetHelper> ctx)
          throws MetaException {
        return directSql.getTableAllColumnGrants(catName, dbName, tblName, authorizer);
      }

      @Override
      protected List getJdoResult(GetHelper> ctx) {
        return convertTableCols(listTableAllColumnGrants(catName, dbName, tblName, authorizer));
      }
    }.run(false);
  }

  /**
   * Lists the membership records (MRoleMap) of the given role via a JDO query.
   * Returns an empty list when the role has no members.
   */
  public List listMRoleMembers(String roleName) {
    boolean success = false;
    Query query = null;
    List mRoleMemeberList = new ArrayList<>();
    try {
      LOG.debug("Executing listRoleMembers");

      openTransaction();
      query = pm.newQuery(MRoleMap.class, "role.roleName == t1");
      query.declareParameters("java.lang.String t1");
      query.setUnique(false);
      List mRoles = (List) query.execute(roleName);
      // Materialize the results before the transaction is closed.
      pm.retrieveAll(mRoles);
      success = commitTransaction();

      mRoleMemeberList.addAll(mRoles);

      LOG.debug("Done retrieving all objects for listRoleMembers");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mRoleMemeberList;
  }

  /**
   * Converts the persistent role membership records into thrift
   * {@link RolePrincipalGrant} objects.
   */
  @Override
  public List listRoleMembers(String roleName) {
    List roleMaps = listMRoleMembers(roleName);
    List rolePrinGrantList = new ArrayList<>();

    if (roleMaps != null) {
      for (MRoleMap roleMap : roleMaps) {
        RolePrincipalGrant
            rolePrinGrant = new RolePrincipalGrant(
            roleMap.getRole().getRoleName(),
            roleMap.getPrincipalName(),
            PrincipalType.valueOf(roleMap.getPrincipalType()),
            roleMap.getGrantOption(),
            roleMap.getAddTime(),
            roleMap.getGrantor(),
            // no grantor type for public role, hence the null check
            roleMap.getGrantorType() == null ? null
                : PrincipalType.valueOf(roleMap.getGrantorType())
        );
        rolePrinGrantList.add(rolePrinGrant);

      }
    }
    return rolePrinGrantList;
  }

  // Convenience overload: list a principal's global grants for any authorizer.
  private List listPrincipalMGlobalGrants(String principalName,
      PrincipalType principalType) {
    return listPrincipalMGlobalGrants(principalName, principalType, null);
  }

  /**
   * Lists a principal's global (instance-wide) privilege records, optionally
   * filtered by authorizer. Returns an empty list when principalName is null
   * or no grants exist.
   */
  private List listPrincipalMGlobalGrants(String principalName,
      PrincipalType principalType, String authorizer) {
    boolean commited = false;
    Query query = null;
    List userNameDbPriv = new ArrayList<>();
    try {
      List mPrivs = null;
      openTransaction();
      if (principalName != null) {
        if (authorizer != null) {
          query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2 "
              + "&& authorizer == t3");
          query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
          mPrivs = (List) query
              .executeWithArray(principalName, principalType.toString(), authorizer);
        } else {
          query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2 ");
          query.declareParameters("java.lang.String t1, java.lang.String t2");
          mPrivs = (List) query
              .executeWithArray(principalName, principalType.toString());
        }
        // Materialize before the query is closed by rollbackAndCleanup.
        pm.retrieveAll(mPrivs);
      }
      commited = commitTransaction();
      if (mPrivs != null) {
        userNameDbPriv.addAll(mPrivs);
      }
    } finally {
      rollbackAndCleanup(commited, query);
    }
    return userNameDbPriv;
  }

  /**
   * Thrift-facing view of a principal's global grants; converts each
   * MGlobalPrivilege into a HiveObjectPrivilege with a GLOBAL object ref.
   */
  @Override
  public List listPrincipalGlobalGrants(String principalName,
      PrincipalType principalType) {
    List mUsers =
        listPrincipalMGlobalGrants(principalName, principalType);
    if (mUsers.isEmpty()) {
      return
          Collections.emptyList();
    }
    List result = new ArrayList<>();
    for (int i = 0; i < mUsers.size(); i++) {
      MGlobalPrivilege sUsr = mUsers.get(i);
      HiveObjectRef objectRef = new HiveObjectRef(
          HiveObjectType.GLOBAL, null, null, null, null);
      HiveObjectPrivilege secUser = new HiveObjectPrivilege(
          objectRef, sUsr.getPrincipalName(), principalType,
          new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr
              .getCreateTime(), sUsr.getGrantor(), PrincipalType
              .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption()),
          sUsr.getAuthorizer());
      result.add(secUser);
    }
    return result;
  }

  /** Lists every global privilege record in the metastore, for all principals. */
  @Override
  public List listGlobalGrantsAll() {
    boolean commited = false;
    Query query = null;
    try {
      openTransaction();
      query = pm.newQuery(MGlobalPrivilege.class);
      List userNameDbPriv = (List) query.execute();
      pm.retrieveAll(userNameDbPriv);
      commited = commitTransaction();
      return convertGlobal(userNameDbPriv);
    } finally {
      rollbackAndCleanup(commited, query);
    }
  }

  // Converts persistent global privilege records to thrift HiveObjectPrivilege.
  private List convertGlobal(List privs) {
    List result = new ArrayList<>();
    for (MGlobalPrivilege priv : privs) {
      String pname = priv.getPrincipalName();
      String authorizer = priv.getAuthorizer();
      PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType());

      HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null);
      PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(),
          priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption());

      result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer));
    }
    return result;
  }

  // Convenience overload: list a principal's DB grants for any authorizer.
  private List listPrincipalMDBGrants(String principalName,
      PrincipalType principalType, String catName, String dbName) {
    return listPrincipalMDBGrants(principalName, principalType, catName, dbName, null);
  }

  /**
   * Lists a principal's database-level privilege records in the given
   * catalog/database, optionally filtered by authorizer.
   */
  private List listPrincipalMDBGrants(String principalName,
      PrincipalType principalType, String catName, String
      dbName, String authorizer) {
    boolean success = false;
    Query query = null;
    List mSecurityDBList = new ArrayList<>();
    dbName = normalizeIdentifier(dbName);
    try {
      LOG.debug("Executing listPrincipalDBGrants");

      openTransaction();
      List mPrivs;
      if (authorizer != null) {
        query = pm.newQuery(MDBPrivilege.class,
            "principalName == t1 && principalType == t2 && database.name == t3 && " +
                "database.catalogName == t4 && authorizer == t5");
        query.declareParameters(
            "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, "
                + "java.lang.String t5");
        mPrivs = (List) query.executeWithArray(principalName, principalType.toString(),
            dbName, catName, authorizer);
      } else {
        query = pm.newQuery(MDBPrivilege.class,
            "principalName == t1 && principalType == t2 && database.name == t3 && database.catalogName == t4");
        query.declareParameters(
            "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4");
        mPrivs = (List) query.executeWithArray(principalName, principalType.toString(),
            dbName, catName);
      }
      // Materialize before the query is closed.
      pm.retrieveAll(mPrivs);
      success = commitTransaction();

      mSecurityDBList.addAll(mPrivs);
      LOG.debug("Done retrieving all objects for listPrincipalDBGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mSecurityDBList;
  }

  // Convenience overload: list a principal's data connector grants for any authorizer.
  private List listPrincipalMDCGrants(String principalName,
      PrincipalType principalType, String dcName) {
    return listPrincipalMDCGrants(principalName, principalType, dcName, null);
  }

  /**
   * Lists a principal's data-connector privilege records, optionally filtered
   * by authorizer. The connector name is normalized before querying.
   */
  private List listPrincipalMDCGrants(String principalName,
      PrincipalType principalType, String dcName, String authorizer) {
    boolean success = false;
    Query query = null;
    List mSecurityDCList = new ArrayList<>();
    dcName = normalizeIdentifier(dcName);
    try {
      LOG.debug("Executing listPrincipalDCGrants");

      openTransaction();
      List mPrivs;
      if (authorizer != null) {
        query = pm.newQuery(MDCPrivilege.class,
            "principalName == t1 && principalType == t2 && dataConnector.name == t3 && " +
                "authorizer == t4");
        query.declareParameters(
            "java.lang.String t1, java.lang.String t2, java.lang.String t3, "
                + "java.lang.String t4");
        mPrivs = (List) query.executeWithArray(principalName, principalType.toString(),
            dcName, authorizer);
      } else {
        query = pm.newQuery(MDCPrivilege.class,
            "principalName == t1 && principalType == t2 && dataConnector.name == t3");
        query.declareParameters(
            "java.lang.String t1, java.lang.String t2, java.lang.String t3");
        mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), dcName);
      }
      pm.retrieveAll(mPrivs);
      success = commitTransaction();

      mSecurityDCList.addAll(mPrivs);
      LOG.debug("Done retrieving all objects for listPrincipalDCGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mSecurityDCList;
  }

  /**
   * Thrift-facing view of a principal's DB grants; converts each MDBPrivilege
   * into a HiveObjectPrivilege carrying a DATABASE object ref with catalog set.
   */
  @Override
  public List listPrincipalDBGrants(String principalName,
      PrincipalType principalType,
      String catName, String dbName) {
    List mDbs = listPrincipalMDBGrants(principalName, principalType, catName, dbName);
    if (mDbs.isEmpty()) {
      return Collections.emptyList();
    }
    List result = new ArrayList<>();
    for (int i = 0; i < mDbs.size(); i++) {
      MDBPrivilege sDB = mDbs.get(i);
      HiveObjectRef objectRef = new HiveObjectRef(
          HiveObjectType.DATABASE, dbName, null, null, null);
      objectRef.setCatName(catName);
      HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
          sDB.getPrincipalName(), principalType,
          new PrivilegeGrantInfo(sDB.getPrivilege(), sDB
              .getCreateTime(), sDB.getGrantor(), PrincipalType
              .valueOf(sDB.getGrantorType()), sDB.getGrantOption()), sDB.getAuthorizer());
      result.add(secObj);
    }
    return result;
  }

  /** Lists all DB-level grants held by a principal, across all databases. */
  @Override
  public List listPrincipalDBGrantsAll(String principalName, PrincipalType principalType) {
    List results = Collections.emptyList();
    boolean success = false;
    try {
      openTransaction();
      results = convertDB(listPrincipalAllDBGrant(principalName,
          principalType));
      success = commitTransaction();
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      rollbackAndCleanup(success, null);
    }
    return results;
  }

  /** Lists every grant on the given database, for all principals/authorizers. */
  @Override
  public List listDBGrantsAll(String catName, String dbName) {
    List results = Collections.emptyList();
    boolean success = false;
    try {
      openTransaction();
      results = listDBGrantsAll(catName, dbName, null);
      success = commitTransaction();
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      rollbackAndCleanup(success, null);
    }
    return results;
  }

  // Authorizer-filtered variant; caller must hold an open transaction.
  private List listDBGrantsAll(String catName, String dbName, String authorizer) throws Exception {
    return convertDB(listDatabaseGrants(catName, dbName, authorizer));
  }

  // Converts persistent DB privilege records to thrift HiveObjectPrivilege,
  // propagating the catalog name from the owning MDatabase.
  private List convertDB(List privs) {
    List result = new ArrayList<>();
    for (MDBPrivilege priv : privs) {
      String pname = priv.getPrincipalName();
      String authorizer = priv.getAuthorizer();
      PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType());
      String database = priv.getDatabase().getName();

      HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATABASE, database,
          null, null, null);
      objectRef.setCatName(priv.getDatabase().getCatalogName());
      PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(),
          priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption());

      result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer));
    }
    return result;
  }

  /**
   * All DB-level privilege records for a principal (or every record when the
   * principal is null). Requires an already-active transaction; returns an
   * unmodifiable snapshot detached from the query.
   */
  private List listPrincipalAllDBGrant(String principalName, PrincipalType principalType)
      throws Exception {
    final List mSecurityDBList;

    LOG.debug("Executing listPrincipalAllDBGrant");

    // Caller owns the transaction; this method only queries within it.
    Preconditions.checkState(this.currentTransaction.isActive());

    if (principalName != null && principalType != null) {
      try (Query query = pm.newQuery(MDBPrivilege.class, "principalName == t1 && principalType == t2")) {
        query.declareParameters("java.lang.String t1, java.lang.String t2");
        mSecurityDBList = (List) query.execute(principalName, principalType.toString());
        pm.retrieveAll(mSecurityDBList);
        LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant: {}", mSecurityDBList);
        // Copy before the try-with-resources closes the query's backing result.
        return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList));
      }
    } else {
      try (Query query = pm.newQuery(MDBPrivilege.class)) {
        mSecurityDBList = (List) query.execute();
        pm.retrieveAll(mSecurityDBList);
        LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant: {}", mSecurityDBList);
        return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList));
      }
    }
  }

  /**
   * Thrift-facing view of a principal's data-connector grants; converts each
   * MDCPrivilege into a HiveObjectPrivilege with a DATACONNECTOR object ref.
   */
  @Override
  public List listPrincipalDCGrants(String principalName,
      PrincipalType principalType,
      String dcName) {
    List mDcs = listPrincipalMDCGrants(principalName, principalType, dcName);
    if (mDcs.isEmpty()) {
      return Collections.emptyList();
    }
    List result = new ArrayList<>();
    for (int i = 0; i < mDcs.size(); i++) {
      MDCPrivilege sDC = mDcs.get(i);
      HiveObjectRef objectRef = new HiveObjectRef(
          HiveObjectType.DATACONNECTOR, null, dcName, null, null);
      HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
          sDC.getPrincipalName(), principalType,
          new PrivilegeGrantInfo(sDC.getPrivilege(), sDC
              .getCreateTime(), sDC.getGrantor(), PrincipalType
              .valueOf(sDC.getGrantorType()), sDC.getGrantOption()), sDC.getAuthorizer());
      result.add(secObj);
    }
    return result;
  }

  /** Lists all data-connector grants held by a principal, across all connectors. */
  @Override
  public List listPrincipalDCGrantsAll(String principalName, PrincipalType principalType) {
    List results = Collections.emptyList();
    boolean success = false;
    try {
      openTransaction();
      results = convertDC(listPrincipalAllDCGrant(principalName, principalType));
      success = commitTransaction();
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      rollbackAndCleanup(success, null);
    }
    return results;
  }

  /** Lists every grant on the given data connector, for all principals/authorizers. */
  @Override
  public List listDCGrantsAll(String
      dcName) {
    List results = Collections.emptyList();
    boolean success = false;
    try {
      openTransaction();
      results = listDCGrantsAll(dcName, null);
      success = commitTransaction();
    } catch (Exception e) {
      throw new RuntimeException(e);
    } finally {
      rollbackAndCleanup(success, null);
    }
    return results;
  }

  // Authorizer-filtered variant; caller must hold an open transaction.
  private List listDCGrantsAll(String dcName, String authorizer) throws Exception {
    return convertDC(listDataConnectorGrants(dcName, authorizer));
  }

  // Converts persistent data-connector privilege records to thrift HiveObjectPrivilege.
  private List convertDC(List privs) {
    List result = new ArrayList<>();
    for (MDCPrivilege priv : privs) {
      String pname = priv.getPrincipalName();
      String authorizer = priv.getAuthorizer();
      PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType());
      String dataConnectorName = priv.getDataConnector().getName();

      HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATACONNECTOR, null,
          dataConnectorName, null, null);
      PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(),
          priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption());

      result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer));
    }
    return result;
  }

  /**
   * All data-connector privilege records for a principal (or every record when
   * the principal is null). Requires an already-active transaction; returns an
   * unmodifiable snapshot detached from the query.
   */
  private List listPrincipalAllDCGrant(String principalName, PrincipalType principalType)
      throws Exception {
    final List mSecurityDCList;

    LOG.debug("Executing listPrincipalAllDCGrant");

    // Caller owns the transaction; this method only queries within it.
    Preconditions.checkState(this.currentTransaction.isActive());

    if (principalName != null && principalType != null) {
      try (Query query = pm.newQuery(MDCPrivilege.class, "principalName == t1 && principalType == t2")) {
        query.declareParameters("java.lang.String t1, java.lang.String t2");
        mSecurityDCList = (List) query.execute(principalName, principalType.toString());
        pm.retrieveAll(mSecurityDCList);
        LOG.debug("Done retrieving all objects for listPrincipalAllDCGrant: {}", mSecurityDCList);
        // Copy before the try-with-resources closes the query's backing result.
        return Collections.unmodifiableList(new
            ArrayList<>(mSecurityDCList));
      }
    } else {
      try (Query query = pm.newQuery(MDCPrivilege.class)) {
        mSecurityDCList = (List) query.execute();
        pm.retrieveAll(mSecurityDCList);
        LOG.debug("Done retrieving all objects for listPrincipalAllDCGrant: {}", mSecurityDCList);
        return Collections.unmodifiableList(new ArrayList<>(mSecurityDCList));
      }
    }
  }

  /**
   * Lists every table-level privilege record on one table, for all principals.
   * Identifiers are normalized before querying.
   */
  private List listAllTableGrants(String catName, String dbName, String tableName) {
    boolean success = false;
    Query query = null;
    List mSecurityTabList = new ArrayList<>();
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    try {
      LOG.debug("Executing listAllTableGrants");

      openTransaction();
      // NOTE(review): the concatenation produces "== t2&& table..." with no
      // space before "&&" — appears tolerated by the JDOQL parser, but verify.
      String queryStr = "table.tableName == t1 && table.database.name == t2" +
          "&& table.database.catalogName == t3";
      query = pm.newQuery(MTablePrivilege.class, queryStr);
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
      List mPrivs =
          (List) query.executeWithArray(tableName, dbName, catName);
      LOG.debug("Done executing query for listAllTableGrants");
      pm.retrieveAll(mPrivs);
      success = commitTransaction();

      mSecurityTabList.addAll(mPrivs);

      LOG.debug("Done retrieving all objects for listAllTableGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mSecurityTabList;
  }

  /**
   * Lists every partition-level privilege record across all partitions of one
   * table, for all principals.
   */
  private List listTableAllPartitionGrants(String catName, String dbName, String tableName) {
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    boolean success = false;
    Query query = null;
    List mSecurityTabPartList = new ArrayList<>();
    try {
      LOG.debug("Executing listTableAllPartitionGrants");

      openTransaction();
      String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " +
          "&& partition.table.database.catalogName == t3";
      query = pm.newQuery(MPartitionPrivilege.class,
          queryStr);
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
      List mPrivs =
          (List) query.executeWithArray(tableName, dbName, catName);
      pm.retrieveAll(mPrivs);
      success = commitTransaction();

      mSecurityTabPartList.addAll(mPrivs);

      LOG.debug("Done retrieving all objects for listTableAllPartitionGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mSecurityTabPartList;
  }

  // Convenience overload: all column grants on a table, any authorizer.
  private List listTableAllColumnGrants(
      String catName, String dbName, String tableName) {
    return listTableAllColumnGrants(catName, dbName, tableName, null);
  }

  /**
   * Lists every table-column privilege record on one table, for all principals,
   * optionally filtered by authorizer.
   */
  private List listTableAllColumnGrants(
      String catName, String dbName, String tableName, String authorizer) {
    boolean success = false;
    Query query = null;
    List mTblColPrivilegeList = new ArrayList<>();
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    try {
      LOG.debug("Executing listTableAllColumnGrants");

      openTransaction();
      List mPrivs = null;
      if (authorizer != null) {
        // NOTE(review): "&&" + "table..." concatenates without a space —
        // appears tolerated by the JDOQL parser, but verify.
        String queryStr = "table.tableName == t1 && table.database.name == t2 &&" +
            "table.database.catalogName == t3 && authorizer == t4";
        query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
        query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " +
            "java.lang.String t4");
        mPrivs = (List) query.executeWithArray(tableName, dbName, catName, authorizer);
      } else {
        String queryStr = "table.tableName == t1 && table.database.name == t2 &&" +
            "table.database.catalogName == t3";
        query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
        query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
        mPrivs = (List) query.executeWithArray(tableName, dbName, catName);
      }
      LOG.debug("Query to obtain objects for listTableAllColumnGrants finished");
      pm.retrieveAll(mPrivs);
      LOG.debug("RetrieveAll on all the objects for listTableAllColumnGrants finished");
      success = commitTransaction();
      LOG.debug("Transaction running query to obtain objects for listTableAllColumnGrants " +
          "committed");

      mTblColPrivilegeList.addAll(mPrivs);

      LOG.debug("Done retrieving " + mPrivs.size() + " objects for listTableAllColumnGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mTblColPrivilegeList;
  }

  /**
   * Lists every partition-column privilege record across all partitions of one
   * table, for all principals.
   */
  private List listTableAllPartitionColumnGrants(
      String catName, String dbName, String tableName) {
    boolean success = false;
    Query query = null;
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    List mSecurityColList = new ArrayList<>();
    try {
      LOG.debug("Executing listTableAllPartitionColumnGrants");

      openTransaction();
      String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " +
          "&& partition.table.database.catalogName == t3";
      query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr);
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
      List mPrivs =
          (List) query.executeWithArray(tableName, dbName, catName);
      pm.retrieveAll(mPrivs);
      success = commitTransaction();

      mSecurityColList.addAll(mPrivs);

      LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants");
    } finally {
      rollbackAndCleanup(success, query);
    }
    return mSecurityColList;
  }

  /**
   * Bulk-deletes partition-column privilege records for the named partitions.
   * No transaction management here — the caller must already be inside one
   * (hence the NoTxn suffix).
   */
  private void dropPartitionAllColumnGrantsNoTxn(
      String catName, String dbName, String tableName, List partNames) {
    Pair queryWithParams = makeQueryByPartitionNames(catName,
        dbName, tableName, partNames, MPartitionColumnPrivilege.class,
        "partition.table.tableName", "partition.table.database.name", "partition.partitionName",
        "partition.table.database.catalogName");
    try (QueryWrapper wrapper = new QueryWrapper(queryWithParams.getLeft())) {
      wrapper.deletePersistentAll(queryWithParams.getRight());
    }
  }

private List listDatabaseGrants(String catName, String dbName, String authorizer) throws Exception { - LOG.debug("Executing listDatabaseGrants"); - - Preconditions.checkState(currentTransaction.isActive()); - - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - - final Query query; - final String[] args; - - if (authorizer != null) { - query = pm.newQuery(MDBPrivilege.class, "database.name == t1 && database.catalogName == t2 && authorizer == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - args = new String[] { dbName, catName, authorizer }; - } else { - query = pm.newQuery(MDBPrivilege.class, "database.name == t1 && database.catalogName == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - args = new String[] { dbName, catName }; - } - - try (QueryWrapper q = new QueryWrapper(query)) { - final List mSecurityDBList = (List) q.executeWithArray(args); - pm.retrieveAll(mSecurityDBList); - LOG.debug("Done retrieving all objects for listDatabaseGrants: {}", mSecurityDBList); - return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList)); - } - } - - private List listDataConnectorGrants(String dcName, String authorizer) throws Exception { - LOG.debug("Executing listDataConnectorGrants"); - - Preconditions.checkState(currentTransaction.isActive()); - - dcName = normalizeIdentifier(dcName); - - final Query query; - String[] args = null; - final List mSecurityDCList; - - if (authorizer != null) { - query = pm.newQuery(MDCPrivilege.class, "dataConnector.name == t1 && authorizer == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - args = new String[] { dcName, authorizer }; - } else { - query = pm.newQuery(MDCPrivilege.class, "dataConnector.name == t1"); - query.declareParameters("java.lang.String t1"); - } - try (QueryWrapper wrapper = new QueryWrapper(query)) { - if (args != null) { - mSecurityDCList = (List) 
wrapper.executeWithArray(args); - } else { - mSecurityDCList = (List) wrapper.execute(dcName); - } - pm.retrieveAll(mSecurityDCList); - LOG.debug("Done retrieving all objects for listDataConnectorGrants: {}", mSecurityDCList); - return Collections.unmodifiableList(new ArrayList<>(mSecurityDCList)); - } - } - - private void dropPartitionGrantsNoTxn(String catName, String dbName, String tableName, - List partNames) { - Pair queryWithParams = makeQueryByPartitionNames(catName, - dbName, tableName, partNames,MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName", - "partition.table.database.catalogName"); - try (QueryWrapper wrapper = new QueryWrapper(queryWithParams.getLeft())) { - wrapper.deletePersistentAll(queryWithParams.getRight()); - } - } - - private Pair makeQueryByPartitionNames( - String catName, String dbName, String tableName, List partNames, Class clazz, - String tbCol, String dbCol, String partCol, String catCol) { - StringBuilder queryStr = new StringBuilder(tbCol + " == t1 && " + dbCol + " == t2 && " + catCol + " == t3"); - StringBuilder paramStr = new StringBuilder("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - Object[] params = new Object[3 + partNames.size()]; - params[0] = normalizeIdentifier(tableName); - params[1] = normalizeIdentifier(dbName); - params[2] = normalizeIdentifier(catName); - int index = 0; - for (String partName : partNames) { - params[index + 3] = partName; - queryStr.append(((index == 0) ? 
" && (" : " || ") + partCol + " == p" + index); - paramStr.append(", java.lang.String p" + index); - ++index; - } - queryStr.append(")"); - Query query = pm.newQuery(clazz, queryStr.toString()); - query.declareParameters(paramStr.toString()); - return Pair.of(query, params); - } - - private List listAllMTableGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName) { - return listAllMTableGrants(principalName, principalType, catName, dbName, tableName, null); - } - - private List listAllMTableGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String authorizer) { - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - boolean success = false; - Query query = null; - List mSecurityTabPartList = new ArrayList<>(); - try { - openTransaction(); - LOG.debug("Executing listAllTableGrants"); - List mPrivs; - if (authorizer != null) { - query = pm.newQuery(MTablePrivilege.class, - "principalName == t1 && principalType == t2 && table.tableName == t3 &&" + - "table.database.name == t4 && table.database.catalogName == t5 && authorizer == t6"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3," + - "java.lang.String t4, java.lang.String t5, java.lang.String t6"); - mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - tableName, dbName, catName, authorizer); - } else { - query = pm.newQuery(MTablePrivilege.class, - "principalName == t1 && principalType == t2 && table.tableName == t3 &&" + - "table.database.name == t4 && table.database.catalogName == t5"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3," + - "java.lang.String t4, java.lang.String t5"); - mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - tableName, dbName, catName); - } - 
pm.retrieveAll(mPrivs); - success = commitTransaction(); - - mSecurityTabPartList.addAll(mPrivs); - - LOG.debug("Done retrieving all objects for listAllTableGrants"); - } finally { - rollbackAndCleanup(success, query); - } - return mSecurityTabPartList; - } - - @Override - public List listAllTableGrants(String principalName, - PrincipalType principalType, - String catName, - String dbName, - String tableName) { - List mTbls = - listAllMTableGrants(principalName, principalType, catName, dbName, tableName); - if (mTbls.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList<>(); - for (int i = 0; i < mTbls.size(); i++) { - MTablePrivilege sTbl = mTbls.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.TABLE, dbName, tableName, null, null); - objectRef.setCatName(catName); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sTbl.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl - .getGrantor(), PrincipalType.valueOf(sTbl - .getGrantorType()), sTbl.getGrantOption()), sTbl.getAuthorizer()); - result.add(secObj); - } - return result; - } - - private List listPrincipalMPartitionGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String partName) { - return listPrincipalMPartitionGrants(principalName, principalType, catName, dbName, tableName, partName, null); - } - - private List listPrincipalMPartitionGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String partName, String authorizer) { - boolean success = false; - Query query = null; - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - catName = normalizeIdentifier(catName); - List mSecurityTabPartList = new ArrayList<>(); - try { - LOG.debug("Executing listPrincipalPartitionGrants"); - - openTransaction(); - List mPrivs; - if (authorizer != null) 
{ - query = pm.newQuery(MPartitionPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " - + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" - + "&& partition.partitionName == t6 && authorizer == t7"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " - + "java.lang.String t5, java.lang.String t6, java.lang.String t7"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, partName, authorizer); - } else { - query = pm.newQuery(MPartitionPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " - + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" - + "&& partition.partitionName == t6"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " - + "java.lang.String t5, java.lang.String t6"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, partName); - } - pm.retrieveAll(mPrivs); - success = commitTransaction(); - - mSecurityTabPartList.addAll(mPrivs); - - LOG.debug("Done retrieving all objects for listPrincipalPartitionGrants"); - } finally { - rollbackAndCleanup(success, query); - } - return mSecurityTabPartList; - } - - @Override - public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, - String catName, - String dbName, - String tableName, - List partValues, - String partName) { - List mParts = listPrincipalMPartitionGrants(principalName, - principalType, catName, dbName, tableName, partName); - if (mParts.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList<>(); - for (int i = 0; i < mParts.size(); i++) { - MPartitionPrivilege sPart = mParts.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - 
HiveObjectType.PARTITION, dbName, tableName, partValues, null); - objectRef.setCatName(catName); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sPart.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sPart.getPrivilege(), sPart - .getCreateTime(), sPart.getGrantor(), PrincipalType - .valueOf(sPart.getGrantorType()), sPart - .getGrantOption()), sPart.getAuthorizer()); - - result.add(secObj); - } - return result; - } - - private List listPrincipalMTableColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String columnName) { - return listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, - columnName, null); - } - - private List listPrincipalMTableColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String columnName, String authorizer) { - boolean success = false; - Query query = null; - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - columnName = normalizeIdentifier(columnName); - List mSecurityColList = new ArrayList<>(); - try { - LOG.debug("Executing listPrincipalTableColumnGrants"); - - openTransaction(); - List mPrivs; - if (authorizer != null) { - String queryStr = - "principalName == t1 && principalType == t2 && " - + "table.tableName == t3 && table.database.name == t4 && " + - "table.database.catalogName == t5 && columnName == t6 && authorizer == t7"; - query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " - + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, columnName, authorizer); - } else { - String queryStr = - "principalName == t1 && principalType == t2 && " - + "table.tableName 
== t3 && table.database.name == t4 && " + - "table.database.catalogName == t5 && columnName == t6 "; - query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " - + "java.lang.String t4, java.lang.String t5, java.lang.String t6"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, columnName); - } - pm.retrieveAll(mPrivs); - success = commitTransaction(); + // Here we build an aux structure that is used to verify that the foreign key that is declared + // is actually referencing a valid primary key or unique key. We also check that the types of + // the columns correspond. + if (existingTablePrimaryKeys.isEmpty() && existingTableUniqueConstraints.isEmpty()) { + throw new MetaException( + "Trying to define foreign key but there are no primary keys or unique keys for referenced table"); + } + final Set validPKsOrUnique = generateValidPKsOrUniqueSignatures(parentCols, + existingTablePrimaryKeys, existingTableUniqueConstraints); - mSecurityColList.addAll(mPrivs); + StringBuilder fkSignature = new StringBuilder(); + StringBuilder referencedKSignature = new StringBuilder(); + for (; i < foreignKeys.size(); i++) { + SQLForeignKey foreignKey = foreignKeys.get(i); + final String fkColumnName = normalizeIdentifier(foreignKey.getFkcolumn_name()); + int childIntegerIndex = getColumnIndexFromTableColumns(childCD.getCols(), fkColumnName); + if (childIntegerIndex == -1) { + if (childTable.getPartitionKeys() != null) { + childCD = null; + childIntegerIndex = getColumnIndexFromTableColumns(childTable.getPartitionKeys(), fkColumnName); + } + if (childIntegerIndex == -1) { + throw new InvalidObjectException("Child column not found: " + fkColumnName); + } + } - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrants"); - } finally { - rollbackAndCleanup(success, query); - } - return mSecurityColList; - } + final 
String pkColumnName = normalizeIdentifier(foreignKey.getPkcolumn_name()); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD.getCols(), pkColumnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), pkColumnName); + } + if (parentIntegerIndex == -1) { + throw new InvalidObjectException("Parent column not found: " + pkColumnName); + } + } - @Override - public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, - String catName, - String dbName, - String tableName, - String columnName) { - List mTableCols = - listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); - if (mTableCols.isEmpty()) { - return Collections.emptyList(); - } - List result = new ArrayList<>(); - for (int i = 0; i < mTableCols.size(); i++) { - MTableColumnPrivilege sCol = mTableCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, null, sCol.getColumnName()); - objectRef.setCatName(catName); - HiveObjectPrivilege secObj = new HiveObjectPrivilege( - objectRef, sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol - .getGrantOption()), sCol.getAuthorizer()); - result.add(secObj); - } - return result; - } + if (foreignKey.getFk_name() == null) { + // When there is no explicit foreign key name associated with the constraint and the key is composite, + // we expect the foreign keys to be send in order in the input list. + // Otherwise, the below code will break. + // If this is the first column of the FK constraint, generate the foreign key name + // NB: The below code can result in race condition where duplicate names can be generated (in theory). 
+ // However, this scenario can be ignored for practical purposes because of + // the uniqueness of the generated constraint name. + if (foreignKey.getKey_seq() == 1) { + currentConstraintName = generateConstraintName(parentTable, fkTableDB, fkTableName, pkTableDB, + pkTableName, pkColumnName, fkColumnName, "fk"); + } + } else { + currentConstraintName = normalizeIdentifier(foreignKey.getFk_name()); + if (constraintNameAlreadyExists(parentTable, currentConstraintName)) { + String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), + parentTable.getTableName(), currentConstraintName); + throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); + } + } + // Update Column, keys, table, database, catalog name + foreignKey.setFk_name(currentConstraintName); + foreignKey.setCatName(catName); + foreignKey.setFktable_db(fkTableDB); + foreignKey.setFktable_name(fkTableName); + foreignKey.setPktable_db(pkTableDB); + foreignKey.setPktable_name(pkTableName); + foreignKey.setFkcolumn_name(fkColumnName); + foreignKey.setPkcolumn_name(pkColumnName); - private List listPrincipalMPartitionColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String partitionName, String columnName) { - return listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, - tableName, partitionName, columnName, null); - } + Integer updateRule = foreignKey.getUpdate_rule(); + Integer deleteRule = foreignKey.getDelete_rule(); + int enableValidateRely = (foreignKey.isEnable_cstr() ? 4 : 0) + + (foreignKey.isValidate_cstr() ? 2 : 0) + (foreignKey.isRely_cstr() ? 
1 : 0); - private List listPrincipalMPartitionColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String partitionName, String columnName, String authorizer) { - boolean success = false; - Query query = null; - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); - columnName = normalizeIdentifier(columnName); - catName = normalizeIdentifier(catName); - List mSecurityColList = new ArrayList<>(); - try { - LOG.debug("Executing listPrincipalPartitionColumnGrants"); + MConstraint mpkfk = new MConstraint( + currentConstraintName, + foreignKey.getKey_seq(), + MConstraint.FOREIGN_KEY_CONSTRAINT, + deleteRule, + updateRule, + enableValidateRely, + parentTable, + childTable, + parentCD, + childCD, + childIntegerIndex, + parentIntegerIndex + ); + mpkfks.add(mpkfk); - openTransaction(); - List mPrivs; - if (authorizer != null) { - query = pm.newQuery( - MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " - + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + - " && partition.partitionName == t6 && columnName == t7 && authorizer == t8"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " - + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7, " - + "java.lang.String t8"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, partitionName, columnName, authorizer); - } else { - query = pm.newQuery( - MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " - + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + - " && partition.partitionName == t6 && columnName == t7"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, 
" - + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7"); - mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, catName, partitionName, columnName); - } - pm.retrieveAll(mPrivs); - success = commitTransaction(); + final String fkColType = getColumnFromTableColumns(childCols, fkColumnName).getType(); + fkSignature.append( + generateColNameTypeSignature(fkColumnName, fkColType)); + referencedKSignature.append( + generateColNameTypeSignature(pkColumnName, fkColType)); - mSecurityColList.addAll(mPrivs); + if (i + 1 < foreignKeys.size() && foreignKeys.get(i + 1).getKey_seq() == 1) { + // Next one is a new key, we bail out from the inner loop + break; + } + } + String referenced = referencedKSignature.toString(); + if (!validPKsOrUnique.contains(referenced)) { + throw new MetaException( + "Foreign key references " + referenced + " but no corresponding " + + "primary key or unique key exists. Possible keys: " + validPKsOrUnique); + } + if (sameTable && fkSignature.toString().equals(referenced)) { + throw new MetaException( + "Cannot be both foreign key and primary/unique key on same table: " + referenced); + } + fkSignature = new StringBuilder(); + referencedKSignature = new StringBuilder(); + } + pm.makePersistentAll(mpkfks); - LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrants"); - } finally { - rollbackAndCleanup(success, query); } - return mSecurityColList; + return foreignKeys; } - @Override - public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, - String catName, - String dbName, - String tableName, - List partValues, - String partitionName, - String columnName) { - List mPartitionCols = - listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, - partitionName, columnName); - if (mPartitionCols.isEmpty()) { - return Collections.emptyList(); - } - List result = new 
ArrayList<>(); - for (int i = 0; i < mPartitionCols.size(); i++) { - MPartitionColumnPrivilege sCol = mPartitionCols.get(i); - HiveObjectRef objectRef = new HiveObjectRef( - HiveObjectType.COLUMN, dbName, tableName, partValues, sCol.getColumnName()); - objectRef.setCatName(catName); - HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, - sCol.getPrincipalName(), principalType, - new PrivilegeGrantInfo(sCol.getPrivilege(), sCol - .getCreateTime(), sCol.getGrantor(), PrincipalType - .valueOf(sCol.getGrantorType()), sCol.getGrantOption()), sCol.getAuthorizer()); - result.add(secObj); + private static Set generateValidPKsOrUniqueSignatures(List tableCols, + List refTablePrimaryKeys, List refTableUniqueConstraints) { + final Set validPKsOrUnique = new HashSet<>(); + if (!refTablePrimaryKeys.isEmpty()) { + refTablePrimaryKeys.sort((o1, o2) -> { + int keyNameComp = o1.getPk_name().compareTo(o2.getPk_name()); + if (keyNameComp == 0) { + return Integer.compare(o1.getKey_seq(), o2.getKey_seq()); + } + return keyNameComp; + }); + StringBuilder pkSignature = new StringBuilder(); + for (SQLPrimaryKey pk : refTablePrimaryKeys) { + pkSignature.append( + generateColNameTypeSignature( + pk.getColumn_name(), getColumnFromTableColumns(tableCols, pk.getColumn_name()).getType())); + } + validPKsOrUnique.add(pkSignature.toString()); } - return result; - } - - @Override - public List listPrincipalPartitionColumnGrantsAll( - String principalName, PrincipalType principalType) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionColumnGrantsAll"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - query = - pm.newQuery(MPartitionColumnPrivilege.class, - "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = - (List) query.executeWithArray(principalName, - principalType.toString()); - } 
else { - query = pm.newQuery(MPartitionColumnPrivilege.class); - mSecurityTabPartList = (List) query.execute(); + if (!refTableUniqueConstraints.isEmpty()) { + refTableUniqueConstraints.sort((o1, o2) -> { + int keyNameComp = o1.getUk_name().compareTo(o2.getUk_name()); + if (keyNameComp == 0) { + return Integer.compare(o1.getKey_seq(), o2.getKey_seq()); + } + return keyNameComp; + }); + StringBuilder ukSignature = new StringBuilder(); + for (int j = 0; j < refTableUniqueConstraints.size(); j++) { + SQLUniqueConstraint uk = refTableUniqueConstraints.get(j); + ukSignature.append( + generateColNameTypeSignature( + uk.getColumn_name(), getColumnFromTableColumns(tableCols, uk.getColumn_name()).getType())); + if (j + 1 < refTableUniqueConstraints.size()) { + if (!refTableUniqueConstraints.get(j + 1).getUk_name().equals( + refTableUniqueConstraints.get(j).getUk_name())) { + validPKsOrUnique.add(ukSignature.toString()); + ukSignature = new StringBuilder(); + } + } else { + validPKsOrUnique.add(ukSignature.toString()); + } } - LOG.debug("Done executing query for listPrincipalPartitionColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionColumnGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); } + return validPKsOrUnique; } - @Override - public List listPartitionColumnGrantsAll( - String catName, String dbName, String tableName, String partitionName, String columnName) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPartitionColumnGrantsAll"); - query = - pm.newQuery(MPartitionColumnPrivilege.class, - "partition.table.tableName == t3 && partition.table.database.name == t4 && " - + "partition.table.database.name == t5 && " - + "partition.partitionName == t6 && columnName == t7"); - query.declareParameters("java.lang.String t3, 
java.lang.String t4, java.lang.String t5," + - "java.lang.String t6, java.lang.String t7"); - List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, catName, - partitionName, columnName); - LOG.debug("Done executing query for listPartitionColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPartitionColumnGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); - } + private static String generateColNameTypeSignature(String colName, String colType) { + return colName + ":" + colType + ";"; } - private List convertPartCols(List privs) { - List result = new ArrayList<>(); - for (MPartitionColumnPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - String authorizer = priv.getAuthorizer(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MPartition mpartition = priv.getPartition(); - MTable mtable = mpartition.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, - mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), priv.getColumnName()); - objectRef.setCatName(mdatabase.getCatalogName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); - } - return result; + @Override + public List addPrimaryKeys(List pks) throws InvalidObjectException, + MetaException { + return addPrimaryKeys(pks, true); } - private List listPrincipalAllTableGrants(String principalName, PrincipalType principalType) - throws Exception { - LOG.debug("Executing listPrincipalAllTableGrants"); - - Preconditions.checkState(this.currentTransaction.isActive()); - 
- try (Query query = pm.newQuery(MTablePrivilege.class, "principalName == t1 && principalType == t2")) { - query.declareParameters("java.lang.String t1, java.lang.String t2"); - final List mSecurityTabPartList = - (List) query.execute(principalName, principalType.toString()); - - pm.retrieveAll(mSecurityTabPartList); + private List addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, + MetaException { + List mpks = new ArrayList<>(); + String constraintName = null; - LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); + for (SQLPrimaryKey pk : pks) { + final String catName = normalizeIdentifier(pk.getCatName()); + final String tableDB = normalizeIdentifier(pk.getTable_db()); + final String tableName = normalizeIdentifier(pk.getTable_name()); + final String columnName = normalizeIdentifier(pk.getColumn_name()); - return Collections.unmodifiableList(new ArrayList<>(mSecurityTabPartList)); - } - } + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); + MTable parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + tableName); + } - @Override - public List listPrincipalTableGrantsAll(String principalName, - PrincipalType principalType) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalAllTableGrants"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - query = pm.newQuery(MTablePrivilege.class, "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = - (List) query.execute(principalName, principalType.toString()); + MColumnDescriptor parentCD = retrieveCD ? 
nParentTable.mcd : parentTable.getSd().getCD(); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? null : parentCD.getCols(), columnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); + } + if (parentIntegerIndex == -1) { + throw new InvalidObjectException("Parent column not found: " + columnName); + } + } + if (getPrimaryKeyConstraintName(parentTable.getDatabase().getCatalogName(), + parentTable.getDatabase().getName(), parentTable.getTableName()) != null) { + throw new MetaException(" Primary key already exists for: " + + TableName.getQualified(catName, tableDB, tableName)); + } + if (pk.getPk_name() == null) { + if (pk.getKey_seq() == 1) { + constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "pk"); + } } else { - query = pm.newQuery(MTablePrivilege.class); - mSecurityTabPartList = (List) query.execute(); + constraintName = normalizeIdentifier(pk.getPk_name()); + if (constraintNameAlreadyExists(parentTable, constraintName)) { + String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), + parentTable.getTableName(), constraintName); + throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); + } } - LOG.debug("Done executing query for listPrincipalAllTableGrants"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTable(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); - return result; - } finally { - rollbackAndCleanup(success, query); + + int enableValidateRely = (pk.isEnable_cstr() ? 4 : 0) + + (pk.isValidate_cstr() ? 2 : 0) + (pk.isRely_cstr() ? 
1 : 0); + MConstraint mpk = new MConstraint( + constraintName, + pk.getKey_seq(), + MConstraint.PRIMARY_KEY_CONSTRAINT, + null, + null, + enableValidateRely, + parentTable, + null, + parentCD, + null, + null, + parentIntegerIndex); + mpks.add(mpk); + + // Add normalized identifier back to result + pk.setCatName(catName); + pk.setTable_db(tableDB); + pk.setTable_name(tableName); + pk.setColumn_name(columnName); + pk.setPk_name(constraintName); } + pm.makePersistentAll(mpks); + return pks; } @Override - public List listTableGrantsAll(String catName, String dbName, String tableName) { - return listTableGrantsAll(catName, dbName, tableName, null); - } - - private List listTableGrantsAll(String catName, String dbName, String tableName, - String authorizer) { - boolean success = false; - Query query = null; - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - try { - openTransaction(); - LOG.debug("Executing listTableGrantsAll"); - List mSecurityTabPartList = null; - if (authorizer != null) { - query = pm.newQuery(MTablePrivilege.class, - "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3" + - " && authorizer == t4"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + - "java.lang.String t4"); - mSecurityTabPartList = (List) query.executeWithArray(tableName, dbName, catName, authorizer); - } else { - query = pm.newQuery(MTablePrivilege.class, - "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - mSecurityTabPartList = (List) query.executeWithArray(tableName, dbName, catName); - } - LOG.debug("Done executing query for listTableGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTable(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for 
listPrincipalAllTableGrants"); - return result; - } finally { - rollbackAndCleanup(success, query); - } + public List addUniqueConstraints(List uks) + throws InvalidObjectException, MetaException { + return addUniqueConstraints(uks, true); } - private List convertTable(List privs) { - List result = new ArrayList<>(); - for (MTablePrivilege priv : privs) { - String pname = priv.getPrincipalName(); - String authorizer = priv.getAuthorizer(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + private List addUniqueConstraints(List uks, boolean retrieveCD) + throws InvalidObjectException, MetaException { - String table = priv.getTable().getTableName(); - String database = priv.getTable().getDatabase().getName(); + List cstrs = new ArrayList<>(); + String constraintName = null; - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.TABLE, database, table, - null, null); - objectRef.setCatName(priv.getTable().getDatabase().getCatalogName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + for (SQLUniqueConstraint uk : uks) { + final String catName = normalizeIdentifier(uk.getCatName()); + final String tableDB = normalizeIdentifier(uk.getTable_db()); + final String tableName = normalizeIdentifier(uk.getTable_name()); + final String columnName = normalizeIdentifier(uk.getColumn_name()); - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); - } - return result; - } + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. 
+ AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); + MTable parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + tableName); + } - private List listPrincipalAllPartitionGrants(String principalName, PrincipalType principalType) - throws Exception { - LOG.debug("Executing listPrincipalAllPartitionGrants"); + MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? null : parentCD.getCols(), columnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); + } + if (parentIntegerIndex == -1) { + throw new InvalidObjectException("Parent column not found: " + columnName); + } + } + if (uk.getUk_name() == null) { + if (uk.getKey_seq() == 1) { + constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "uk"); + } + } else { + constraintName = normalizeIdentifier(uk.getUk_name()); + if (constraintNameAlreadyExists(parentTable, constraintName)) { + String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), + parentTable.getTableName(), constraintName); + throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); + } + } - Preconditions.checkState(this.currentTransaction.isActive()); - try (Query query = pm.newQuery(MPartitionPrivilege.class, "principalName == t1 && principalType == t2")) { - query.declareParameters("java.lang.String t1, java.lang.String t2"); - final List mSecurityTabPartList = - (List) query.execute(principalName, principalType.toString()); + int enableValidateRely = (uk.isEnable_cstr() ? 4 : 0) + + (uk.isValidate_cstr() ? 2 : 0) + (uk.isRely_cstr() ? 
1 : 0); + MConstraint muk = new MConstraint( + constraintName, + uk.getKey_seq(), + MConstraint.UNIQUE_CONSTRAINT, + null, + null, + enableValidateRely, + parentTable, + null, + parentCD, + null, + null, + parentIntegerIndex); + cstrs.add(muk); - pm.retrieveAll(mSecurityTabPartList); - LOG.debug("Done retrieving all objects for listPrincipalAllPartitionGrants"); + // Add normalized identifier back to result + uk.setCatName(catName); + uk.setTable_db(tableDB); + uk.setTable_name(tableName); + uk.setColumn_name(columnName); + uk.setUk_name(constraintName); - return Collections.unmodifiableList(new ArrayList<>(mSecurityTabPartList)); } + pm.makePersistentAll(cstrs); + return uks; } @Override - public List listPrincipalPartitionGrantsAll(String principalName, - PrincipalType principalType) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionGrantsAll"); - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - query = - pm.newQuery(MPartitionPrivilege.class, "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = - (List) query.execute(principalName, principalType.toString()); - } else { - query = pm.newQuery(MPartitionPrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartition(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); - } + public List addNotNullConstraints(List nns) + throws InvalidObjectException, MetaException { + return addNotNullConstraints(nns, true); } @Override - public List listPartitionGrantsAll(String catName, String dbName, String tableName, - String 
partitionName) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalPartitionGrantsAll"); - query = - pm.newQuery(MPartitionPrivilege.class, - "partition.table.tableName == t3 && partition.table.database.name == t4 && " - + "partition.table.database.catalogName == t5 && partition.partitionName == t6"); - query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, " + - "java.lang.String t6"); - List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, catName, partitionName); - LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertPartition(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalPartitionGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); - } + public List addDefaultConstraints(List nns) + throws InvalidObjectException, MetaException { + return addDefaultConstraints(nns, true); } - private List convertPartition(List privs) { - List result = new ArrayList<>(); - for (MPartitionPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - String authorizer = priv.getAuthorizer(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MPartition mpartition = priv.getPartition(); - MTable mtable = mpartition.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.PARTITION, - mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), null); - objectRef.setCatName(mdatabase.getCatalogName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); - } - return 
result; + @Override + public List addCheckConstraints(List nns) + throws InvalidObjectException, MetaException { + return addCheckConstraints(nns, true); } - private List listPrincipalAllTableColumnGrants(String principalName, - PrincipalType principalType) throws Exception { - - LOG.debug("Executing listPrincipalAllTableColumnGrants"); - - Preconditions.checkState(this.currentTransaction.isActive()); - - try (Query query = pm.newQuery(MTableColumnPrivilege.class, "principalName == t1 && principalType == t2")) { - query.declareParameters("java.lang.String t1, java.lang.String t2"); - final List mSecurityColumnList = - (List) query.execute(principalName, principalType.toString()); + private List addCheckConstraints(List ccs, boolean retrieveCD) + throws InvalidObjectException, MetaException { + List cstrs = new ArrayList<>(); - pm.retrieveAll(mSecurityColumnList); - LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + for (SQLCheckConstraint cc: ccs) { + final String catName = normalizeIdentifier(cc.getCatName()); + final String tableDB = normalizeIdentifier(cc.getTable_db()); + final String tableName = normalizeIdentifier(cc.getTable_name()); + final String columnName = cc.getColumn_name() == null? 
null + : normalizeIdentifier(cc.getColumn_name()); + final String ccName = cc.getDc_name(); + boolean isEnable = cc.isEnable_cstr(); + boolean isValidate = cc.isValidate_cstr(); + boolean isRely = cc.isRely_cstr(); + String constraintValue = cc.getCheck_expression(); + MConstraint muk = addConstraint(catName, tableDB, tableName, columnName, ccName, isEnable, isRely, isValidate, + MConstraint.CHECK_CONSTRAINT, constraintValue, retrieveCD); + cstrs.add(muk); - return Collections.unmodifiableList(new ArrayList<>(mSecurityColumnList)); + // Add normalized identifier back to result + cc.setCatName(catName); + cc.setTable_db(tableDB); + cc.setTable_name(tableName); + cc.setColumn_name(columnName); + cc.setDc_name(muk.getConstraintName()); } + pm.makePersistentAll(cstrs); + return ccs; } - @Override - public List listPrincipalTableColumnGrantsAll(String principalName, - PrincipalType principalType) { - boolean success = false; - Query query = null; - try { - openTransaction(); - LOG.debug("Executing listPrincipalTableColumnGrantsAll"); - - List mSecurityTabPartList; - if (principalName != null && principalType != null) { - query = - pm.newQuery(MTableColumnPrivilege.class, "principalName == t1 && principalType == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - mSecurityTabPartList = - (List) query.execute(principalName, principalType.toString()); - } else { - query = pm.newQuery(MTableColumnPrivilege.class); - mSecurityTabPartList = (List) query.execute(); - } - LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTableCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); + private MConstraint addConstraint(String catName, String tableDB, String tableName, String columnName, String ccName, + boolean 
isEnable, boolean isRely, boolean isValidate, int constraintType, + String constraintValue, boolean retrieveCD) + throws InvalidObjectException, MetaException { + String constraintName = null; + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); + MTable parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + tableName); } - } - @Override - public List listTableColumnGrantsAll(String catName, String dbName, String tableName, - String columnName) { - boolean success = false; - Query query = null; - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - try { - openTransaction(); - LOG.debug("Executing listPrincipalTableColumnGrantsAll"); - query = - pm.newQuery(MTableColumnPrivilege.class, - "table.tableName == t3 && table.database.name == t4 && " + - "table.database.catalogName == t5 && columnName == t6"); - query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, " + - "java.lang.String t6"); - List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, - catName, columnName); - LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); - pm.retrieveAll(mSecurityTabPartList); - List result = convertTableCols(mSecurityTabPartList); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listPrincipalTableColumnGrantsAll"); - return result; - } finally { - rollbackAndCleanup(success, query); + MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); + } } - } - - private List convertTableCols(List privs) { - List result = new ArrayList<>(); - for (MTableColumnPrivilege priv : privs) { - String pname = priv.getPrincipalName(); - String authorizer = priv.getAuthorizer(); - PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); - - MTable mtable = priv.getTable(); - MDatabase mdatabase = mtable.getDatabase(); - - HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, - mdatabase.getName(), mtable.getTableName(), null, priv.getColumnName()); - objectRef.setCatName(mdatabase.getCatalogName()); - PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), - priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); - - result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + if (ccName == null) { + constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "dc"); + } else { + constraintName = normalizeIdentifier(ccName); + if (constraintNameAlreadyExists(parentTable, constraintName)) { + String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), + parentTable.getTableName(), constraintName); + throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); + } } - return result; - } - private List listPrincipalAllPartitionColumnGrants(String principalName, - PrincipalType principalType) throws Exception { - LOG.debug("Executing listPrincipalAllTableColumnGrants"); + int enableValidateRely = (isEnable ? 4 : 0) + + (isValidate ? 2 : 0) + (isRely ? 
1 : 0); + MConstraint muk = new MConstraint( + constraintName, + 1, + constraintType, // Not null constraint should reference a single column + null, + null, + enableValidateRely, + parentTable, + null, + parentCD, + null, + null, + parentIntegerIndex, + constraintValue); - Preconditions.checkState(this.currentTransaction.isActive()); + return muk; + } - try (Query query = pm.newQuery(MPartitionColumnPrivilege.class, "principalName == t1 && principalType == t2")) { - query.declareParameters("java.lang.String t1, java.lang.String t2"); - final List mSecurityColumnList = - (List) query.execute(principalName, principalType.toString()); + private List addDefaultConstraints(List dcs, boolean retrieveCD) + throws InvalidObjectException, MetaException { - pm.retrieveAll(mSecurityColumnList); - LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + List cstrs = new ArrayList<>(); + for (SQLDefaultConstraint dc : dcs) { + final String catName = normalizeIdentifier(dc.getCatName()); + final String tableDB = normalizeIdentifier(dc.getTable_db()); + final String tableName = normalizeIdentifier(dc.getTable_name()); + final String columnName = normalizeIdentifier(dc.getColumn_name()); + final String dcName = dc.getDc_name(); + boolean isEnable = dc.isEnable_cstr(); + boolean isValidate = dc.isValidate_cstr(); + boolean isRely = dc.isRely_cstr(); + String constraintValue = dc.getDefault_value(); + MConstraint muk = addConstraint(catName, tableDB, tableName, columnName, dcName, isEnable, isRely, isValidate, + MConstraint.DEFAULT_CONSTRAINT, constraintValue, retrieveCD); + cstrs.add(muk); - return Collections.unmodifiableList(new ArrayList<>(mSecurityColumnList)); + // Add normalized identifier back to result + dc.setCatName(catName); + dc.setTable_db(tableDB); + dc.setTable_name(tableName); + dc.setColumn_name(columnName); + dc.setDc_name(muk.getConstraintName()); } + pm.makePersistentAll(cstrs); + return dcs; } - @Override - public boolean 
isPartitionMarkedForEvent(String catName, String dbName, String tblName, - Map partName, PartitionEventType evtType) throws UnknownTableException, - MetaException, InvalidPartitionException, UnknownPartitionException { - boolean success = false; - Query query = null; - - try { - LOG.debug("Begin Executing isPartitionMarkedForEvent"); - - openTransaction(); - query = pm.newQuery(MPartitionEvent.class, - "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4 && catalogName == t5"); - query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," + - "java.lang.String t5"); - Table tbl = getTable(catName, dbName, tblName, null); // Make sure dbName and tblName are valid. - if (null == tbl) { - throw new UnknownTableException("Table: " + tblName + " is not found."); - } - Collection partEvents = - (Collection) query.executeWithArray(dbName, tblName, - getPartitionStr(tbl, partName), evtType.getValue(), catName); - pm.retrieveAll(partEvents); - success = commitTransaction(); + private List addNotNullConstraints(List nns, boolean retrieveCD) + throws InvalidObjectException, MetaException { - LOG.debug("Done executing isPartitionMarkedForEvent"); - return partEvents != null && !partEvents.isEmpty(); - } finally { - rollbackAndCleanup(success, query); - } - } + List cstrs = new ArrayList<>(); + String constraintName; - @Override - public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partName, - PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + for (SQLNotNullConstraint nn : nns) { + final String catName = normalizeIdentifier(nn.getCatName()); + final String tableDB = normalizeIdentifier(nn.getTable_db()); + final String tableName = normalizeIdentifier(nn.getTable_name()); + final String columnName = normalizeIdentifier(nn.getColumn_name()); - LOG.debug("Begin executing markPartitionForEvent"); - boolean success = 
false; - Table tbl = null; - try{ - openTransaction(); - tbl = getTable(catName, dbName, tblName, null); // Make sure dbName and tblName are valid. - if(null == tbl) { - throw new UnknownTableException("Table: "+ tblName + " is not found."); - } - pm.makePersistent(new MPartitionEvent(catName, dbName,tblName,getPartitionStr(tbl, partName), evtType.getValue())); - success = commitTransaction(); - LOG.debug("Done executing markPartitionForEvent"); - } finally { - rollbackAndCleanup(success, null); - } - return tbl; - } + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); + MTable parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + tableName); + } - private String getPartitionStr(Table tbl, Map partName) throws InvalidPartitionException{ - if(tbl.getPartitionKeysSize() != partName.size()){ - throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() + - " doesn't match with number of supplied partition values: "+partName.size()); - } - final List storedVals = new ArrayList<>(tbl.getPartitionKeysSize()); - for(FieldSchema partKey : tbl.getPartitionKeys()){ - String partVal = partName.get(partKey.getName()); - if(null == partVal) { - throw new InvalidPartitionException("No value found for partition column: "+partKey.getName()); + MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? 
null : parentCD.getCols(), columnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); + } + if (parentIntegerIndex == -1) { + throw new InvalidObjectException("Parent column not found: " + columnName); + } + } + if (nn.getNn_name() == null) { + constraintName = generateConstraintName(parentTable, tableDB, tableName, columnName, "nn"); + } else { + constraintName = normalizeIdentifier(nn.getNn_name()); + if (constraintNameAlreadyExists(parentTable, constraintName)) { + String fqConstraintName = String.format("%s.%s.%s", parentTable.getDatabase().getName(), + parentTable.getTableName(), constraintName); + throw new InvalidObjectException("Constraint name already exists: " + fqConstraintName); + } } - storedVals.add(partVal); + + int enableValidateRely = (nn.isEnable_cstr() ? 4 : 0) + + (nn.isValidate_cstr() ? 2 : 0) + (nn.isRely_cstr() ? 
1 : 0); + MConstraint muk = new MConstraint( + constraintName, + 1, + MConstraint.NOT_NULL_CONSTRAINT, // Not null constraint should reference a single column + null, + null, + enableValidateRely, + parentTable, + null, + parentCD, + null, + null, + parentIntegerIndex); + cstrs.add(muk); + // Add normalized identifier back to result + nn.setCatName(catName); + nn.setTable_db(tableDB); + nn.setTable_name(tableName); + nn.setColumn_name(columnName); + nn.setNn_name(constraintName); } - return join(storedVals,','); + pm.makePersistentAll(cstrs); + return nns; } private void writeMTableColumnStatistics(Table table, MTableColumnStatistics mStatsObj, @@ -9231,7 +3449,7 @@ public Map updateTableColumnStatistics(ColumnStatistics colStats long sleepInterval = MetastoreConf.getTimeVar(conf, ConfVars.METASTORE_S4U_NOWAIT_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); Map result = new RetryingExecutor<>(maxRetries, () -> { - Ref exceptionRef = new Ref<>(); + AtomicReference exceptionRef = new AtomicReference<>(); String savePoint = "uts_" + ThreadLocalRandom.current().nextInt(10000) + "_" + System.nanoTime(); setTransactionSavePoint(savePoint); executePlainSQL( @@ -9239,13 +3457,13 @@ public Map updateTableColumnStatistics(ColumnStatistics colStats true, exception -> { rollbackTransactionToSavePoint(savePoint); - exceptionRef.t = exception; + exceptionRef.set(exception); }); - if (exceptionRef.t != null) { - throw new RetryingExecutor.RetryException(exceptionRef.t); + if (exceptionRef.get() != null) { + throw new RetryingExecutor.RetryException(exceptionRef.get()); } pm.refresh(mTable); - Table table = convertToTable(mTable); + Table table = convertToTable(mTable, conf); List colNames = new ArrayList<>(); for (ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); @@ -9315,8 +3533,8 @@ public Map updatePartitionColumnStatistics(Table table, MTable m String catName = statsDesc.isSetCatName() ? 
statsDesc.getCatName() : getDefaultCatalog(conf); try { openTransaction(); - MPartition mPartition = getMPartition( - catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals, mTable); + MPartition mPartition = + ensureGetMPartition(new TableName(catName, statsDesc.getDbName(), statsDesc.getTableName()), partVals); if (mPartition == null) { throw new NoSuchObjectException("Partition for which stats is gathered doesn't exist."); } @@ -9329,7 +3547,7 @@ public Map updatePartitionColumnStatistics(Table table, MTable m long sleepInterval = MetastoreConf.getTimeVar(conf, ConfVars.METASTORE_S4U_NOWAIT_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); Map result = new RetryingExecutor<>(maxRetries, () -> { - Ref exceptionRef = new Ref<>(); + AtomicReference exceptionRef = new AtomicReference<>(); String savePoint = "ups_" + ThreadLocalRandom.current().nextInt(10000) + "_" + System.nanoTime(); setTransactionSavePoint(savePoint); executePlainSQL(sqlGenerator.addForUpdateNoWait( @@ -9337,14 +3555,14 @@ public Map updatePartitionColumnStatistics(Table table, MTable m true, exception -> { rollbackTransactionToSavePoint(savePoint); - exceptionRef.t = exception; + exceptionRef.set(exception); }); - if (exceptionRef.t != null) { - throw new RetryingExecutor.RetryException(exceptionRef.t); + if (exceptionRef.get() != null) { + throw new RetryingExecutor.RetryException(exceptionRef.get()); } pm.refresh(mPartition); Partition partition = convertToPart(catName, statsDesc.getDbName(), statsDesc.getTableName(), - mPartition, TxnUtils.isAcidTable(table)); + mPartition, TxnUtils.isAcidTable(table), conf); Map oldStats = Maps.newHashMap(); List stats = getMPartitionColumnStatistics(table, Lists.newArrayList(statsDesc.getPartName()), colNames, colStats.getEngine()); @@ -9371,7 +3589,7 @@ public Map updatePartitionColumnStatistics(Table table, MTable m if (errorMsg != null) { throw new MetaException(errorMsg); } - if (!isCurrentStatsValidForTheQuery(mPartition, validWriteIds, true)) { 
+ if (!isCurrentStatsValidForTheQuery(mPartition.getParameters(), mPartition.getWriteId(), validWriteIds, true)) { // Make sure we set the flag to invalid regardless of the current value. StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition: {}, {} ", @@ -9697,10 +3915,10 @@ public List getPartitionColumnStatistics( } else { // TODO: this could be improved to get partitions in bulk for (ColumnStatistics cs : allStats) { - MPartition mpart = getMPartition(catName, dbName, tableName, - Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName()), null); + MPartition mpart = ensureGetMPartition(new TableName(catName, dbName, tableName), + Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName())); if (mpart == null - || !isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) { + || !isCurrentStatsValidForTheQuery(mpart.getParameters(), mpart.getWriteId(), writeIdList, false)) { if (mpart != null) { LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} " + "(write ID {}) are not valid for current query ({} {})", dbName, tableName, @@ -9790,7 +4008,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam // checking isolation-level-compliance of each partition column stats. 
for (Partition part : parts) { - if (!isCurrentStatsValidForTheQuery(part, part.getWriteId(), writeIdList, false)) { + if (!isCurrentStatsValidForTheQuery(part.getParameters(), part.getWriteId(), writeIdList, false)) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); LOG.debug("The current metastore transactional partition column " + "statistics for {}.{}.{} is not valid for the current query", @@ -9920,16 +4138,6 @@ private List getMPartitionColumnStatistics(Table tab } } - private void dropPartitionColumnStatisticsNoTxn( - String catName, String dbName, String tableName, List partNames) { - Pair queryWithParams = makeQueryByPartitionNames( - catName, dbName, tableName, partNames, MPartitionColumnStatistics.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName", "partition.table.database.catalogName"); - try (QueryWrapper wrapper = new QueryWrapper(queryWithParams.getLeft())) { - wrapper.deletePersistentAll(queryWithParams.getRight()); - } - } - @Override public void deleteAllPartitionColumnStatistics(TableName tn, String writeIdList) { @@ -10089,7 +4297,7 @@ public List run(List input) throws Exception { Batchable.runBatched(batchSize, partNames, new Batchable() { @Override public List run(List input) throws MetaException { - Pair> queryWithParams = getPartQueryWithParams(catalog, database, tableName, + Pair> queryWithParams = getPartQueryWithParams(pm, catalog, database, tableName, input); try (QueryWrapper qw = new QueryWrapper(queryWithParams.getLeft())) { qw.setResultClass(MPartition.class); @@ -10556,14 +4764,6 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } } - @Override - public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) - throws MetaException { - String name = Warehouse.makePartName(partKeys, partVals); - return this.getMPartition(catName, dbName, tableName, name) != null; - } 
- private void debugLog(final String message) { if (LOG.isDebugEnabled()) { if (LOG.isTraceEnabled()) { @@ -11053,130 +5253,6 @@ private MPackage findMPackage(String catName, String db, String packageName) { return pkg; } - @Override - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - boolean commited = false; - Query query = null; - - NotificationEventResponse result = new NotificationEventResponse(); - result.setEvents(new ArrayList<>()); - try { - openTransaction(); - long lastEvent = rqst.getLastEvent(); - List parameterVals = new ArrayList<>(); - parameterVals.add(lastEvent); - // filterBuilder parameter is used for construction of conditional clause in the select query - StringBuilder filterBuilder = new StringBuilder("eventId > para" + parameterVals.size()); - // parameterBuilder parameter is used for specify what types of parameters will go into the filterBuilder - StringBuilder parameterBuilder = new StringBuilder("java.lang.Long para" + parameterVals.size()); - /* A fully constructed query would like: - -> filterBuilder: eventId > para0 && catalogName == para1 && dbName == para2 && (tableName == para3 - || tableName == para4) && eventType != para5 - -> parameterBuilder: java.lang.Long para0, java.lang.String para1, java.lang.String para2 - , java.lang.String para3, java.lang.String para4, java.lang.String para5 - */ - if (rqst.isSetCatName()) { - parameterVals.add(normalizeIdentifier(rqst.getCatName())); - parameterBuilder.append(", java.lang.String para" + parameterVals.size()); - filterBuilder.append(" && catalogName == para" + parameterVals.size()); - } - if (rqst.isSetDbName()) { - parameterVals.add(normalizeIdentifier(rqst.getDbName())); - parameterBuilder.append(", java.lang.String para" + parameterVals.size()); - filterBuilder.append(" && dbName == para" + parameterVals.size()); - } - if (rqst.isSetTableNames() && !rqst.getTableNames().isEmpty()) { - filterBuilder.append(" && ("); - for (String tableName : 
rqst.getTableNames()) { - parameterVals.add(normalizeIdentifier(tableName)); - parameterBuilder.append(", java.lang.String para" + parameterVals.size()); - filterBuilder.append("tableName == para" + parameterVals.size()+ " || "); - } - filterBuilder.setLength(filterBuilder.length() - 4); // remove the last " || " - filterBuilder.append(") "); - } - if (rqst.isSetEventTypeList()) { - filterBuilder.append(" && ("); - for (String eventType : rqst.getEventTypeList()) { - parameterVals.add(eventType); - parameterBuilder.append(", java.lang.String para" + parameterVals.size()); - filterBuilder.append("eventType == para" + parameterVals.size() + " || "); - } - filterBuilder.setLength(filterBuilder.length() - 4); // remove the last " || " - filterBuilder.append(") "); - } - if (rqst.isSetEventTypeSkipList()) { - for (String eventType : rqst.getEventTypeSkipList()) { - parameterVals.add(eventType); - parameterBuilder.append(", java.lang.String para" + parameterVals.size()); - filterBuilder.append(" && eventType != para" + parameterVals.size()); - } - } - query = pm.newQuery(MNotificationLog.class, filterBuilder.toString()); - query.declareParameters(parameterBuilder.toString()); - query.setOrdering("eventId ascending"); - int maxEventResponse = MetastoreConf.getIntVar(conf, ConfVars.METASTORE_MAX_EVENT_RESPONSE); - int maxEvents = (rqst.getMaxEvents() < maxEventResponse && rqst.getMaxEvents() > 0) ? 
rqst.getMaxEvents() : maxEventResponse; - query.setRange(0, maxEvents); - Collection events = - (Collection) query.executeWithArray(parameterVals.toArray(new Object[0])); - commited = commitTransaction(); - if (events == null) { - return result; - } - Iterator i = events.iterator(); - while (i.hasNext()) { - result.addToEvents(translateDbToThrift(i.next())); - } - return result; - } finally { - rollbackAndCleanup(commited, query); - } - } - - @Override - public void cleanWriteNotificationEvents(int olderThan) { - cleanOlderEvents(olderThan, MTxnWriteNotificationLog.class, "TxnWriteNotificationLog"); - } - - @Override - public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { - List writeEventInfoList = null; - boolean commited = false; - Query query = null; - try { - openTransaction(); - List parameterVals = new ArrayList<>(); - StringBuilder filterBuilder = new StringBuilder(" txnId == " + Long.toString(txnId)); - if (dbName != null && !"*".equals(dbName)) { // * means get all database, so no need to add filter - appendSimpleCondition(filterBuilder, "database", new String[]{dbName}, parameterVals); - } - if (tableName != null && !"*".equals(tableName)) { - appendSimpleCondition(filterBuilder, "table", new String[]{tableName}, parameterVals); - } - query = pm.newQuery(MTxnWriteNotificationLog.class, filterBuilder.toString()); - query.setOrdering("database,table ascending"); - List mplans = (List)query.executeWithArray( - parameterVals.toArray(new String[0])); - pm.retrieveAll(mplans); - commited = commitTransaction(); - if (mplans != null && mplans.size() > 0) { - writeEventInfoList = Lists.newArrayList(); - for (MTxnWriteNotificationLog mplan : mplans) { - WriteEventInfo writeEventInfo = new WriteEventInfo(mplan.getWriteId(), mplan.getDatabase(), - mplan.getTable(), mplan.getFiles()); - writeEventInfo.setPartition(mplan.getPartition()); - writeEventInfo.setPartitionObj(mplan.getPartObject()); - 
writeEventInfo.setTableObj(mplan.getTableObject()); - writeEventInfoList.add(writeEventInfo); - } - } - } finally { - rollbackAndCleanup(commited, query); - } - return writeEventInfoList; - } - private void executePlainSQL(String sql, boolean atLeastOneRecord, Consumer exceptionConsumer) @@ -11209,287 +5285,6 @@ private void executePlainSQL(String sql, } } - private void lockNotificationSequenceForUpdate() throws MetaException { - int maxRetries = - MetastoreConf.getIntVar(conf, ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES); - long sleepInterval = MetastoreConf.getTimeVar(conf, - ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); - if (sqlGenerator.getDbProduct().isDERBY() && directSql != null) { - // Derby doesn't allow FOR UPDATE to lock the row being selected (See https://db.apache - // .org/derby/docs/10.1/ref/rrefsqlj31783.html) . So lock the whole table. Since there's - // only one row in the table, this shouldn't cause any performance degradation. 
- new RetryingExecutor(maxRetries, () -> { - directSql.lockDbTable("NOTIFICATION_SEQUENCE"); - return null; - }).commandName("lockNotificationSequenceForUpdate").sleepInterval(sleepInterval).run(); - } else { - String selectQuery = "select \"NEXT_EVENT_ID\" from \"NOTIFICATION_SEQUENCE\""; - String lockingQuery = sqlGenerator.addForUpdateClause(selectQuery); - new RetryingExecutor(maxRetries, () -> { - executePlainSQL(lockingQuery, false, null); - return null; - }).commandName("lockNotificationSequenceForUpdate").sleepInterval(sleepInterval).run(); - } - } - - @Override - public void addNotificationEvent(NotificationEvent entry) throws MetaException { - boolean commited = false; - Query query = null; - try { - pm.flush(); - openTransaction(); - lockNotificationSequenceForUpdate(); - query = pm.newQuery(MNotificationNextId.class); - Collection ids = (Collection) query.execute(); - MNotificationNextId mNotificationNextId = null; - boolean needToPersistId; - if (CollectionUtils.isEmpty(ids)) { - mNotificationNextId = new MNotificationNextId(1L); - needToPersistId = true; - } else { - mNotificationNextId = ids.iterator().next(); - needToPersistId = false; - } - entry.setEventId(mNotificationNextId.getNextEventId()); - mNotificationNextId.incrementEventId(); - if (needToPersistId) { - pm.makePersistent(mNotificationNextId); - } - pm.makePersistent(translateThriftToDb(entry)); - commited = commitTransaction(); - } catch (MetaException e) { - LOG.error("Couldn't get lock for update", e); - throw e; - } finally { - rollbackAndCleanup(commited, query); - } - } - - @Override - public void cleanNotificationEvents(int olderThan) { - cleanOlderEvents(olderThan, MNotificationLog.class, "NotificationLog"); - } - - private void cleanOlderEvents(int olderThan, Class table, String tableName) { - final int eventBatchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.EVENT_CLEAN_MAX_EVENTS); - - final long ageSec = olderThan; - final Instant now = Instant.now(); - - final int 
tooOld = Math.toIntExact(now.getEpochSecond() - ageSec); - - final Optional batchSize = (eventBatchSize > 0) ? Optional.of(eventBatchSize) : Optional.empty(); - - final long start = System.nanoTime(); - int deleteCount = doCleanNotificationEvents(tooOld, batchSize, table, tableName); - - if (deleteCount == 0) { - LOG.info("No {} events found to be cleaned with eventTime < {}", tableName, tooOld); - } else { - int batchCount = 0; - do { - batchCount = doCleanNotificationEvents(tooOld, batchSize, table, tableName); - deleteCount += batchCount; - } while (batchCount > 0); - } - - final long finish = System.nanoTime(); - - LOG.info("Deleted {} {} events older than epoch:{} in {}ms", deleteCount, tableName, tooOld, - TimeUnit.NANOSECONDS.toMillis(finish - start)); - } - - private int doCleanNotificationEvents(final int ageSec, final Optional batchSize, Class tableClass, String tableName) { - final Transaction tx = pm.currentTransaction(); - int eventsCount = 0; - - try { - String key = null; - tx.begin(); - - try (Query query = pm.newQuery(tableClass, "eventTime <= tooOld")) { - query.declareParameters("java.lang.Integer tooOld"); - if (MNotificationLog.class.equals(tableClass)) { - key = "eventId"; - } else if (MTxnWriteNotificationLog.class.equals(tableClass)) { - key = "txnId"; - } - query.setOrdering(key + " ascending"); - if (batchSize.isPresent()) { - query.setRange(0, batchSize.get()); - } - - List events = (List) query.execute(ageSec); - if (CollectionUtils.isNotEmpty(events)) { - eventsCount = events.size(); - if (LOG.isDebugEnabled()) { - int minEventTime, maxEventTime; - long minId, maxId; - T firstNotification = events.get(0); - T lastNotification = events.get(eventsCount - 1); - if (MNotificationLog.class.equals(tableClass)) { - minEventTime = ((MNotificationLog)firstNotification).getEventTime(); - minId = ((MNotificationLog)firstNotification).getEventId(); - maxEventTime = ((MNotificationLog)lastNotification).getEventTime(); - maxId = 
((MNotificationLog)lastNotification).getEventId(); - } else if (MTxnWriteNotificationLog.class.equals(tableClass)) { - minEventTime = ((MTxnWriteNotificationLog)firstNotification).getEventTime(); - minId = ((MTxnWriteNotificationLog)firstNotification).getTxnId(); - maxEventTime = ((MTxnWriteNotificationLog)lastNotification).getEventTime(); - maxId = ((MTxnWriteNotificationLog)lastNotification).getTxnId(); - } else { - throw new RuntimeException("Cleaning of older " + tableName + " events failed. " + - "Reason: Unknown table encountered " + tableClass.getName()); - } - - LOG.debug( - "Remove {} batch of {} events with eventTime < {}, min {}: {}, max {}: {}, min eventTime {}, max eventTime {}", - tableName, eventsCount, ageSec, key, minId, key, maxId, minEventTime, maxEventTime); - } - - pm.deletePersistentAll(events); - } - } - - tx.commit(); - } catch (Exception e) { - LOG.error("Unable to delete batch of " + tableName + " events", e); - eventsCount = 0; - } finally { - if (tx.isActive()) { - tx.rollback(); - } - } - - return eventsCount; - } - - @Override - public CurrentNotificationEventId getCurrentNotificationEventId() { - boolean commited = false; - Query query = null; - try { - openTransaction(); - query = pm.newQuery(MNotificationNextId.class); - Collection ids = (Collection) query.execute(); - long id = 0; - if (CollectionUtils.isNotEmpty(ids)) { - id = ids.iterator().next().getNextEventId() - 1; - } - commited = commitTransaction(); - return new CurrentNotificationEventId(id); - } finally { - rollbackAndCleanup(commited, query); - } - } - - @Override - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { - Long result = 0L; - boolean commited = false; - Query query = null; - try { - openTransaction(); - long fromEventId = rqst.getFromEventId(); - String inputDbName = rqst.getDbName(); - String catName = rqst.isSetCatName() ? 
rqst.getCatName() : getDefaultCatalog(conf); - long toEventId; - String paramSpecs; - List paramVals = new ArrayList<>(); - - // We store a catalog name in lower case in metastore and also use the same way everywhere in - // hive. - assert catName.equals(catName.toLowerCase()); - - // Build the query to count events, part by part - String queryStr = "select count(eventId) from " + MNotificationLog.class.getName(); - // count fromEventId onwards events - queryStr = queryStr + " where eventId > fromEventId"; - paramSpecs = "java.lang.Long fromEventId"; - paramVals.add(Long.valueOf(fromEventId)); - - // Input database name can be a database name or a *. In the first case we add a filter - // condition on dbName column, but not in the second case, since a * means all the - // databases. In case we support more elaborate database name patterns in future, we will - // have to apply a method similar to getNextNotification() method of MetaStoreClient. - if (!inputDbName.equals("*")) { - // dbName could be NULL in case of transaction related events, which also need to be - // counted. - queryStr = queryStr + " && (dbName == inputDbName || dbName == null)"; - paramSpecs = paramSpecs + ", java.lang.String inputDbName"; - // We store a database name in lower case in metastore. - paramVals.add(inputDbName.toLowerCase()); - } - - // catName could be NULL in case of transaction related events, which also need to be - // counted. 
- queryStr = queryStr + " && (catalogName == catName || catalogName == null)"; - paramSpecs = paramSpecs +", java.lang.String catName"; - paramVals.add(catName); - - // count events upto toEventId if specified - if (rqst.isSetToEventId()) { - toEventId = rqst.getToEventId(); - queryStr = queryStr + " && eventId <= toEventId"; - paramSpecs = paramSpecs + ", java.lang.Long toEventId"; - paramVals.add(Long.valueOf(toEventId)); - } - // Specify list of table names in the query string and parameter types - if (rqst.isSetTableNames() && !rqst.getTableNames().isEmpty()) { - queryStr = queryStr + " && ("; - for (String tableName : rqst.getTableNames()) { - paramVals.add(tableName.toLowerCase()); - queryStr = queryStr + "tableName == tableName" + paramVals.size() + " || "; - paramSpecs = paramSpecs + ", java.lang.String tableName" + paramVals.size(); - } - queryStr = queryStr.substring(0, queryStr.length() - 4); // remove the last " || " - queryStr += ")"; - } - - query = pm.newQuery(queryStr); - query.declareParameters(paramSpecs); - result = (Long) query.executeWithArray(paramVals.toArray()); - commited = commitTransaction(); - - // Cap the event count by limit if specified. - long eventCount = result.longValue(); - if (rqst.isSetLimit() && eventCount > rqst.getLimit()) { - eventCount = rqst.getLimit(); - } - - return new NotificationEventsCountResponse(eventCount); - } finally { - rollbackAndCleanup(commited, query); - } - } - - private MNotificationLog translateThriftToDb(NotificationEvent entry) { - MNotificationLog dbEntry = new MNotificationLog(); - dbEntry.setEventId(entry.getEventId()); - dbEntry.setEventTime(entry.getEventTime()); - dbEntry.setEventType(entry.getEventType()); - dbEntry.setCatalogName(entry.isSetCatName() ? 
entry.getCatName() : getDefaultCatalog(conf)); - dbEntry.setDbName(entry.getDbName()); - dbEntry.setTableName(entry.getTableName()); - dbEntry.setMessage(entry.getMessage()); - dbEntry.setMessageFormat(entry.getMessageFormat()); - return dbEntry; - } - - private NotificationEvent translateDbToThrift(MNotificationLog dbEvent) { - NotificationEvent event = new NotificationEvent(); - event.setEventId(dbEvent.getEventId()); - event.setEventTime(dbEvent.getEventTime()); - event.setEventType(dbEvent.getEventType()); - event.setCatName(dbEvent.getCatalogName()); - event.setDbName(dbEvent.getDbName()); - event.setTableName(dbEvent.getTableName()); - event.setMessage((dbEvent.getMessage())); - event.setMessageFormat(dbEvent.getMessageFormat()); - return event; - } - @Override public List getPrimaryKeys(PrimaryKeysRequest request) throws MetaException { try { @@ -12371,7 +6166,7 @@ public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, Me if (mSerDeInfo == null) { throw new NoSuchObjectException("No SerDe named " + serDeName); } - SerDeInfo serde = convertToSerDeInfo(mSerDeInfo, false); + SerDeInfo serde = convertToSerDeInfo(mSerDeInfo, conf, false); committed = commitTransaction(); return serde; } finally { @@ -12479,7 +6274,7 @@ private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) thro schemaVersion.setName(mSchemaVersion.getName()); } if (mSchemaVersion.getSerDe() != null) { - schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe(), false)); + schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe(), conf, false)); } return schemaVersion; } @@ -13698,24 +7493,9 @@ private boolean isCurrentStatsValidForTheQuery(MTable tbl, String queryValidWrit * the conjunction of the following two are true: * ~ COLUMN_STATE_ACCURATE(CSA) state is true * ~ Isolation-level (snapshot) compliant with the query - * @param part MPartition of the stats entity * @param queryValidWriteIdList valid writeId list of the query * 
@Precondition "part" should be retrieved from the PARTITIONS table. */ - private boolean isCurrentStatsValidForTheQuery(MPartition part, - String queryValidWriteIdList, boolean isCompleteStatsWriter) - throws MetaException { - return isCurrentStatsValidForTheQuery(part.getParameters(), part.getWriteId(), - queryValidWriteIdList, isCompleteStatsWriter); - } - - private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId, - String queryValidWriteIdList, boolean isCompleteStatsWriter) - throws MetaException { - return isCurrentStatsValidForTheQuery(part.getParameters(), partWriteId, - queryValidWriteIdList, isCompleteStatsWriter); - } - // TODO: move to somewhere else public static boolean isCurrentStatsValidForTheQuery( Map statsParams, long statsWriteId, String queryValidWriteIdList, diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 9ac7f921e077..1f95cd20ee25 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -23,6 +23,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -116,9 +118,15 @@ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; +import org.apache.hadoop.hive.metastore.model.MDatabase; +import org.apache.hadoop.hive.metastore.model.MPartition; import org.apache.hadoop.hive.metastore.model.MTable; import 
org.apache.hadoop.hive.metastore.properties.PropertyStore; +import org.apache.hadoop.hive.metastore.metastore.iface.NotificationStore; +import org.apache.hadoop.hive.metastore.metastore.iface.PrivilegeStore; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; public interface RawStore extends Configurable { @@ -320,8 +328,10 @@ boolean alterDataConnector(String dcName, DataConnector connector) boolean dropType(String typeName); - void createTable(Table tbl) throws InvalidObjectException, - MetaException; + default void createTable(Table tbl) throws InvalidObjectException, + MetaException { + unwrap(TableStore.class).createTable(tbl); + } /** * Drop a table. @@ -334,8 +344,10 @@ void createTable(Table tbl) throws InvalidObjectException, * @throws InvalidObjectException Don't think this is ever actually thrown * @throws InvalidInputException Don't think this is ever actually thrown */ - boolean dropTable(String catalogName, String dbName, String tableName) - throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + default boolean dropTable(String catalogName, String dbName, String tableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + return unwrap(TableStore.class).dropTable(new TableName(catalogName, dbName, tableName)); + } /** * Drop all partitions from the table, and return the partition's location that not a child of baseLocationToNotShow, @@ -347,8 +359,10 @@ boolean dropTable(String catalogName, String dbName, String tableName) * @throws MetaException something went wrong, usually in the RDBMS or storage * @throws InvalidInputException unable to drop all partitions due to the invalid input */ - List dropAllPartitionsAndGetLocations(TableName table, String 
baseLocationToNotShow, AtomicReference message) - throws MetaException, InvalidInputException, NoSuchObjectException, InvalidObjectException; + default List dropAllPartitionsAndGetLocations(TableName table, String baseLocationToNotShow, AtomicReference message) + throws MetaException, InvalidInputException, NoSuchObjectException, InvalidObjectException { + return unwrap(TableStore.class).dropAllPartitionsAndGetLocations(table, baseLocationToNotShow, message); + } /** * Get a table object. @@ -359,7 +373,9 @@ List dropAllPartitionsAndGetLocations(TableName table, String baseLocati * consistently returned null or consistently threw NoSuchObjectException). * @throws MetaException something went wrong in the RDBMS */ - Table getTable(String catalogName, String dbName, String tableName) throws MetaException; + default Table getTable(String catalogName, String dbName, String tableName) throws MetaException { + return unwrap(TableStore.class).getTable(new TableName(catalogName, dbName, tableName), null, -1); + } /** * Get a table object. @@ -371,8 +387,10 @@ List dropAllPartitionsAndGetLocations(TableName table, String baseLocati * consistently returned null or consistently threw NoSuchObjectException). * @throws MetaException something went wrong in the RDBMS */ - Table getTable(String catalogName, String dbName, String tableName, - String writeIdList) throws MetaException; + default Table getTable(String catalogName, String dbName, String tableName, + String writeIdList) throws MetaException { + return unwrap(TableStore.class).getTable(new TableName(catalogName, dbName, tableName), writeIdList, -1); + } /** * Get a table object. @@ -384,8 +402,10 @@ Table getTable(String catalogName, String dbName, String tableName, * consistently returned null or consistently threw NoSuchObjectException). 
* @throws MetaException something went wrong in the RDBMS */ - Table getTable(String catalogName, String dbName, String tableName, - String writeIdList, long tableId) throws MetaException; + default Table getTable(String catalogName, String dbName, String tableName, + String writeIdList, long tableId) throws MetaException { + return unwrap(TableStore.class).getTable(new TableName(catalogName, dbName, tableName), writeIdList, tableId); + } /** * Add a partition. @@ -394,8 +414,13 @@ Table getTable(String catalogName, String dbName, String tableName, * @throws InvalidObjectException the provided partition object is not valid. * @throws MetaException error writing to the RDBMS. */ - boolean addPartition(Partition part) - throws InvalidObjectException, MetaException; + default boolean addPartition(Partition part) + throws InvalidObjectException, MetaException { + String catName = part.getCatName() == null ? + MetaStoreUtils.getDefaultCatalog(getConf()) : part.getCatName(); + return unwrap(TableStore.class) + .addPartitions(new TableName(catName, part.getDbName(), part.getTableName()), Arrays.asList(part)); + } /** * Add a list of partitions to a table. @@ -408,8 +433,10 @@ boolean addPartition(Partition part) * @throws MetaException the partitions don't belong to the indicated table or error writing to * the RDBMS. */ - boolean addPartitions(String catName, String dbName, String tblName, List parts) - throws InvalidObjectException, MetaException; + default boolean addPartitions(String catName, String dbName, String tblName, List parts) + throws InvalidObjectException, MetaException { + return unwrap(TableStore.class).addPartitions(new TableName(catName, dbName, tblName), parts); + } /** * Get a partition. 
@@ -421,8 +448,10 @@ boolean addPartitions(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException; + default Partition getPartition(String catName, String dbName, String tableName, + List part_vals) throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartition(new TableName(catName, dbName, tableName), part_vals, null); + } /** * Get a partition. * @param catName catalog name. @@ -434,10 +463,12 @@ Partition getPartition(String catName, String dbName, String tableName, * @throws MetaException error reading from RDBMS. * @throws NoSuchObjectException no partition matching this specification exists. */ - Partition getPartition(String catName, String dbName, String tableName, + default Partition getPartition(String catName, String dbName, String tableName, List part_vals, String writeIdList) - throws MetaException, NoSuchObjectException; + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartition(new TableName(catName, dbName, tableName), part_vals, writeIdList); + } /** * Check whether a partition exists. @@ -450,9 +481,12 @@ Partition getPartition(String catName, String dbName, String tableName, * @throws MetaException failure reading RDBMS * @throws NoSuchObjectException this is never thrown. */ - boolean doesPartitionExist(String catName, String dbName, String tableName, + @Deprecated + default boolean doesPartitionExist(String catName, String dbName, String tableName, List partKeys, List part_vals) - throws MetaException, NoSuchObjectException; + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartition(new TableName(catName, dbName, tableName), part_vals, null) != null; + } /** * Drop a partition. 
@@ -466,8 +500,10 @@ boolean doesPartitionExist(String catName, String dbName, String tableName, * @throws InvalidObjectException error dropping the statistics for the partition * @throws InvalidInputException error dropping the statistics for the partition */ - boolean dropPartition(String catName, String dbName, String tableName, String partName) - throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + default boolean dropPartition(String catName, String dbName, String tableName, String partName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + return unwrap(TableStore.class).dropPartitions(new TableName(catName, dbName, tableName), Arrays.asList(partName)); + } /** * Get some or all partitions for a table. @@ -479,8 +515,10 @@ boolean dropPartition(String catName, String dbName, String tableName, String pa * @throws MetaException error access the RDBMS. * @throws NoSuchObjectException no such table exists */ - List getPartitions(String catName, String dbName, String tableName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + default List getPartitions(String catName, String dbName, String tableName, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartitions(new TableName(catName, dbName, tableName), args); + } /** * Get the location for every partition of a given table. 
If a partition location is a child of @@ -494,8 +532,10 @@ List getPartitions(String catName, String dbName, String tableName, * @param max The maximum number of partition locations returned, or -1 for all * @return The map of the partitionName, location pairs */ - Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max); + default Map getPartitionLocations(String catName, String dbName, String tblName, + String baseLocationToNotShow, int max) { + return unwrap(TableStore.class).getPartitionLocations(new TableName(catName, dbName, tblName), baseLocationToNotShow, max); + } /** * Alter a table. @@ -508,9 +548,11 @@ Map getPartitionLocations(String catName, String dbName, String * @throws InvalidObjectException The new table object is invalid. * @throws MetaException something went wrong, usually in the RDBMS or storage. */ - Table alterTable(String catName, String dbname, String name, Table newTable, + default Table alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds) - throws InvalidObjectException, MetaException; + throws InvalidObjectException, MetaException { + return unwrap(TableStore.class).alterTable(new TableName(catName, dbname, name), newTable, queryValidWriteIds); + } /** * Update creation metadata for a materialized view. @@ -520,8 +562,10 @@ Table alterTable(String catName, String dbname, String name, Table newTable, * @param cm new creation metadata * @throws MetaException error accessing the RDBMS. */ - void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) - throws MetaException; + default void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) + throws MetaException { + unwrap(TableStore.class).updateCreationMetadata(new TableName(catName, dbname, tablename), cm); + } /** * Get table names that match a pattern. 
@@ -531,8 +575,10 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre * @return list of table names, if any * @throws MetaException failure in querying the RDBMS */ - List getTables(String catName, String dbName, String pattern) - throws MetaException; + default List getTables(String catName, String dbName, String pattern) + throws MetaException { + return unwrap(TableStore.class).getTables(catName, dbName, pattern, null, -1); + } /** * Get table names that match a pattern. @@ -544,16 +590,20 @@ List getTables(String catName, String dbName, String pattern) * @return list of table names, if any * @throws MetaException failure in querying the RDBMS */ - List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) - throws MetaException; + default List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) + throws MetaException { + return unwrap(TableStore.class).getTables(catName, dbName, pattern, tableType, limit); + } /** * Retrieve all materialized views. * @return all materialized views in a catalog * @throws MetaException error querying the RDBMS */ - List
getAllMaterializedViewObjectsForRewriting(String catName) - throws MetaException; + default List
getAllMaterializedViewObjectsForRewriting(String catName) + throws MetaException { + return unwrap(TableStore.class).getAllMaterializedViewObjectsForRewriting(catName); + } /** * Get list of materialized views in a database. @@ -563,8 +613,10 @@ List
getAllMaterializedViewObjectsForRewriting(String catName) * @throws MetaException error querying the RDBMS * @throws NoSuchObjectException no such database */ - List getMaterializedViewsForRewriting(String catName, String dbName) - throws MetaException, NoSuchObjectException; + default List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getMaterializedViewsForRewriting(catName, dbName); + } /** @@ -575,8 +627,10 @@ List getMaterializedViewsForRewriting(String catName, String dbName) * @return list of matching table meta information. * @throws MetaException failure in querying the RDBMS. */ - List getTableMeta(String catName, String dbNames, String tableNames, - List tableTypes) throws MetaException; + default List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException { + return unwrap(TableStore.class).getTableMeta(catName, dbNames, tableNames, tableTypes); + } /** * @param catName catalog name @@ -589,8 +643,10 @@ List getTableMeta(String catName, String dbNames, String tableNames, * If there are duplicate names, only one instance of the table will be returned * @throws MetaException failure in querying the RDBMS. */ - List
getTableObjectsByName(String catName, String dbname, List tableNames) - throws MetaException, UnknownDBException; + default List
getTableObjectsByName(String catName, String dbname, List tableNames) + throws MetaException, UnknownDBException { + return unwrap(TableStore.class).getTableObjectsByName(catName, dbname, tableNames, null, null); + } /** * Multi-table table-parameter update. @@ -612,8 +668,10 @@ List
getTableObjectsByName(String catName, String dbname, List ta * If there are duplicate names, only one instance of the table will be returned * @throws MetaException failure in querying the RDBMS. */ - List
getTableObjectsByName(String catName, String dbname, List tableNames, - GetProjectionsSpec projectionSpec, String tablePattern) throws MetaException, UnknownDBException; + default List
getTableObjectsByName(String catName, String dbname, List tableNames, + GetProjectionsSpec projectionSpec, String tablePattern) throws MetaException, UnknownDBException { + return unwrap(TableStore.class).getTableObjectsByName(catName, dbname, tableNames, projectionSpec, tablePattern); + } /** * Get all tables in a database. @@ -622,7 +680,9 @@ List
getTableObjectsByName(String catName, String dbname, List ta * @return list of table names * @throws MetaException failure in querying the RDBMS. */ - List getAllTables(String catName, String dbName) throws MetaException; + default List getAllTables(String catName, String dbName) throws MetaException { + return unwrap(TableStore.class).getTables(catName, dbName, null, null, -1); + } /** * Gets a list of tables based on a filter string and filter type. @@ -637,8 +697,10 @@ List
getTableObjectsByName(String catName, String dbname, List ta * @throws MetaException * @throws UnknownDBException */ - List listTableNamesByFilter(String catName, String dbName, String filter, - short max_tables) throws MetaException, UnknownDBException; + default List listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException { + return unwrap(TableStore.class).listTableNamesByFilter(catName, dbName, filter, max_tables); + } /** * Get a partial or complete list of names for partitions of a table. @@ -649,8 +711,17 @@ List listTableNamesByFilter(String catName, String dbName, String filter * @return list of partition names. * @throws MetaException there was an error accessing the RDBMS */ - List listPartitionNames(String catName, String db_name, - String tbl_name, short max_parts) throws MetaException; + default List listPartitionNames(String catName, String db_name, + String tbl_name, short max_parts) throws MetaException { + try { + return unwrap(TableStore.class).listPartitionNames(new TableName(catName, db_name, tbl_name), + null, null, null, max_parts); + } catch (NoSuchObjectException nse) { + // In case of NoSuchObjectException, this method returns an empty list to + // take care of the old clients. + return Collections.emptyList(); + } + } /** * Get a partial or complete list of names for partitions of a table. @@ -664,9 +735,12 @@ List listPartitionNames(String catName, String db_name, * @return list of partition names. 
* @throws MetaException there was an error accessing the RDBMS */ - List listPartitionNames(String catName, String dbName, String tblName, + default List listPartitionNames(String catName, String dbName, String tblName, String defaultPartName, byte[] exprBytes, String order, - int maxParts) throws MetaException, NoSuchObjectException; + int maxParts) throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).listPartitionNames(new TableName(catName, dbName, tblName), + defaultPartName, exprBytes, order, maxParts); + } /** * Get partition names with a filter. This is a portion of the SQL where clause. @@ -678,8 +752,10 @@ List listPartitionNames(String catName, String dbName, String tblName, * @throws MetaException Error accessing the RDBMS or processing the filter. * @throws NoSuchObjectException no such table. */ - List listPartitionNamesByFilter(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + default List listPartitionNamesByFilter(String catName, String dbName, String tblName, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).listPartitionNamesByFilter(new TableName(catName, dbName, tblName), args); + } /** * Get a list of partition values as one big struct. 
@@ -695,9 +771,12 @@ List listPartitionNamesByFilter(String catName, String dbName, String tb * @return struct with all of the partition value information * @throws MetaException error access the RDBMS */ - PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, + default PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException; + List order, long maxParts) throws MetaException { + return unwrap(TableStore.class).listPartitionValues(new TableName(catName, db_name, tbl_name), + cols, applyDistinct, filter, ascending, order, maxParts); + } /** * Alter a partition. @@ -711,9 +790,11 @@ PartitionValuesResponse listPartitionValues(String catName, String db_name, Stri * @throws InvalidObjectException No such partition. * @throws MetaException error accessing the RDBMS. */ - Partition alterPartition(String catName, String db_name, String tbl_name, List part_vals, + default Partition alterPartition(String catName, String db_name, String tbl_name, List part_vals, Partition new_part, String queryValidWriteIds) - throws InvalidObjectException, MetaException; + throws InvalidObjectException, MetaException { + return unwrap(TableStore.class).alterPartition(new TableName(catName, db_name, tbl_name), part_vals, new_part, queryValidWriteIds); + } /** * Alter a set of partitions. 
@@ -731,10 +812,13 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List alterPartitions(String catName, String db_name, String tbl_name, + default List alterPartitions(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts, long writeId, String queryValidWriteIds) - throws InvalidObjectException, MetaException; + throws InvalidObjectException, MetaException { + return unwrap(TableStore.class).alterPartitions(new TableName(catName, db_name, tbl_name), + part_vals_list, new_parts, writeId, queryValidWriteIds); + } /** * Get partitions with a filter. This is a portion of the SQL where clause. @@ -746,9 +830,11 @@ List alterPartitions(String catName, String db_name, String tbl_name, * @throws MetaException Error accessing the RDBMS or processing the filter. * @throws NoSuchObjectException no such table. */ - List getPartitionsByFilter( + default List getPartitionsByFilter( String catName, String dbName, String tblName, GetPartitionsArgs args) - throws MetaException, NoSuchObjectException; + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartitionsByFilter(new TableName(catName, dbName, tblName), args); + } /** * Generic Partition request API, providing different kinds of filtering and controlling output. @@ -778,9 +864,11 @@ List getPartitionsByFilter( * @throws MetaException in case of errors * @throws NoSuchObjectException when table isn't found */ - List getPartitionSpecsByFilterAndProjection(Table table, + default List getPartitionSpecsByFilterAndProjection(Table table, GetProjectionsSpec projectionSpec, GetPartitionsFilterSpec filterSpec) - throws MetaException, NoSuchObjectException; + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); + } /** * Get partitions using an already parsed expression. 
@@ -791,9 +879,11 @@ List getPartitionSpecsByFilterAndProjection(Table table, * @return true if the result contains unknown partitions. * @throws TException error executing the expression */ - boolean getPartitionsByExpr(String catName, String dbName, String tblName, + default boolean getPartitionsByExpr(String catName, String dbName, String tblName, List result, GetPartitionsArgs args) - throws TException; + throws TException { + return unwrap(TableStore.class).getPartitionsByExpr(new TableName(catName, dbName, tblName), result, args); + } /** * Get the number of partitions that match a provided SQL filter. @@ -805,8 +895,10 @@ boolean getPartitionsByExpr(String catName, String dbName, String tblName, * @throws MetaException error accessing the RDBMS or executing the filter * @throws NoSuchObjectException no such table */ - int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) - throws MetaException, NoSuchObjectException; + default int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getNumPartitionsByFilter(new TableName(catName, dbName, tblName), filter); + } /** * Get the number of partitions that match a given partial specification. @@ -819,8 +911,10 @@ int getNumPartitionsByFilter(String catName, String dbName, String tblName, Stri * @throws MetaException error accessing the RDBMS or working with the specification. * @throws NoSuchObjectException no such table. */ - int getNumPartitionsByPs(String catName, String dbName, String tblName, List partVals) - throws MetaException, NoSuchObjectException; + default int getNumPartitionsByPs(String catName, String dbName, String tblName, List partVals) + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getNumPartitionsByPs(new TableName(catName, dbName, tblName), partVals); + } /** * Get partitions by name. 
@@ -849,27 +943,45 @@ default List getPartitionsByNames(String catName, String dbName, Stri * @throws MetaException error accessing the RDBMS. * @throws NoSuchObjectException No such table. */ - List getPartitionsByNames(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + default List getPartitionsByNames(String catName, String dbName, String tblName, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).getPartitionsByNames(new TableName(catName, dbName, tblName), args); + } - Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + default Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + return unwrap(TableStore.class).markPartitionForEvent(new TableName(catName, dbName, tblName), partVals, evtType); + } - boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + default boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + return unwrap(TableStore.class).isPartitionMarkedForEvent(new TableName(catName, dbName, tblName), partName, evtType); + } - boolean addRole(String rowName, String ownerName) - throws InvalidObjectException, MetaException, NoSuchObjectException; + default boolean addRole(String rowName, String ownerName) + throws InvalidObjectException, MetaException, 
NoSuchObjectException { + return unwrap(PrivilegeStore.class).addRole(rowName, ownerName); + } - boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + default boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { + return unwrap(PrivilegeStore.class).removeRole(roleName); + } - boolean grantRole(Role role, String userName, PrincipalType principalType, + default boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) - throws MetaException, NoSuchObjectException, InvalidObjectException; + throws MetaException, NoSuchObjectException, InvalidObjectException { + return unwrap(PrivilegeStore.class).grantRole(role, userName, principalType, grantor, grantorType, grantOption); + } - boolean revokeRole(Role role, String userName, PrincipalType principalType, - boolean grantOption) throws MetaException, NoSuchObjectException; + default boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean grantOption) throws MetaException, NoSuchObjectException { + return unwrap(PrivilegeStore.class).revokeRole(role, userName, principalType, grantOption); + } - PrincipalPrivilegeSet getUserPrivilegeSet(String userName, - List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getUserPrivilegeSet(userName, groupNames); + } /** * Get privileges for a database for a user. 
@@ -881,8 +993,10 @@ PrincipalPrivilegeSet getUserPrivilegeSet(String userName, * @throws InvalidObjectException no such database * @throws MetaException error accessing the RDBMS */ - PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName, - List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName, + List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getDBPrivilegeSet(catName, dbName, userName, groupNames); + } /** * Get privileges for a connector for a user. @@ -894,8 +1008,10 @@ PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String u * @throws InvalidObjectException no such database * @throws MetaException error accessing the RDBMS */ - PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connectorName, String userName, - List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connectorName, String userName, + List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getConnectorPrivilegeSet(catName, connectorName, userName, groupNames); + } /** * Get privileges for a table for a user. 
@@ -908,8 +1024,10 @@ PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connector * @throws InvalidObjectException no such table * @throws MetaException error accessing the RDBMS */ - PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName, - String userName, List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName, + String userName, List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getTablePrivilegeSet(new TableName(catName, dbName, tableName), userName, groupNames); + } /** * Get privileges for a partition for a user. @@ -923,8 +1041,10 @@ PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, Strin * @throws InvalidObjectException no such partition * @throws MetaException error accessing the RDBMS */ - PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName, - String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName, + String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getPartitionPrivilegeSet(new TableName(catName, dbName, tableName), partition, userName, groupNames); + } /** * Get privileges for a column in a table or partition for a user. 
@@ -939,11 +1059,16 @@ PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, S * @throws InvalidObjectException no such table, partition, or column * @throws MetaException error accessing the RDBMS */ - PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName, - String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; + default PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName, + String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { + return unwrap(PrivilegeStore.class).getColumnPrivilegeSet(new TableName(catName, dbName, tableName), + partitionName, columnName, userName, groupNames); + } - List listPrincipalGlobalGrants(String principalName, - PrincipalType principalType); + default List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalGlobalGrants(principalName, principalType); + } /** * For a given principal name and type, list the DB Grants @@ -953,8 +1078,10 @@ List listPrincipalGlobalGrants(String principalName, * @param dbName database name * @return list of privileges for that principal on the specified database. */ - List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String catName, String dbName); + default List listPrincipalDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName) { + return unwrap(PrivilegeStore.class).listPrincipalDBGrants(principalName, principalType, catName, dbName); + } /** * For a given principal name and type, list the DC Grants @@ -963,8 +1090,10 @@ List listPrincipalDBGrants(String principalName, * @param dcName data connector name * @return list of privileges for that principal on the specified data connector. 
*/ - List listPrincipalDCGrants(String principalName, - PrincipalType principalType, String dcName); + default List listPrincipalDCGrants(String principalName, + PrincipalType principalType, String dcName) { + return unwrap(PrivilegeStore.class).listPrincipalDCGrants(principalName, principalType, dcName); + } /** * For a given principal name and type, list the Table Grants @@ -975,9 +1104,11 @@ List listPrincipalDCGrants(String principalName, * @param tableName table name * @return list of privileges for that principal on the specified database. */ - List listAllTableGrants( + default List listAllTableGrants( String principalName, PrincipalType principalType, String catName, String dbName, - String tableName); + String tableName) { + return unwrap(PrivilegeStore.class).listAllTableGrants(principalName, principalType, new TableName(catName, dbName, tableName)); + } /** * For a given principal name and type, list the Table Grants @@ -989,9 +1120,12 @@ List listAllTableGrants( * @param partName partition name (not value) * @return list of privileges for that principal on the specified database. */ - List listPrincipalPartitionGrants( + default List listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, List partValues, String partName); + String tableName, List partValues, String partName) { + return unwrap(PrivilegeStore.class).listPrincipalPartitionGrants(principalName, principalType, + new TableName(catName, dbName, tableName), partValues, partName); + } /** * For a given principal name and type, list the Table Grants @@ -1003,9 +1137,12 @@ List listPrincipalPartitionGrants( * @param columnName column name * @return list of privileges for that principal on the specified database. 
*/ - List listPrincipalTableColumnGrants( + default List listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String columnName); + String tableName, String columnName) { + return unwrap(PrivilegeStore.class).listPrincipalTableColumnGrants(principalName, principalType, + new TableName(catName, dbName, tableName), columnName); + } /** * For a given principal name and type, list the Table Grants @@ -1018,29 +1155,46 @@ List listPrincipalTableColumnGrants( * @param columnName column name * @return list of privileges for that principal on the specified database. */ - List listPrincipalPartitionColumnGrants( + default List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, List partValues, String partName, String columnName); + String tableName, List partValues, String partName, String columnName) { + return unwrap(PrivilegeStore.class).listPrincipalPartitionColumnGrants(principalName, principalType, + new TableName(catName, dbName, tableName), partValues, partName, columnName); + } - boolean grantPrivileges (PrivilegeBag privileges) - throws InvalidObjectException, MetaException, NoSuchObjectException; + default boolean grantPrivileges (PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + return unwrap(PrivilegeStore.class).grantPrivileges(privileges); + } - boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) - throws InvalidObjectException, MetaException, NoSuchObjectException; + default boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) + throws InvalidObjectException, MetaException, NoSuchObjectException { + return unwrap(PrivilegeStore.class).revokePrivileges(privileges, grantOption); + } - boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) - throws 
InvalidObjectException, MetaException, NoSuchObjectException; + default boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + return unwrap(PrivilegeStore.class).refreshPrivileges(objToRefresh, authorizer, grantPrivileges); + } - org.apache.hadoop.hive.metastore.api.Role getRole( - String roleName) throws NoSuchObjectException; + default org.apache.hadoop.hive.metastore.api.Role getRole( + String roleName) throws NoSuchObjectException { + return unwrap(PrivilegeStore.class).getRole(roleName); + } - List listRoleNames(); + default List listRoleNames() { + return unwrap(PrivilegeStore.class).listRoleNames(); + } - List listRoles(String principalName, - PrincipalType principalType); + default List listRoles(String principalName, + PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listRoles(principalName, principalType); + } - List listRolesWithGrants(String principalName, - PrincipalType principalType); + default List listRolesWithGrants(String principalName, + PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listRolesWithGrants(principalName, principalType); + } /** @@ -1048,7 +1202,9 @@ List listRolesWithGrants(String principalName, * @param roleName * @return */ - List listRoleMembers(String roleName); + default List listRoleMembers(String roleName) { + return unwrap(PrivilegeStore.class).listRoleMembers(roleName); + } /** * Fetch a partition along with privilege information for a particular user. 
@@ -1063,9 +1219,12 @@ List listRolesWithGrants(String principalName, * @throws NoSuchObjectException no such partition exists * @throws InvalidObjectException error fetching privilege information */ - Partition getPartitionWithAuth(String catName, String dbName, String tblName, + default Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) - throws MetaException, NoSuchObjectException, InvalidObjectException; + throws MetaException, NoSuchObjectException, InvalidObjectException { + return unwrap(TableStore.class) + .getPartitionWithAuth(new TableName(catName, dbName, tblName), partVals, user_name, group_names); + } /** * Lists partition names that match a given partial specification @@ -1083,9 +1242,11 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName, * @throws MetaException error accessing RDBMS * @throws NoSuchObjectException No such table exists */ - List listPartitionNamesPs(String catName, String db_name, String tbl_name, + default List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, short max_parts) - throws MetaException, NoSuchObjectException; + throws MetaException, NoSuchObjectException { + return unwrap(TableStore.class).listPartitionNamesPs(new TableName(catName, db_name, tbl_name), part_vals, max_parts); + } /** * Lists partitions that match a given partial specification and sets their auth privileges. 
@@ -1101,8 +1262,10 @@ List listPartitionNamesPs(String catName, String db_name, String tbl_nam * @throws NoSuchObjectException No such table exists * @throws InvalidObjectException error access privilege information */ - List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - GetPartitionsArgs args) throws MetaException, InvalidObjectException, NoSuchObjectException; + default List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, + GetPartitionsArgs args) throws MetaException, InvalidObjectException, NoSuchObjectException { + return unwrap(TableStore.class).listPartitionsPsWithAuth(new TableName(catName, db_name, tbl_name), args); + } /** Persists the given column statistics object to the metastore * @param colStats object to persist @@ -1306,8 +1469,10 @@ void updateMasterKey(Integer seqNo, String key) * @throws MetaException error access RDBMS or storage. * @throws NoSuchObjectException One or more of the partitions does not exist. */ - void dropPartitions(String catName, String dbName, String tblName, List partNames) - throws MetaException, NoSuchObjectException; + default void dropPartitions(String catName, String dbName, String tblName, List partNames) + throws MetaException, NoSuchObjectException { + unwrap(TableStore.class).dropPartitions(new TableName(catName, dbName, tblName), partNames); + } /** * List all DB grants for a given principal. @@ -1315,8 +1480,10 @@ void dropPartitions(String catName, String dbName, String tblName, List * @param principalType type * @return all DB grants for this principal */ - List listPrincipalDBGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalDBGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalDBGrantsAll(principalName, principalType); + } /** * List all DC grants for a given principal. 
@@ -1324,8 +1491,10 @@ List listPrincipalDBGrantsAll( * @param principalType type * @return all DC grants for this principal */ - List listPrincipalDCGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalDCGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalDCGrantsAll(principalName, principalType); + } /** * List all Table grants for a given principal @@ -1333,8 +1502,10 @@ List listPrincipalDCGrantsAll( * @param principalType type * @return all Table grants for this principal */ - List listPrincipalTableGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalTableGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalTableGrantsAll(principalName, principalType); + } /** * List all Partition grants for a given principal @@ -1342,8 +1513,10 @@ List listPrincipalTableGrantsAll( * @param principalType type * @return all Partition grants for this principal */ - List listPrincipalPartitionGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalPartitionGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalPartitionGrantsAll(principalName, principalType); + } /** * List all Table column grants for a given principal @@ -1351,8 +1524,10 @@ List listPrincipalPartitionGrantsAll( * @param principalType type * @return all Table column grants for this principal */ - List listPrincipalTableColumnGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalTableColumnGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalTableColumnGrantsAll(principalName, principalType); + } /** * List all Partition column grants for a given principal @@ -1360,10 +1535,14 @@ List 
listPrincipalTableColumnGrantsAll( * @param principalType type * @return all Partition column grants for this principal */ - List listPrincipalPartitionColumnGrantsAll( - String principalName, PrincipalType principalType); + default List listPrincipalPartitionColumnGrantsAll( + String principalName, PrincipalType principalType) { + return unwrap(PrivilegeStore.class).listPrincipalPartitionColumnGrantsAll(principalName, principalType); + } - List listGlobalGrantsAll(); + default List listGlobalGrantsAll() { + return unwrap(PrivilegeStore.class).listGlobalGrantsAll(); + } /** * Find all the privileges for a given database. @@ -1371,14 +1550,18 @@ List listPrincipalPartitionColumnGrantsAll( * @param dbName database name * @return list of all privileges. */ - List listDBGrantsAll(String catName, String dbName); + default List listDBGrantsAll(String catName, String dbName) { + return unwrap(PrivilegeStore.class).listDBGrantsAll(catName, dbName); + } /** * Find all the privileges for a given data connector. * @param dcName data connector name * @return list of all privileges. */ - List listDCGrantsAll(String dcName); + default List listDCGrantsAll(String dcName) { + return unwrap(PrivilegeStore.class).listDCGrantsAll(dcName); + } /** * Find all of the privileges for a given column in a given partition. 
@@ -1389,8 +1572,11 @@ List listPrincipalPartitionColumnGrantsAll( * @param columnName column name * @return all privileges on this column in this partition */ - List listPartitionColumnGrantsAll( - String catName, String dbName, String tableName, String partitionName, String columnName); + default List listPartitionColumnGrantsAll( + String catName, String dbName, String tableName, String partitionName, String columnName) { + return unwrap(PrivilegeStore.class).listPartitionColumnGrantsAll(new TableName(catName, dbName, tableName), + partitionName, columnName); + } /** * Find all of the privileges for a given table @@ -1399,7 +1585,9 @@ List listPartitionColumnGrantsAll( * @param tableName table name * @return all privileges on this table */ - List listTableGrantsAll(String catName, String dbName, String tableName); + default List listTableGrantsAll(String catName, String dbName, String tableName) { + return unwrap(PrivilegeStore.class).listTableGrantsAll(new TableName(catName, dbName, tableName)); + } /** * Find all of the privileges for a given partition. @@ -1409,8 +1597,10 @@ List listPartitionColumnGrantsAll( * @param partitionName partition name (not value) * @return all privileges on this partition */ - List listPartitionGrantsAll( - String catName, String dbName, String tableName, String partitionName); + default List listPartitionGrantsAll( + String catName, String dbName, String tableName, String partitionName) { + return unwrap(PrivilegeStore.class).listPartitionGrantsAll(new TableName(catName, dbName, tableName), partitionName); + } /** * Find all of the privileges for a given column in a given table. 
@@ -1420,8 +1610,10 @@ List listPartitionGrantsAll( * @param columnName column name * @return all privileges on this column in this table */ - List listTableColumnGrantsAll( - String catName, String dbName, String tableName, String columnName); + default List listTableColumnGrantsAll( + String catName, String dbName, String tableName, String columnName) { + return unwrap(PrivilegeStore.class).listTableColumnGrantsAll(new TableName(catName, dbName, tableName), columnName); + } /** * Register a user-defined function based on the function specification passed in. @@ -1532,22 +1724,27 @@ List getPartitionColStatsForDatabase(String catName, * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId */ - NotificationEventResponse getNextNotification(NotificationEventRequest rqst); - + default NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { + return unwrap(NotificationStore.class).getNextNotification(rqst); + } /** * Add a notification entry. This should only be called from inside the metastore * @param event the notification to add * @throws MetaException error accessing RDBMS */ - void addNotificationEvent(NotificationEvent event) throws MetaException; + default void addNotificationEvent(NotificationEvent event) throws MetaException { + unwrap(NotificationStore.class).addNotificationEvent(event); + } /** * Remove older notification events. * * @param olderThan Remove any events older or equal to a given number of seconds */ - void cleanNotificationEvents(int olderThan); + default void cleanNotificationEvents(int olderThan) { + unwrap(NotificationStore.class).cleanNotificationEvents(olderThan); + } /** * Get the last issued notification event id. This is intended for use by the export command @@ -1555,14 +1752,18 @@ List getPartitionColStatsForDatabase(String catName, * and determine which notification events happened before or after the export. 
* @return */ - CurrentNotificationEventId getCurrentNotificationEventId(); + default CurrentNotificationEventId getCurrentNotificationEventId() { + return unwrap(NotificationStore.class).getCurrentNotificationEventId(); + } /** * Get the number of events corresponding to given database with fromEventId. * This is intended for use by the repl commands to track the progress of incremental dump. * @return */ - NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); + default NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { + return unwrap(NotificationStore.class).getNotificationEventsCount(rqst); + } /* * Flush any catalog objects held by the metastore implementation. Note that this does not @@ -1626,19 +1827,25 @@ default FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { * Gets total number of tables. */ @InterfaceStability.Evolving - int getTableCount() throws MetaException; + default int getTableCount() throws MetaException { + return unwrap(TableStore.class).getObjectCount("tableName", MTable.class.getName()); + } /** * Gets total number of partitions. */ @InterfaceStability.Evolving - int getPartitionCount() throws MetaException; + default int getPartitionCount() throws MetaException { + return unwrap(TableStore.class).getObjectCount("partitionName", MPartition.class.getName()); + } /** * Gets total number of databases. */ @InterfaceStability.Evolving - int getDatabaseCount() throws MetaException; + default int getDatabaseCount() throws MetaException { + return unwrap(TableStore.class).getObjectCount("name", MDatabase.class.getName()); + } /** * SQLPrimaryKey represents a single primary key column. @@ -2020,7 +2227,9 @@ Map> getPartitionColsWithStats(String catName, String dbNam * Remove older notification events. 
* @param olderThan Remove any events older or equal to a given number of seconds */ - void cleanWriteNotificationEvents(int olderThan); + default void cleanWriteNotificationEvents(int olderThan) { + unwrap(NotificationStore.class).cleanWriteNotificationEvents(olderThan); + } /** * Get all write events for a specific transaction . @@ -2028,7 +2237,9 @@ Map> getPartitionColsWithStats(String catName, String dbNam * @param dbName the name of db for which dump is being taken * @param tableName the name of the table for which the dump is being taken */ - List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException; + default List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { + return unwrap(NotificationStore.class).getAllWriteEventInfo(txnId, dbName, tableName); + } /** * Checking if table is part of a materialized view. @@ -2037,7 +2248,9 @@ Map> getPartitionColsWithStats(String catName, String dbNam * @param tblName table name * @return list of materialized views that uses the table */ - List isPartOfMaterializedView(String catName, String dbName, String tblName); + default List isPartOfMaterializedView(String catName, String dbName, String tblName) { + return unwrap(TableStore.class).isPartOfMaterializedView(new TableName(catName, dbName, tblName)); + } /** * Returns details about a scheduled query by name. 
@@ -2103,7 +2316,15 @@ Map> updatePartitionColumnStatisticsInBatch( Package findPackage(GetPackageRequest request); List listPackages(ListPackageRequest request); void dropPackage(DropPackageRequest request); - public MTable ensureGetMTable(String catName, String dbName, String tblName) throws NoSuchObjectException; + default MTable ensureGetMTable(String catName, String dbName, String tblName) throws NoSuchObjectException { + return unwrap(TableStore.class).ensureGetMTable(new TableName(catName, dbName, tblName)); + } + + MDatabase ensureGetMDatabase(String catName, String dbName) throws NoSuchObjectException; + + default MPartition ensureGetMPartition(TableName tableName, List partVals) throws MetaException { + return unwrap(TableStore.class).ensureGetMPartition(tableName, partVals); + } /** Persistent Property Management. */ default PropertyStore getPropertyStore() { @@ -2117,7 +2338,16 @@ default PropertyStore getPropertyStore() { */ default long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue) throws MetaException, NoSuchObjectException { - throw new UnsupportedOperationException("This Store doesn't support updating table parameter with expected value"); + return unwrap(TableStore.class).updateParameterWithExpectedValue(table, key, expectedValue, newValue); } + /** + * Returns an object that implements the given interface to allow the operation + * on the specific metadata. 
+ * + * @param iface A Class defining an interface that the result must implement + * @return an object that implements the interface + * @throws RuntimeException If the context cannot be unwrapped to the provided class + */ + T unwrap(Class iface); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 04468bd2139c..80e9fb808bb1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.messaging.*; +import org.apache.hadoop.hive.metastore.model.MDatabase; import org.apache.hadoop.hive.metastore.model.MTable; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; @@ -1494,11 +1495,6 @@ public Table getTable(String catName, String dbName, String tblName, String vali return parts; } - @Override public Map getPartitionLocations(String catName, String dbName, String tblName, - String baseLocationToNotShow, int max) { - return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); - } - @Override public Table alterTable(String catName, String dbName, String tblName, Table newTable, String validWriteIds) throws InvalidObjectException, MetaException { newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds); @@ -1531,39 +1527,11 @@ public Table getTable(String catName, String dbName, String tblName, String vali return newTable; } - @Override public void updateTableParams(List updates) throws MetaException, 
NoSuchObjectException { + @Override + public void updateTableParams(List updates) throws MetaException, NoSuchObjectException { rawStore.updateTableParams(updates); } - @Override public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) - throws MetaException { - rawStore.updateCreationMetadata(catName, dbname, tablename, cm); - } - - @Override public List getTables(String catName, String dbName, String pattern) throws MetaException { - return rawStore.getTables(catName, dbName, pattern); - } - - @Override public List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) - throws MetaException { - return rawStore.getTables(catName, dbName, pattern, tableType, limit); - } - - @Override public List
getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException { - // TODO functionCache - return rawStore.getAllMaterializedViewObjectsForRewriting(catName); - } - - @Override public List getMaterializedViewsForRewriting(String catName, String dbName) - throws MetaException, NoSuchObjectException { - return rawStore.getMaterializedViewsForRewriting(catName, dbName); - } - - @Override public List getTableMeta(String catName, String dbNames, String tableNames, - List tableTypes) throws MetaException { - return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); - } - @Override public List
getTableObjectsByName(String catName, String dbName, List tblNames) throws MetaException, UnknownDBException { if (canUseEvents && rawStore.isActiveTransaction()) { @@ -1601,25 +1569,8 @@ public Table getTable(String catName, String dbName, String tblName, String vali return tables; } - @Override - public List
getTableObjectsByName(String catName, String db, List tbl_names, - GetProjectionsSpec projectionsSpec, String tablePattern) throws MetaException, UnknownDBException { - return rawStore.getTableObjectsByName(catName, db, tbl_names, projectionsSpec, tablePattern); - } - - @Override public List getAllTables(String catName, String dbName) throws MetaException { - return rawStore.getAllTables(catName, dbName); - } - - @Override - // TODO: implement using SharedCache - public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) - throws MetaException, UnknownDBException { - return rawStore.listTableNamesByFilter(catName, dbName, filter, maxTables); - } - @Override public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) - throws MetaException { + throws MetaException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -1641,24 +1592,6 @@ public List listTableNamesByFilter(String catName, String dbName, String return partitionNames; } - @Override - public List listPartitionNames(String catName, String dbName, String tblName, String defaultPartName, - byte[] exprBytes, String order, int maxParts) throws MetaException, NoSuchObjectException { - throw new UnsupportedOperationException(); - } - - @Override - public List listPartitionNamesByFilter(String catName, String dbName, String tblName, - GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - throw new UnsupportedOperationException(); - } - - @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName, - List cols, boolean applyDistinct, String filter, boolean ascending, List order, - long maxParts) throws MetaException { - throw new UnsupportedOperationException(); - } - @Override public Partition alterPartition(String catName, String dbName, String tblName, List partVals, 
Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException { newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); @@ -1708,22 +1641,6 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, Str return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, result); } - @Override - // TODO: implement using SharedCache - public List getPartitionsByFilter(String catName, String dbName, String tblName, GetPartitionsArgs args) - throws MetaException, NoSuchObjectException { - return rawStore.getPartitionsByFilter(catName, dbName, tblName, args); - } - - @Override - /** - * getPartitionSpecsByFilterAndProjection interface is currently non-cacheable. - */ public List getPartitionSpecsByFilterAndProjection(Table table, - GetProjectionsSpec projectionSpec, GetPartitionsFilterSpec filterSpec) - throws MetaException, NoSuchObjectException { - return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); - } - @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, List result, GetPartitionsArgs args) throws TException { catName = StringUtils.normalizeIdentifier(catName); @@ -1749,11 +1666,6 @@ public List getPartitionsByFilter(String catName, String dbName, Stri return hasUnknownPartitions; } - @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) - throws MetaException, NoSuchObjectException { - return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); - } - @VisibleForTesting public static List partNameToVals(String name) { if (name == null) { return null; @@ -1789,145 +1701,6 @@ public List getPartitionsByFilter(String catName, String dbName, Stri return partitions; } - @Override public Table markPartitionForEvent(String catName, String dbName, String tblName, - Map partVals, PartitionEventType evtType) - throws 
MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); - } - - @Override public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, - Map partName, PartitionEventType evtType) - throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); - } - - @Override public boolean addRole(String rowName, String ownerName) - throws InvalidObjectException, MetaException, NoSuchObjectException { - return rawStore.addRole(rowName, ownerName); - } - - @Override public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { - return rawStore.removeRole(roleName); - } - - @Override public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, - PrincipalType grantorType, boolean grantOption) - throws MetaException, NoSuchObjectException, InvalidObjectException { - return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption); - } - - @Override public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) - throws MetaException, NoSuchObjectException { - return rawStore.revokeRole(role, userName, principalType, grantOption); - } - - @Override public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) - throws InvalidObjectException, MetaException { - return rawStore.getUserPrivilegeSet(userName, groupNames); - } - - @Override public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, - List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); - } - - @Override public PrincipalPrivilegeSet getConnectorPrivilegeSet(String catName, String 
connectorName, String userName, - List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getConnectorPrivilegeSet(catName, connectorName, userName, groupNames); - } - - @Override public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, - String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); - } - - @Override public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, - String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); - } - - @Override public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, - String partitionName, String columnName, String userName, List groupNames) - throws InvalidObjectException, MetaException { - return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); - } - - @Override public List listPrincipalGlobalGrants(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalGlobalGrants(principalName, principalType); - } - - @Override public List listPrincipalDBGrants(String principalName, PrincipalType principalType, - String catName, String dbName) { - return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); - } - - @Override public List listPrincipalDCGrants(String principalName, PrincipalType principalType, - String dcName) { - return rawStore.listPrincipalDCGrants(principalName, principalType, dcName); - } - - @Override public List listAllTableGrants(String principalName, PrincipalType principalType, - String catName, String dbName, String tableName) { - return rawStore.listAllTableGrants(principalName, principalType, 
catName, dbName, tableName); - } - - @Override public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String catName, String dbName, String tableName, List partValues, - String partName) { - return rawStore - .listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName); - } - - @Override public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { - return rawStore - .listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); - } - - @Override public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, String catName, String dbName, String tableName, List partValues, - String partName, String columnName) { - return rawStore - .listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, - partName, columnName); - } - - @Override public boolean grantPrivileges(PrivilegeBag privileges) - throws InvalidObjectException, MetaException, NoSuchObjectException { - return rawStore.grantPrivileges(privileges); - } - - @Override public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) - throws InvalidObjectException, MetaException, NoSuchObjectException { - return rawStore.revokePrivileges(privileges, grantOption); - } - - @Override public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, - PrivilegeBag grantPrivileges) throws InvalidObjectException, MetaException, NoSuchObjectException { - return rawStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges); - } - - @Override public Role getRole(String roleName) throws NoSuchObjectException { - return rawStore.getRole(roleName); - } - - @Override public List listRoleNames() { - return rawStore.listRoleNames(); - } - - @Override public List listRoles(String 
principalName, PrincipalType principalType) { - return rawStore.listRoles(principalName, principalType); - } - - @Override public List listRolesWithGrants(String principalName, PrincipalType principalType) { - return rawStore.listRolesWithGrants(principalName, principalType); - } - - @Override public List listRoleMembers(String roleName) { - return rawStore.listRoleMembers(roleName); - } - @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { catName = StringUtils.normalizeIdentifier(catName); @@ -1979,12 +1752,6 @@ public List getPartitionsByFilter(String catName, String dbName, Stri return partitionNames; } - @Override - public int getNumPartitionsByPs(String catName, String dbName, String tblName, List partSpecs) - throws MetaException, NoSuchObjectException { - return rawStore.getNumPartitionsByPs(catName, dbName, tblName, partSpecs); - } - @Override public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, GetPartitionsArgs args) throws MetaException, InvalidObjectException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); @@ -2532,67 +2299,6 @@ long getPartsFound() { rawStore.setMetaStoreSchemaVersion(version, comment); } - @Override public List listPrincipalDBGrantsAll(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalDBGrantsAll(principalName, principalType); - } - - @Override public List listPrincipalDCGrantsAll(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalDCGrantsAll(principalName, principalType); - } - - @Override public List listPrincipalTableGrantsAll(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalTableGrantsAll(principalName, principalType); - } - - @Override public List listPrincipalPartitionGrantsAll(String principalName, - 
PrincipalType principalType) { - return rawStore.listPrincipalPartitionGrantsAll(principalName, principalType); - } - - @Override public List listPrincipalTableColumnGrantsAll(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalTableColumnGrantsAll(principalName, principalType); - } - - @Override public List listPrincipalPartitionColumnGrantsAll(String principalName, - PrincipalType principalType) { - return rawStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType); - } - - @Override public List listGlobalGrantsAll() { - return rawStore.listGlobalGrantsAll(); - } - - @Override public List listDBGrantsAll(String catName, String dbName) { - return rawStore.listDBGrantsAll(catName, dbName); - } - - @Override public List listDCGrantsAll(String dcName) { - return rawStore.listDCGrantsAll(dcName); - } - - @Override public List listPartitionColumnGrantsAll(String catName, String dbName, - String tableName, String partitionName, String columnName) { - return rawStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); - } - - @Override public List listTableGrantsAll(String catName, String dbName, String tableName) { - return rawStore.listTableGrantsAll(catName, dbName, tableName); - } - - @Override public List listPartitionGrantsAll(String catName, String dbName, String tableName, - String partitionName) { - return rawStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); - } - - @Override public List listTableColumnGrantsAll(String catName, String dbName, String tableName, - String columnName) { - return rawStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); - } - @Override public void createFunction(Function func) throws InvalidObjectException, MetaException { // TODO functionCache rawStore.createFunction(func); @@ -2625,38 +2331,6 @@ long getPartsFound() { return rawStore.getFunctionsRequest(catName, dbName, pattern, isReturnNames); } - @Override 
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - return rawStore.getNextNotification(rqst); - } - - @Override public void addNotificationEvent(NotificationEvent event) throws MetaException { - rawStore.addNotificationEvent(event); - } - - @Override public void cleanNotificationEvents(int olderThan) { - rawStore.cleanNotificationEvents(olderThan); - } - - @Override public CurrentNotificationEventId getCurrentNotificationEventId() { - return rawStore.getCurrentNotificationEventId(); - } - - @Override public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { - return rawStore.getNotificationEventsCount(rqst); - } - - @Override public int getTableCount() throws MetaException { - return rawStore.getTableCount(); - } - - @Override public int getPartitionCount() throws MetaException { - return rawStore.getPartitionCount(); - } - - @Override public int getDatabaseCount() throws MetaException { - return rawStore.getDatabaseCount(); - } - @Override public List getPrimaryKeys(PrimaryKeysRequest request) throws MetaException { String catName = StringUtils.normalizeIdentifier(request.getCatName()); @@ -3045,14 +2719,6 @@ public long getCacheUpdateCount() { return sharedCache.getUpdateCount(); } - @Override public void cleanWriteNotificationEvents(int olderThan) { - rawStore.cleanWriteNotificationEvents(olderThan); - } - - @Override public List getAllWriteEventInfo(long txnId, String dbName, String tableName) - throws MetaException { - return rawStore.getAllWriteEventInfo(txnId, dbName, tableName); - } static boolean isNotInBlackList(String catName, String dbName, String tblName) { String str = TableName.getQualified(catName, dbName, tblName); @@ -3248,13 +2914,18 @@ public void dropPackage(DropPackageRequest request) { rawStore.dropPackage(request); } - @Override - public MTable ensureGetMTable(String catName, String dbName, String tblName) throws NoSuchObjectException { - return 
rawStore.ensureGetMTable(catName, dbName, tblName); - } - private boolean shouldGetConstraintFromRawStore(String catName, String dbName, String tblName) { return !shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction()) || !sharedCache.isTableConstraintValid(catName, dbName, tblName); } + + @Override + public MDatabase ensureGetMDatabase(String catName, String dbName) throws NoSuchObjectException { + return rawStore.ensureGetMDatabase(catName, dbName); + } + + @Override + public T unwrap(Class iface) { + return rawStore.unwrap(iface); + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/directsql/MetastoreDirectSqlUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/directsql/MetastoreDirectSqlUtils.java index a3c4523dc872..489a61a08596 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/directsql/MetastoreDirectSqlUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/directsql/MetastoreDirectSqlUtils.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.metastore.PersistenceManagerProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.datanucleus.ExecutionContext; import org.datanucleus.api.jdo.JDOPersistenceManager; @@ -619,7 +620,14 @@ else if (value instanceof byte[]) { static Long getModelIdentity(PersistenceManager pm, Class modelClass) throws MetaException { - ExecutionContext ec = ((JDOPersistenceManager) pm).getExecutionContext(); + ExecutionContext ec; + if (pm instanceof JDOPersistenceManager jp) { + ec = jp.getExecutionContext(); + } else if (pm instanceof PersistenceManagerProxy.ExecutionContextReference ecr) { + ec = 
ecr.getExecutionContext(); + } else { + throw new MetaException("Unknown " + pm); + } AbstractClassMetaData cmd = ec.getMetaDataManager().getMetaDataForClass(modelClass, ec.getClassLoaderResolver()); switch (cmd.getIdentityType()) { case DATASTORE : diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetHelper.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetHelper.java new file mode 100644 index 000000000000..296e4654f879 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetHelper.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import com.codahale.metrics.Counter; +import com.google.common.annotations.VisibleForTesting; + +import javax.jdo.JDOException; +import javax.jdo.PersistenceManager; +import java.util.List; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.apache.hadoop.hive.metastore.ExceptionHandler; +import org.apache.hadoop.hive.metastore.directsql.MetaStoreDirectSql; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.metrics.Metrics; +import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; +import org.datanucleus.api.jdo.JDOTransaction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. */ +@VisibleForTesting +public abstract class GetHelper { + private static final Logger LOG = LoggerFactory.getLogger(GetHelper.class); + private static final Counter directSqlErrors = Metrics.getRegistry() != null ? 
+ Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS) : new Counter(); + private final boolean isInTxn, doTrace, allowJdo; + private boolean doUseDirectSql; + private long start; + private Table table; + protected final RawStore baseStore; + protected final PersistenceManager pm; + private MetaStoreDirectSql directSql; + protected final List partitionFields; + protected final A argument; + private boolean success = false; + protected T results = null; + + public GetHelper(RawStoreAware rsa, A args) throws MetaException { + this(rsa, args, null); + } + + public GetHelper(RawStoreAware rsa, + A args, List fields) throws MetaException { + this.baseStore = rsa.getBaseStore(); + this.partitionFields = fields; + this.argument = args; + this.doTrace = LOG.isDebugEnabled(); + this.isInTxn = baseStore.isActiveTransaction(); + this.pm = rsa.getPersistentManager(); + this.allowJdo = canUseJdoQuery(); + + boolean isConfigEnabled = MetastoreConf.getBoolVar(baseStore.getConf(), + MetastoreConf.ConfVars.TRY_DIRECT_SQL); + if (isConfigEnabled) { + directSql = new MetaStoreDirectSql(pm, baseStore.getConf(), ""); + } + + if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { + throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken. + } + this.doUseDirectSql = isConfigEnabled && directSql.isCompatibleDatastore(); + } + + protected boolean canUseDirectSql() throws MetaException { + return true; // By default, assume we can use directSQL - that's kind of the point.
+ } + + protected boolean canUseJdoQuery() throws MetaException { + return true; + } + + protected abstract String describeResult(); + protected abstract T getSqlResult() throws MetaException; + protected abstract T getJdoResult() + throws MetaException, NoSuchObjectException, InvalidObjectException, + InvalidInputException; + + public T run(boolean initTable) throws MetaException, NoSuchObjectException { + try { + start(initTable); + String savePoint = isInTxn && allowJdo ? "rollback_" + System.nanoTime() : null; + if (doUseDirectSql) { + try { + directSql.prepareTxn(); + setTransactionSavePoint(savePoint); + this.results = getSqlResult(); + LOG.debug("Using direct SQL optimization."); + } catch (Exception ex) { + handleDirectSqlError(ex, savePoint); + } + } + // Note that this will be invoked in 2 cases: + // 1) DirectSQL was disabled to start with; + // 2) DirectSQL threw and was disabled in handleDirectSqlError. + if (!doUseDirectSql && canUseJdoQuery()) { + this.results = getJdoResult(); + LOG.debug("Not using direct SQL optimization."); + } + return commit(); + } catch (NoSuchObjectException | MetaException ex) { + throw ex; + } catch (Exception ex) { + LOG.error("", ex); + throw new MetaException(ex.getMessage()); + } finally { + close(); + } + } + + private void start(boolean initTable) throws MetaException, NoSuchObjectException { + start = doTrace ? 
System.nanoTime() : 0; + baseStore.openTransaction(); + if (initTable && (argument != null)) { + TableStore store = baseStore.unwrap(TableStore.class); + table = store.getTable((TableName) argument, null, -1); + if (table == null) { + throw new NoSuchObjectException( + "Specified catalog.database.table does not exist : " + argument); + } + } + doUseDirectSql = doUseDirectSql && canUseDirectSql(); + } + + private void handleDirectSqlError(Exception ex, String savePoint) throws MetaException, NoSuchObjectException { + String message = null; + try { + message = generateShorterMessage(ex); + } catch (Throwable t) { + message = ex.toString() + "; error building a better message: " + t.getMessage(); + } + LOG.warn(message); // Don't log the exception, people just get confused. + LOG.debug("Full DirectSQL callstack for debugging (not an error)", ex); + + if (!allowJdo || !DatabaseProduct.isRecoverableException(ex)) { + throw ExceptionHandler.newMetaException(ex); + } + + if (!isInTxn) { + JDOException rollbackEx = null; + try { + baseStore.rollbackTransaction(); + } catch (JDOException jex) { + rollbackEx = jex; + } + if (rollbackEx != null) { + // Datanucleus propagates some pointless exceptions and rolls back in the finally. + if (baseStore.isActiveTransaction()) { + throw rollbackEx; // Throw if the tx wasn't rolled back. + } + LOG.info("Ignoring exception, rollback succeeded: " + rollbackEx.getMessage()); + } + + start = doTrace ? System.nanoTime() : 0; + baseStore.openTransaction(); + if (table != null) { + TableStore store = baseStore.unwrap(TableStore.class); + table = store.getTable((TableName) argument, null, -1); + if (table == null) { + throw new NoSuchObjectException( + "Specified catalog.database.table does not exist : " + argument); + } + } + } else { + rollbackTransactionToSavePoint(savePoint); + start = doTrace ? 
System.nanoTime() : 0; + } + + directSqlErrors.inc(); + doUseDirectSql = false; + } + + private void setTransactionSavePoint(String savePoint) { + if (savePoint != null) { + ((JDOTransaction) pm.currentTransaction()).setSavepoint(savePoint); + } + } + + private void rollbackTransactionToSavePoint(String savePoint) { + if (savePoint != null) { + ((JDOTransaction) pm.currentTransaction()).rollbackToSavepoint(savePoint); + } + } + + private String generateShorterMessage(Exception ex) { + StringBuilder message = new StringBuilder( + "Falling back to ORM path due to direct SQL failure (this is not an error): "); + Throwable t = ex; + StackTraceElement[] prevStack = null; + while (t != null) { + message.append(t.getMessage()); + StackTraceElement[] stack = t.getStackTrace(); + int uniqueFrames = stack.length - 1; + if (prevStack != null) { + int n = prevStack.length - 1; + while (uniqueFrames >= 0 && n >= 0 && stack[uniqueFrames].equals(prevStack[n])) { + uniqueFrames--; n--; + } + } + for (int i = 0; i <= uniqueFrames; ++i) { + StackTraceElement ste = stack[i]; + message.append(" at ").append(ste); + if (ste.getMethodName().contains("getSqlResult") + && (ste.getFileName() == null || ste.getFileName().contains("ObjectStore"))) { + break; + } + } + prevStack = stack; + t = t.getCause(); + if (t != null) { + message.append(";\n Caused by: "); + } + } + return message.toString(); + } + + private T commit() { + success = baseStore.commitTransaction(); + if (doTrace) { + double time = ((System.nanoTime() - start) / 1000000.0); + String result = describeResult(); + String retrieveType = doUseDirectSql ? 
"SQL" : "ORM"; + + LOG.debug("{} retrieved using {} in {}ms", result, retrieveType, time); + } + return results; + } + + private void close() { + if (!success) { + baseStore.rollbackTransaction(); + } + } + + public Table getTable() { + return table; + } + + public MetaStoreDirectSql getDirectSql() { + return directSql; + } + + public List getPartitionFields() { + return partitionFields; + } + + public static long getDirectSqlErrors() { + return directSqlErrors.getCount(); + } +} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetListHelper.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetListHelper.java new file mode 100644 index 000000000000..fdd9e769cbed --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/GetListHelper.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.MetaException; + +public abstract class GetListHelper extends GetHelper> { + public GetListHelper(RawStoreAware rsa, A args) throws MetaException { + super(rsa, args, null); + } + + public GetListHelper(RawStoreAware rsa, + A args, List fields) throws MetaException { + super(rsa, args, fields); + } + + @Override + protected String describeResult() { + return results.size() + " entries"; + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/MetaDescriptor.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/MetaDescriptor.java new file mode 100644 index 000000000000..74607e89c5b0 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/MetaDescriptor.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceStability.Evolving +@InterfaceAudience.LimitedPrivate("Hive developer") +@Retention(RetentionPolicy.RUNTIME) +public @interface MetaDescriptor { + String alias(); + Class defaultImpl(); + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + @interface NoTransaction {} +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/PersistenceManagerProxy.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/PersistenceManagerProxy.java new file mode 100644 index 000000000000..63822639f6a5 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/PersistenceManagerProxy.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import javax.jdo.PersistenceManager; +import javax.jdo.Query; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.util.List; +import java.util.Objects; + +import org.datanucleus.ExecutionContext; + +public class PersistenceManagerProxy implements InvocationHandler { + private final PersistenceManager target; + private final MethodHandle getExecutionContext; + private final List openedQueries; + + private PersistenceManagerProxy(PersistenceManager pm, List trackOpenedQueries) { + this.target = Objects.requireNonNull(pm); + this.openedQueries = Objects.requireNonNull(trackOpenedQueries); + MethodHandles.Lookup lookup = MethodHandles.lookup(); + try { + java.lang.invoke.MethodType type = java.lang.invoke.MethodType.methodType(ExecutionContext.class); + this.getExecutionContext = lookup.findVirtual(target.getClass(), "getExecutionContext", type); + } catch (Exception e) { + throw new RuntimeException("Method getExecutionContext not found", e); + } + } + + public static PersistenceManager getProxy(PersistenceManager pm, List trackOpenedQueries) { + return (PersistenceManager) Proxy.newProxyInstance(pm.getClass().getClassLoader(), + new Class[] {PersistenceManager.class, ExecutionContextReference.class}, + new PersistenceManagerProxy(pm, trackOpenedQueries)); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + // Redirect if the interface method "getExecutionContext" is called + if (method.getName().equals("getExecutionContext")) { + MethodHandle boundGetExecutionContext = getExecutionContext.bindTo(target); + return args == null ? 
boundGetExecutionContext.invokeWithArguments() + : boundGetExecutionContext.invokeWithArguments(args); + } else if (method.getName().equals("newQuery")) { + Object result = method.invoke(target, args); + openedQueries.add((Query) result); + return result; + } + // Otherwise, proceed with the standard call + return method.invoke(target, args); + } + + // PersistenceManager doesn't provide a way to get the ExecutionContext + // if we create a proxy around the JDOPersistenceManager, which we use it + // to save a savepoint, or generate the primary key. + public interface ExecutionContextReference { + /** + * @return ExecutionContext the current JDOPersistenceManager holds + */ + ExecutionContext getExecutionContext(); + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/RawStoreAware.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/RawStoreAware.java new file mode 100644 index 000000000000..1d16764811cb --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/RawStoreAware.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import javax.jdo.PersistenceManager; + +import java.util.Objects; + +import org.apache.hadoop.hive.metastore.RawStore; + +public abstract class RawStoreAware { + protected RawStore baseStore; + protected PersistenceManager pm; + + public void setBaseStore(RawStore store) { + this.baseStore = Objects.requireNonNull(store); + } + + public void setPersistentManager(PersistenceManager manager) { + this.pm = Objects.requireNonNull(manager); + } + + public RawStore getBaseStore() { + return baseStore; + } + + public PersistenceManager getPersistentManager() { + return pm; + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/TransactionHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/TransactionHandler.java new file mode 100644 index 000000000000..5e50673fe7a9 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/TransactionHandler.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore; + +import javax.jdo.Query; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hive.metastore.RawStore; + +public record TransactionHandler (RawStore rs, T simpl, List closeQueriesAfterUse) + implements InvocationHandler { + + @SuppressWarnings("unchecked") + public static T getProxy(Class iface, TransactionHandler handler) { + List interfaces = new ArrayList<>(); + interfaces.add(iface); + interfaces.addAll(Arrays.asList(iface.getInterfaces())); + return (T) Proxy.newProxyInstance(iface.getClassLoader(), + interfaces.toArray(new Class[0]), handler); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + boolean openTxn = method.getAnnotation(MetaDescriptor.NoTransaction.class) == null; + boolean success = false; + if (openTxn) { + rs.openTransaction(); + } + try { + Object result = method.invoke(simpl, args); + if (openTxn) { + success = rs.commitTransaction(); + } + return result; + } catch (InvocationTargetException | UndeclaredThrowableException e) { + throw e.getCause(); + } finally { + if (openTxn && !success) { + rs.rollbackTransaction(); + } + for (Query q : closeQueriesAfterUse) { + q.closeAll(); + } + closeQueriesAfterUse.clear(); + } + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/NotificationStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/NotificationStore.java new file mode 100644 index 000000000000..a1d1a1db680d --- /dev/null +++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/NotificationStore.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.metastore.iface; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; +import org.apache.hadoop.hive.metastore.api.WriteEventInfo; +import org.apache.hadoop.hive.metastore.metastore.MetaDescriptor; +import org.apache.hadoop.hive.metastore.metastore.impl.NotificationStoreImpl; + +@MetaDescriptor(alias = "notification", defaultImpl = NotificationStoreImpl.class) +public interface NotificationStore { + /** + * Get the next notification event. + * @param rqst Request containing information on the last processed notification. 
+ * @return list of notifications, sorted by eventId + */ + NotificationEventResponse getNextNotification(NotificationEventRequest rqst); + + + /** + * Add a notification entry. This should only be called from inside the metastore + * @param event the notification to add + * @throws MetaException error accessing RDBMS + */ + void addNotificationEvent(NotificationEvent event) throws MetaException; + + /** + * Remove older notification events. + * + * @param olderThan Remove any events older or equal to a given number of seconds + */ + void cleanNotificationEvents(int olderThan); + + /** + * Get the last issued notification event id. This is intended for use by the export command + * so that users can determine the state of the system at the point of the export, + * and determine which notification events happened before or after the export. + * @return + */ + CurrentNotificationEventId getCurrentNotificationEventId(); + + /** + * Get the number of events corresponding to given database with fromEventId. + * This is intended for use by the repl commands to track the progress of incremental dump. + * @return + */ + NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); + + /** + * Remove older notification events. + * @param olderThan Remove any events older or equal to a given number of seconds + */ + void cleanWriteNotificationEvents(int olderThan); + + /** + * Get all write events for a specific transaction . 
+ * @param txnId get all the events done by this transaction + * @param dbName the name of db for which dump is being taken + * @param tableName the name of the table for which the dump is being taken + */ + List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException; +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/PrivilegeStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/PrivilegeStore.java new file mode 100644 index 000000000000..128288b7b74a --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/PrivilegeStore.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore.iface; + +import java.util.List; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.metastore.MetaDescriptor; +import org.apache.hadoop.hive.metastore.metastore.impl.PrivilegeStoreImpl; +import org.apache.hadoop.hive.metastore.model.MDBPrivilege; +import org.apache.hadoop.hive.metastore.model.MDCPrivilege; + +@MetaDescriptor(alias = "privilege", defaultImpl = PrivilegeStoreImpl.class) +public interface PrivilegeStore { + boolean addRole(String roleName, String ownerName) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + + boolean grantRole(Role role, String userName, PrincipalType principalType, + String grantor, PrincipalType grantorType, boolean grantOption) + throws MetaException, NoSuchObjectException, InvalidObjectException; + + boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean grantOption) throws MetaException, NoSuchObjectException; + + PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List groupNames) throws InvalidObjectException, MetaException; + + /** + * Get privileges for a database for a user. 
+ * @param catName catalog name + * @param dbName database name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated database + * @throws InvalidObjectException no such database + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName, + List groupNames) throws InvalidObjectException, MetaException; + + /** + * Get privileges for a connector for a user. + * @param catName catalog name + * @param connectorName connector name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated connector + * @throws InvalidObjectException no such database + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connectorName, String userName, + List groupNames) throws InvalidObjectException, MetaException; + + /** + * Get privileges for a table for a user. + * @param tableName table name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated table + * @throws InvalidObjectException no such table + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getTablePrivilegeSet (TableName tableName, + String userName, List groupNames) throws InvalidObjectException, MetaException; + + /** + * Get privileges for a partition for a user. 
+ * @param tableName table name + * @param partition partition name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated partition + * @throws InvalidObjectException no such partition + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getPartitionPrivilegeSet (TableName tableName, + String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; + + /** + * Get privileges for a column in a table or partition for a user. + * @param tableName table name + * @param partitionName partition name, or null for table level column permissions + * @param columnName column name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated column in the table or partition + * @throws InvalidObjectException no such table, partition, or column + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getColumnPrivilegeSet (TableName tableName, String partitionName, + String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; + + List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType); + + /** + * For a given principal name and type, list the DB Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @return list of privileges for that principal on the specified database. + */ + List listPrincipalDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName); + + /** + * For a given principal name and type, list the DC Grants + * @param principalName principal name + * @param principalType type + * @param dcName data connector name + * @return list of privileges for that principal on the specified data connector. 
+ */ + List listPrincipalDCGrants(String principalName, + PrincipalType principalType, String dcName); + + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param tableName table name + * @return list of privileges for that principal on the specified database. + */ + List listAllTableGrants( + String principalName, PrincipalType principalType, TableName tableName); + + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param tableName table name + * @param partName partition name (not value) + * @return list of privileges for that principal on the specified database. + */ + List listPrincipalPartitionGrants( + String principalName, PrincipalType principalType, TableName tableName, + List partValues, String partName); + + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param tableName table name + * @param columnName column name + * @return list of privileges for that principal on the specified database. + */ + List listPrincipalTableColumnGrants( + String principalName, PrincipalType principalType, TableName tableName, String columnName); + + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param tableName table name + * @param partName partition name (not value) + * @param columnName column name + * @return list of privileges for that principal on the specified database. 
+ */ + List listPrincipalPartitionColumnGrants( + String principalName, PrincipalType principalType, TableName tableName, + List partValues, String partName, String columnName); + + boolean grantPrivileges (PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + org.apache.hadoop.hive.metastore.api.Role getRole( + String roleName) throws NoSuchObjectException; + + List listRoleNames(); + + List listRoles(String principalName, + PrincipalType principalType); + + List listRolesWithGrants(String principalName, + PrincipalType principalType); + + + /** + * Get the role to principal grant mapping for given role + * @param roleName + * @return + */ + List listRoleMembers(String roleName); + + /** + * List all DB grants for a given principal. + * @param principalName principal name + * @param principalType type + * @return all DB grants for this principal + */ + List listPrincipalDBGrantsAll( + String principalName, PrincipalType principalType); + + /** + * List all DC grants for a given principal. 
+ * @param principalName principal name + * @param principalType type + * @return all DC grants for this principal + */ + List listPrincipalDCGrantsAll( + String principalName, PrincipalType principalType); + + /** + * List all Table grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table grants for this principal + */ + List listPrincipalTableGrantsAll( + String principalName, PrincipalType principalType); + + /** + * List all Partition grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition grants for this principal + */ + List listPrincipalPartitionGrantsAll( + String principalName, PrincipalType principalType); + + /** + * List all Table column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table column grants for this principal + */ + List listPrincipalTableColumnGrantsAll( + String principalName, PrincipalType principalType); + + /** + * List all Partition column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition column grants for this principal + */ + List listPrincipalPartitionColumnGrantsAll( + String principalName, PrincipalType principalType); + + List listGlobalGrantsAll(); + + /** + * Find all the privileges for a given database. + * @param catName catalog name + * @param dbName database name + * @return list of all privileges. + */ + List listDBGrantsAll(String catName, String dbName); + + /** + * Find all the privileges for a given data connector. + * @param dcName data connector name + * @return list of all privileges. + */ + List listDCGrantsAll(String dcName); + + /** + * Find all of the privileges for a given column in a given partition. 
+ * @param tableName table name + * @param partitionName partition name (not value) + * @param columnName column name + * @return all privileges on this column in this partition + */ + List listPartitionColumnGrantsAll( + TableName tableName, String partitionName, String columnName); + + /** + * Find all of the privileges for a given table + * @param tableName table name + * @return all privileges on this table + */ + List listTableGrantsAll(TableName tableName); + + /** + * Find all of the privileges for a given partition. + * @param tableName table name + * @param partitionName partition name (not value) + * @return all privileges on this partition + */ + List listPartitionGrantsAll( + TableName tableName, String partitionName); + + /** + * Find all of the privileges for a given column in a given table. + * @param tableName table name + * @param columnName column name + * @return all privileges on this column in this table + */ + List listTableColumnGrantsAll( + TableName tableName, String columnName); + + List listDatabaseGrants(String catName, String dbName, String authorizer); + + List listDataConnectorGrants(String dcName, String authorizer); +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/TableStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/TableStore.java new file mode 100644 index 000000000000..bf180f8766ea --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/iface/TableStore.java @@ -0,0 +1,477 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.metastore.iface; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; +import org.apache.hadoop.hive.metastore.api.GetProjectionsSpec; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import 
org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; +import org.apache.hadoop.hive.metastore.model.MPartition; +import org.apache.hadoop.hive.metastore.model.MTable; +import org.apache.hadoop.hive.metastore.metastore.MetaDescriptor; +import org.apache.hadoop.hive.metastore.metastore.impl.TableStoreImpl; +import org.apache.thrift.TException; + +@MetaDescriptor(alias = "table", defaultImpl = TableStoreImpl.class) +public interface TableStore { + + void createTable(Table tbl) throws InvalidObjectException, + MetaException; + /** + * Drop a table. + * @param table the table to be dropped + * @return true if the table was dropped + * @throws MetaException something went wrong, usually in the RDBMS or storage + * @throws NoSuchObjectException No table of this name + * @throws InvalidObjectException Don't think this is ever actually thrown + * @throws InvalidInputException Don't think this is ever actually thrown + */ + boolean dropTable(TableName table) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; + + /** + * Drop all partitions from the table, and return the partition's location that not a child of baseLocationToNotShow, + * when the baseLocationToNotShow is not null. + * @param table the table to drop partitions from + * @param baseLocationToNotShow Partition locations which are child of this path are omitted + * @param message postgres of this drop + * @return list of partition locations outside baseLocationToNotShow + * @throws MetaException something went wrong, usually in the RDBMS or storage + * @throws InvalidInputException unable to drop all partitions due to the invalid input + */ + List dropAllPartitionsAndGetLocations(TableName table, String baseLocationToNotShow, AtomicReference message) + throws MetaException, InvalidInputException, NoSuchObjectException, InvalidObjectException; + + /** + * Get a table object. 
+ * @param table the table to be got + * @param writeIdList string format of valid writeId transaction list + * @return table object, or null if no such table exists (wow it would be nice if we either + * consistently returned null or consistently threw NoSuchObjectException). + * @throws MetaException something went wrong in the RDBMS + */ + Table getTable(TableName table, + String writeIdList, long tableId) throws MetaException; + + /** + * Add a list of partitions to a table. + * @param table the table this partitions added to . + * @param parts list of partitions to be added. + * @return true if the operation succeeded. + * @throws InvalidObjectException never throws this AFAICT + * @throws MetaException the partitions don't belong to the indicated table or error writing to + * the RDBMS. + */ + boolean addPartitions(TableName table, List parts) + throws InvalidObjectException, MetaException; + + /** + * Get a partition. + * @param table table name. + * @param part_vals partition values for this table. + * @param writeIdList string format of valid writeId transaction list + * @return the partition. + * @throws MetaException error reading from RDBMS. + * @throws NoSuchObjectException no partition matching this specification exists. + */ + Partition getPartition(TableName table, + List part_vals, + String writeIdList) + throws MetaException, NoSuchObjectException; + + /** + * Get some or all partitions for a table. + * @param table table name + * @param args additional arguments for getting partitions + * @return list of partitions + * @throws MetaException error access the RDBMS. + * @throws NoSuchObjectException no such table exists + */ + List getPartitions(TableName table, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + + /** + * Get the location for every partition of a given table. If a partition location is a child of + * baseLocationToNotShow then the partitionName is returned, but the only null location is + * returned. 
+ * @param tableName table name. + * @param baseLocationToNotShow Partition locations which are child of this path are omitted, and + * null value returned instead. + * @param max The maximum number of partition locations returned, or -1 for all + * @return The map of the partitionName, location pairs + */ + Map getPartitionLocations(TableName tableName, String baseLocationToNotShow, int max); + + /** + * Alter a table. + * @param tableName name of the table. + * @param newTable New table object. Which parts of the table can be altered are + * implementation specific. + * @return + * @throws InvalidObjectException The new table object is invalid. + * @throws MetaException something went wrong, usually in the RDBMS or storage. + */ + Table alterTable(TableName tableName, Table newTable, + String queryValidWriteIds) + throws InvalidObjectException, MetaException; + + + boolean dropPartitions(TableName tableName, List partNames) + throws MetaException, NoSuchObjectException; + + /** + * Get table names that match a pattern. + * @param catName catalog to search in + * @param dbName database to search in + * @param pattern pattern to match + * @param tableType type of table to look for + * @param limit Maximum number of tables to return (undeterministic set) + * @return list of table names, if any + * @throws MetaException failure in querying the RDBMS + */ + List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) + throws MetaException; + + /** + * @param catName catalog name + * @param dbname + * The name of the database from which to retrieve the tables + * @param tableNames + * The names of the tables to retrieve. + * @param projectionSpec + * Projection Specification containing the columns that need to be returned. + * @return A list of the tables retrievable from the database + * whose names are in the list tableNames. 
+ * If there are duplicate names, only one instance of the table will be returned + * @throws MetaException failure in querying the RDBMS. + */ + List
getTableObjectsByName(String catName, String dbname, List tableNames, + GetProjectionsSpec projectionSpec, String tablePattern) throws MetaException, + UnknownDBException; + + /** + * Get list of materialized views in a database. + * @param catName catalog name + * @param dbName database name + * @return names of all materialized views in the database + * @throws MetaException error querying the RDBMS + * @throws NoSuchObjectException no such database + */ + List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, NoSuchObjectException; + + /** + + * @param catName catalog name to search in. Search must be confined to one catalog. + * @param dbNames databases to search in. + * @param tableNames names of tables to select. + * @param tableTypes types of tables to look for. + * @return list of matching table meta information. + * @throws MetaException failure in querying the RDBMS. + */ + List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException; + + /** + * Gets a list of tables based on a filter string and filter type. + * @param catName catalog name + * @param dbName + * The name of the database from which you will retrieve the table names + * @param filter + * The filter string + * @param max_tables + * The maximum number of tables returned + * @return A list of table names that match the desired filter + * @throws MetaException + * @throws UnknownDBException + */ + List listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException; + + /** + * Get partition names with a filter. This is a portion of the SQL where clause. + * @param tableName table name + * @param args additional arguments for getting partition names + * @return list of partition names matching the criteria + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException no such table. 
+ */ + List listPartitionNamesByFilter(TableName tableName, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + + /** + * Get a partial or complete list of names for partitions of a table. + * @param tableName the table + * @param defaultPartName default partition name. + * @param exprBytes expression for filtering resulting list, serialized from ExprNodeDesc. + * @param order ordered the resulting list. + * @param maxParts maximum number of partitions to retrieve, -1 for all. + * @return list of partition names. + * @throws MetaException there was an error accessing the RDBMS + */ + List listPartitionNames(TableName tableName, + String defaultPartName, byte[] exprBytes, String order, + int maxParts) throws MetaException, NoSuchObjectException; + + /** + * Get partitions using an already parsed expression. + * @param tableName the table + * @param args additional arguments for getting partitions + * @return true if the result contains unknown partitions. + * @throws TException error executing the expression + */ + boolean getPartitionsByExpr(TableName tableName, + List result, GetPartitionsArgs args) + throws TException; + + /** + * Get partitions by name. + * @param tableName the table. + * @param args additional arguments for getting partitions + * @return list of matching partitions + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException No such table. + */ + List getPartitionsByNames(TableName tableName, + GetPartitionsArgs args) throws MetaException, NoSuchObjectException; + + /** + * Alter a partition. + * @param tableName the table. + * @param part_vals partition values that describe the partition. + * @param new_part new partition object. This should be a complete copy of the old with + * changes values, not just the parts to update. + * @return + * @throws InvalidObjectException No such partition. + * @throws MetaException error accessing the RDBMS. 
+ */ + Partition alterPartition(TableName tableName, List part_vals, + Partition new_part, String queryValidWriteIds) + throws InvalidObjectException, MetaException; + + /** + * Alter a set of partitions. + * @param tableName table name. + * @param part_vals_list list of list of partition values. Each outer list describes one + * partition (with its list of partition values). + * @param new_parts list of new partitions. The order must match the old partitions described in + * part_vals_list. Each of these should be a complete copy of the new + * partition, not just the pieces to update. + * @param writeId write id of the transaction for the table + * @param queryValidWriteIds valid write id list of the transaction on the current table + * @return + * @throws InvalidObjectException One of the indicated partitions does not exist. + * @throws MetaException error accessing the RDBMS. + */ + List alterPartitions(TableName tableName, + List> part_vals_list, List new_parts, long writeId, + String queryValidWriteIds) + throws InvalidObjectException, MetaException; + + + /** + * Get partitions with a filter. This is a portion of the SQL where clause. + * @param tableName table name + * @param args additional arguments for getting partitions + * @return list of partition objects matching the criteria + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException no such table. + */ + List getPartitionsByFilter( + TableName tableName, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException; + + /** + * Generic Partition request API, providing different kinds of filtering and controlling output. + * + * @param table table for which whose partitions are requested + * * @param table table for which partitions are requested + * @param projectionSpec the projection spec from the GetPartitionsRequest + * This projection spec includes a fieldList which represents the fields which must be returned. 
+ * Any other field which is not in the fieldList may be unset in the returned + * partitions (it is up to the implementation to decide whether it chooses to + * include or exclude such fields). E.g. setting the field list to sd.location, + * serdeInfo.name, sd.cols.name, sd.cols.type will + * return partitions which will have location field set in the storage descriptor. + * Also the serdeInf in the returned storage descriptor will only have name field + * set. This applies to multi-valued fields as well like sd.cols, so in the + * example above only name and type fields will be set for sd.cols. + * If the fieldList is empty or not present, all the fields will be set. + * Additionally, it also includes a includeParamKeyPattern and excludeParamKeyPattern + * which is a SQL-92 compliant regex pattern to include or exclude parameters. The paramKeyPattern + * supports _ or % wildcards which represent one character and 0 or more characters respectively + * @param filterSpec The filter spec from GetPartitionsRequest which includes the filter mode + * and the list of filter strings. The filter mode could be BY_NAMES, BY_VALUES or BY_EXPR + * to filter by partition names, partition values or expression. The filter strings are provided + * in the list of filters within the filterSpec. When more than one filters are provided in the list + * they are logically AND together + * @return List of matching partitions which which may be partially filled according to fieldList. + * @throws MetaException in case of errors + * @throws NoSuchObjectException when table isn't found + */ + List getPartitionSpecsByFilterAndProjection(Table table, + GetProjectionsSpec projectionSpec, GetPartitionsFilterSpec filterSpec) + throws MetaException, NoSuchObjectException; + + /** + * Fetch a partition along with privilege information for a particular user. + * @param tableName table name. + * @param partVals partition values + * @param user_name user to get privilege information for. 
+ * @param group_names groups to get privilege information for. + * @return a partition + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error fetching privilege information + */ + Partition getPartitionWithAuth(TableName tableName, + List partVals, String user_name, List group_names) + throws MetaException, NoSuchObjectException, InvalidObjectException; + + /** + * Lists partition names that match a given partial specification + * @param tableName + * The name of the table which has the partitions + * @param partVals + * A partial list of values for partitions in order of the table's partition keys. + * Entries can be empty if you only want to specify latter partitions. + * @param maxParts + * The maximum number of partitions to return + * @return A list of partition names that match the partial spec. + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException No such table exists + */ + List listPartitionNamesPs(TableName tableName, List partVals, short maxParts) + throws MetaException, NoSuchObjectException; + + /** + * Lists partitions that match a given partial specification and sets their auth privileges. + * If userName and groupNames null, then no auth privileges are set. + * @param tableName + * The name of the table which has the partitions + * @param args additional arguments for getting partitions + * @return A list of partitions that match the partial spec. + * @throws MetaException error access RDBMS + * @throws NoSuchObjectException No such table exists + * @throws InvalidObjectException error access privilege information + */ + List listPartitionsPsWithAuth(TableName tableName, + GetPartitionsArgs args) throws MetaException, InvalidObjectException, NoSuchObjectException; + + /** + * Get the number of partitions that match a provided SQL filter. + * @param tableName table name. 
+ * @param filter filter from Hive's SQL where clause + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or executing the filter + * @throws NoSuchObjectException no such table + */ + int getNumPartitionsByFilter(TableName tableName, String filter) + throws MetaException, NoSuchObjectException; + + /** + * Get the number of partitions that match a given partial specification. + * @param tableName table name. + * @param partVals A partial list of values for partitions in order of the table's partition keys. + * Entries can be empty if you need to specify latter partitions. + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or working with the specification. + * @throws NoSuchObjectException no such table. + */ + int getNumPartitionsByPs(TableName tableName, List partVals) + throws MetaException, NoSuchObjectException; + + /** + * Get a list of partition values as one big struct. + * @param tableName table name. + * @param cols partition key columns + * @param applyDistinct whether to apply distinct to the list + * @param filter filter to apply to the partition names + * @param ascending whether to put in ascending order + * @param order whether to order + * @param maxParts maximum number of parts to return, or -1 for all + * @return struct with all of the partition value information + * @throws MetaException error access the RDBMS + */ + PartitionValuesResponse listPartitionValues(TableName tableName, + List cols, boolean applyDistinct, String filter, boolean ascending, + List order, long maxParts) throws MetaException; + + /** + * Update creation metadata for a materialized view. + * @param tableName table name. + * @param cm new creation metadata + * @throws MetaException error accessing the RDBMS. + */ + void updateCreationMetadata(TableName tableName, CreationMetadata cm) + throws MetaException; + + /** + * Retrieve all materialized views. 
+ * @return all materialized views in a catalog + * @throws MetaException error querying the RDBMS + */ + List
getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException; + + MTable ensureGetMTable(TableName tableName) throws NoSuchObjectException; + + /** + * Checking if table is part of a materialized view. + * @param tableName table name + * @return list of materialized views that uses the table + */ + List isPartOfMaterializedView(TableName tableName); + + Table markPartitionForEvent(TableName tableName, Map partVals, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + + boolean isPartitionMarkedForEvent(TableName tableName, Map partName, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + + int getObjectCount(String fieldName, String objName); + + /** + * Updates a given table parameter with expected value. + * + * @return the number of rows updated + */ + long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue) + throws MetaException, NoSuchObjectException; + + MPartition ensureGetMPartition(TableName tableName, List partVals) throws MetaException; +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/NotificationStoreImpl.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/NotificationStoreImpl.java new file mode 100644 index 000000000000..db6c606f1826 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/NotificationStoreImpl.java @@ -0,0 +1,428 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.metastore.impl; + +import com.google.common.collect.Lists; + +import javax.jdo.Query; +import javax.jdo.datastore.JDOConnection; + +import java.sql.Connection; +import java.sql.Statement; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.collections4.CollectionUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.apache.hadoop.hive.metastore.directsql.MetaStoreDirectSql; +import org.apache.hadoop.hive.metastore.PersistenceManagerProvider; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; +import org.apache.hadoop.hive.metastore.api.WriteEventInfo; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; 
+import org.apache.hadoop.hive.metastore.metastore.RawStoreAware; +import org.apache.hadoop.hive.metastore.model.MNotificationLog; +import org.apache.hadoop.hive.metastore.model.MNotificationNextId; +import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog; +import org.apache.hadoop.hive.metastore.metastore.iface.NotificationStore; +import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.utils.RetryingExecutor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.hive.metastore.ObjectStore.appendSimpleCondition; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + +public class NotificationStoreImpl extends RawStoreAware implements NotificationStore { + private static final Logger LOG = LoggerFactory.getLogger(NotificationStoreImpl.class); + private Configuration conf; + private SQLGenerator sqlGenerator; + private MetaStoreDirectSql directSql; + + @Override + public void setBaseStore(RawStore store) { + super.setBaseStore(store); + this.conf = baseStore.getConf(); + DatabaseProduct dbType = PersistenceManagerProvider.getDatabaseProduct(); + this.sqlGenerator = new SQLGenerator(dbType, conf); + } + + @Override + public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { + NotificationEventResponse result = new NotificationEventResponse(); + result.setEvents(new ArrayList<>()); + long lastEvent = rqst.getLastEvent(); + List parameterVals = new ArrayList<>(); + parameterVals.add(lastEvent); + // filterBuilder parameter is used for construction of conditional clause in the select query + StringBuilder filterBuilder = new StringBuilder("eventId > para" + parameterVals.size()); + // parameterBuilder parameter is used for specify what types of parameters will go into the filterBuilder + StringBuilder parameterBuilder = new 
StringBuilder("java.lang.Long para" + parameterVals.size()); + /* A fully constructed query would like: + -> filterBuilder: eventId > para0 && catalogName == para1 && dbName == para2 && (tableName == para3 + || tableName == para4) && eventType != para5 + -> parameterBuilder: java.lang.Long para0, java.lang.String para1, java.lang.String para2 + , java.lang.String para3, java.lang.String para4, java.lang.String para5 + */ + if (rqst.isSetCatName()) { + parameterVals.add(normalizeIdentifier(rqst.getCatName())); + parameterBuilder.append(", java.lang.String para" + parameterVals.size()); + filterBuilder.append(" && catalogName == para" + parameterVals.size()); + } + if (rqst.isSetDbName()) { + parameterVals.add(normalizeIdentifier(rqst.getDbName())); + parameterBuilder.append(", java.lang.String para" + parameterVals.size()); + filterBuilder.append(" && dbName == para" + parameterVals.size()); + } + if (rqst.isSetTableNames() && !rqst.getTableNames().isEmpty()) { + filterBuilder.append(" && ("); + for (String tableName : rqst.getTableNames()) { + parameterVals.add(normalizeIdentifier(tableName)); + parameterBuilder.append(", java.lang.String para" + parameterVals.size()); + filterBuilder.append("tableName == para" + parameterVals.size()+ " || "); + } + filterBuilder.setLength(filterBuilder.length() - 4); // remove the last " || " + filterBuilder.append(") "); + } + if (rqst.isSetEventTypeList()) { + filterBuilder.append(" && ("); + for (String eventType : rqst.getEventTypeList()) { + parameterVals.add(eventType); + parameterBuilder.append(", java.lang.String para" + parameterVals.size()); + filterBuilder.append("eventType == para" + parameterVals.size() + " || "); + } + filterBuilder.setLength(filterBuilder.length() - 4); // remove the last " || " + filterBuilder.append(") "); + } + if (rqst.isSetEventTypeSkipList()) { + for (String eventType : rqst.getEventTypeSkipList()) { + parameterVals.add(eventType); + parameterBuilder.append(", java.lang.String para" + 
parameterVals.size()); + filterBuilder.append(" && eventType != para" + parameterVals.size()); + } + } + Query query = pm.newQuery(MNotificationLog.class, filterBuilder.toString()); + query.declareParameters(parameterBuilder.toString()); + query.setOrdering("eventId ascending"); + int maxEventResponse = MetastoreConf.getIntVar(baseStore.getConf(), MetastoreConf.ConfVars.METASTORE_MAX_EVENT_RESPONSE); + int maxEvents = (rqst.getMaxEvents() < maxEventResponse && rqst.getMaxEvents() > 0) ? rqst.getMaxEvents() : maxEventResponse; + query.setRange(0, maxEvents); + Collection events = + (Collection) query.executeWithArray(parameterVals.toArray(new Object[0])); + if (events == null) { + return result; + } + Iterator i = events.iterator(); + while (i.hasNext()) { + result.addToEvents(translateDbToThrift(i.next())); + } + return result; + } + + private NotificationEvent translateDbToThrift(MNotificationLog dbEvent) { + NotificationEvent event = new NotificationEvent(); + event.setEventId(dbEvent.getEventId()); + event.setEventTime(dbEvent.getEventTime()); + event.setEventType(dbEvent.getEventType()); + event.setCatName(dbEvent.getCatalogName()); + event.setDbName(dbEvent.getDbName()); + event.setTableName(dbEvent.getTableName()); + event.setMessage((dbEvent.getMessage())); + event.setMessageFormat(dbEvent.getMessageFormat()); + return event; + } + + private void lockNotificationSequenceForUpdate() throws MetaException { + int maxRetries = + MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES); + long sleepInterval = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); + if (sqlGenerator.getDbProduct().isDERBY()) { + // Derby doesn't allow FOR UPDATE to lock the row being selected (See https://db.apache + // .org/derby/docs/10.1/ref/rrefsqlj31783.html) . So lock the whole table. 
Since there's + // only one row in the table, this shouldn't cause any performance degradation. + new RetryingExecutor(maxRetries, () -> { + if (directSql == null) { + directSql = new MetaStoreDirectSql(pm, conf, ""); + } + directSql.lockDbTable("NOTIFICATION_SEQUENCE"); + return null; + }).commandName("lockNotificationSequenceForUpdate").sleepInterval(sleepInterval).run(); + } else { + String selectQuery = "select \"NEXT_EVENT_ID\" from \"NOTIFICATION_SEQUENCE\""; + String lockingQuery = sqlGenerator.addForUpdateClause(selectQuery); + new RetryingExecutor(maxRetries, () -> { + String s = sqlGenerator.getDbProduct().getPrepareTxnStmt(); + assert pm.currentTransaction().isActive(); + JDOConnection jdoConn = pm.getDataStoreConnection(); + Connection conn = (Connection) jdoConn.getNativeConnection(); + try (Statement statement = conn.createStatement()) { + if (s != null) { + statement.execute(s); + } + statement.execute(lockingQuery); + } finally { + jdoConn.close(); + } + return null; + }).commandName("lockNotificationSequenceForUpdate").sleepInterval(sleepInterval).run(); + } + } + + @Override + public void addNotificationEvent(NotificationEvent entry) throws MetaException { + pm.flush(); + lockNotificationSequenceForUpdate(); + Query query = pm.newQuery(MNotificationNextId.class); + Collection ids = (Collection) query.execute(); + MNotificationNextId mNotificationNextId = null; + boolean needToPersistId; + if (CollectionUtils.isEmpty(ids)) { + mNotificationNextId = new MNotificationNextId(1L); + needToPersistId = true; + } else { + mNotificationNextId = ids.iterator().next(); + needToPersistId = false; + } + entry.setEventId(mNotificationNextId.getNextEventId()); + mNotificationNextId.incrementEventId(); + if (needToPersistId) { + pm.makePersistent(mNotificationNextId); + } + pm.makePersistent(translateThriftToDb(entry)); + } + + private MNotificationLog translateThriftToDb(NotificationEvent entry) { + MNotificationLog dbEntry = new MNotificationLog(); + 
dbEntry.setEventId(entry.getEventId()); + dbEntry.setEventTime(entry.getEventTime()); + dbEntry.setEventType(entry.getEventType()); + dbEntry.setCatalogName(entry.isSetCatName() ? entry.getCatName() : getDefaultCatalog(baseStore.getConf())); + dbEntry.setDbName(entry.getDbName()); + dbEntry.setTableName(entry.getTableName()); + dbEntry.setMessage(entry.getMessage()); + dbEntry.setMessageFormat(entry.getMessageFormat()); + return dbEntry; + } + + @Override + public void cleanNotificationEvents(int olderThan) { + cleanOlderEvents(olderThan, MNotificationLog.class, "NotificationLog"); + } + + private void cleanOlderEvents(int olderThan, Class table, String tableName) { + final int eventBatchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.EVENT_CLEAN_MAX_EVENTS); + final long ageSec = olderThan; + final Instant now = Instant.now(); + final int tooOld = Math.toIntExact(now.getEpochSecond() - ageSec); + final Optional batchSize = (eventBatchSize > 0) ? Optional.of(eventBatchSize) : Optional.empty(); + + final long start = System.nanoTime(); + int deleteCount = doCleanNotificationEvents(tooOld, batchSize, table, tableName); + + if (deleteCount == 0) { + LOG.info("No {} events found to be cleaned with eventTime < {}", tableName, tooOld); + } else { + int batchCount = 0; + do { + batchCount = doCleanNotificationEvents(tooOld, batchSize, table, tableName); + deleteCount += batchCount; + } while (batchCount > 0); + } + + final long finish = System.nanoTime(); + LOG.info("Deleted {} {} events older than epoch:{} in {}ms", deleteCount, tableName, tooOld, + TimeUnit.NANOSECONDS.toMillis(finish - start)); + } + + private int doCleanNotificationEvents(final int ageSec, final Optional batchSize, Class tableClass, String tableName) { + int eventsCount = 0; + Query query = pm.newQuery(tableClass, "eventTime <= tooOld"); + String key = null; + query.declareParameters("java.lang.Integer tooOld"); + if (MNotificationLog.class.equals(tableClass)) { + key = "eventId"; + } else 
if (MTxnWriteNotificationLog.class.equals(tableClass)) { + key = "txnId"; + } + query.setOrdering(key + " ascending"); + if (batchSize.isPresent()) { + query.setRange(0, batchSize.get()); + } + + List events = (List) query.execute(ageSec); + if (CollectionUtils.isNotEmpty(events)) { + eventsCount = events.size(); + if (LOG.isDebugEnabled()) { + int minEventTime, maxEventTime; + long minId, maxId; + T firstNotification = events.get(0); + T lastNotification = events.get(eventsCount - 1); + if (MNotificationLog.class.equals(tableClass)) { + minEventTime = ((MNotificationLog) firstNotification).getEventTime(); + minId = ((MNotificationLog) firstNotification).getEventId(); + maxEventTime = ((MNotificationLog) lastNotification).getEventTime(); + maxId = ((MNotificationLog) lastNotification).getEventId(); + } else if (MTxnWriteNotificationLog.class.equals(tableClass)) { + minEventTime = ((MTxnWriteNotificationLog) firstNotification).getEventTime(); + minId = ((MTxnWriteNotificationLog) firstNotification).getTxnId(); + maxEventTime = ((MTxnWriteNotificationLog) lastNotification).getEventTime(); + maxId = ((MTxnWriteNotificationLog) lastNotification).getTxnId(); + } else { + throw new RuntimeException( + "Cleaning of older " + tableName + " events failed. 
" + "Reason: Unknown table encountered " + tableClass.getName()); + } + LOG.debug( + "Remove {} batch of {} events with eventTime < {}, min {}: {}, max {}: {}, min eventTime {}, max eventTime {}", + tableName, eventsCount, ageSec, key, minId, key, maxId, minEventTime, maxEventTime); + } + pm.deletePersistentAll(events); + } + return eventsCount; + } + + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() { + Query query = pm.newQuery(MNotificationNextId.class); + Collection ids = (Collection) query.execute(); + long id = 0; + if (CollectionUtils.isNotEmpty(ids)) { + id = ids.iterator().next().getNextEventId() - 1; + } + return new CurrentNotificationEventId(id); + } + + @Override + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { + Long result = 0L; + long fromEventId = rqst.getFromEventId(); + String inputDbName = rqst.getDbName(); + String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf); + long toEventId; + String paramSpecs; + List paramVals = new ArrayList<>(); + + // We store a catalog name in lower case in metastore and also use the same way everywhere in + // hive. + assert catName.equals(catName.toLowerCase()); + + // Build the query to count events, part by part + String queryStr = "select count(eventId) from " + MNotificationLog.class.getName(); + // count fromEventId onwards events + queryStr = queryStr + " where eventId > fromEventId"; + paramSpecs = "java.lang.Long fromEventId"; + paramVals.add(Long.valueOf(fromEventId)); + + // Input database name can be a database name or a *. In the first case we add a filter + // condition on dbName column, but not in the second case, since a * means all the + // databases. In case we support more elaborate database name patterns in future, we will + // have to apply a method similar to getNextNotification() method of MetaStoreClient. 
+ if (!inputDbName.equals("*")) { + // dbName could be NULL in case of transaction related events, which also need to be + // counted. + queryStr = queryStr + " && (dbName == inputDbName || dbName == null)"; + paramSpecs = paramSpecs + ", java.lang.String inputDbName"; + // We store a database name in lower case in metastore. + paramVals.add(inputDbName.toLowerCase()); + } + + // catName could be NULL in case of transaction related events, which also need to be + // counted. + queryStr = queryStr + " && (catalogName == catName || catalogName == null)"; + paramSpecs = paramSpecs +", java.lang.String catName"; + paramVals.add(catName); + + // count events upto toEventId if specified + if (rqst.isSetToEventId()) { + toEventId = rqst.getToEventId(); + queryStr = queryStr + " && eventId <= toEventId"; + paramSpecs = paramSpecs + ", java.lang.Long toEventId"; + paramVals.add(Long.valueOf(toEventId)); + } + // Specify list of table names in the query string and parameter types + if (rqst.isSetTableNames() && !rqst.getTableNames().isEmpty()) { + queryStr = queryStr + " && ("; + for (String tableName : rqst.getTableNames()) { + paramVals.add(tableName.toLowerCase()); + queryStr = queryStr + "tableName == tableName" + paramVals.size() + " || "; + paramSpecs = paramSpecs + ", java.lang.String tableName" + paramVals.size(); + } + queryStr = queryStr.substring(0, queryStr.length() - 4); // remove the last " || " + queryStr += ")"; + } + + Query query = pm.newQuery(queryStr); + query.declareParameters(paramSpecs); + result = (Long) query.executeWithArray(paramVals.toArray()); + // Cap the event count by limit if specified. 
+ long eventCount = result.longValue(); + if (rqst.isSetLimit() && eventCount > rqst.getLimit()) { + eventCount = rqst.getLimit(); + } + return new NotificationEventsCountResponse(eventCount); + } + + @Override + public void cleanWriteNotificationEvents(int olderThan) { + cleanOlderEvents(olderThan, MTxnWriteNotificationLog.class, "TxnWriteNotificationLog"); + } + + @Override + public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { + List writeEventInfoList = null; + List parameterVals = new ArrayList<>(); + StringBuilder filterBuilder = new StringBuilder(" txnId == " + Long.toString(txnId)); + if (dbName != null && !"*".equals(dbName)) { // * means get all database, so no need to add filter + appendSimpleCondition(filterBuilder, "database", new String[]{dbName}, parameterVals); + } + if (tableName != null && !"*".equals(tableName)) { + appendSimpleCondition(filterBuilder, "table", new String[]{tableName}, parameterVals); + } + Query query = pm.newQuery(MTxnWriteNotificationLog.class, filterBuilder.toString()); + query.setOrdering("database,table ascending"); + List mplans = (List) query.executeWithArray( + parameterVals.toArray(new String[0])); + pm.retrieveAll(mplans); + if (mplans != null && mplans.size() > 0) { + writeEventInfoList = Lists.newArrayList(); + for (MTxnWriteNotificationLog mplan : mplans) { + WriteEventInfo writeEventInfo = new WriteEventInfo(mplan.getWriteId(), mplan.getDatabase(), + mplan.getTable(), mplan.getFiles()); + writeEventInfo.setPartition(mplan.getPartition()); + writeEventInfo.setPartitionObj(mplan.getPartObject()); + writeEventInfo.setTableObj(mplan.getTableObject()); + writeEventInfoList.add(writeEventInfo); + } + } + return writeEventInfoList; + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/PrivilegeStoreImpl.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/PrivilegeStoreImpl.java new file mode 100644 index 000000000000..7351b276cb39 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/PrivilegeStoreImpl.java @@ -0,0 +1,2372 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore.impl; + +import com.google.common.base.Preconditions; + +import javax.jdo.Query; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.collections4.CollectionUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.metastore.RawStoreAware; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; +import org.apache.hadoop.hive.metastore.model.MDBPrivilege; +import org.apache.hadoop.hive.metastore.model.MDCPrivilege; +import org.apache.hadoop.hive.metastore.model.MDataConnector; +import org.apache.hadoop.hive.metastore.model.MDatabase; +import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; +import 
org.apache.hadoop.hive.metastore.model.MPartition; +import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; +import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; +import org.apache.hadoop.hive.metastore.model.MRole; +import org.apache.hadoop.hive.metastore.model.MRoleMap; +import org.apache.hadoop.hive.metastore.model.MTable; +import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; +import org.apache.hadoop.hive.metastore.model.MTablePrivilege; +import org.apache.hadoop.hive.metastore.metastore.GetHelper; +import org.apache.hadoop.hive.metastore.metastore.GetListHelper; +import org.apache.hadoop.hive.metastore.metastore.iface.PrivilegeStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.hive.metastore.ObjectStore.convert; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + +public class PrivilegeStoreImpl extends RawStoreAware implements PrivilegeStore { + private static final Logger LOG = LoggerFactory.getLogger(PrivilegeStoreImpl.class); + private Configuration conf; + + @Override + public boolean addRole(String roleName, String ownerName) + throws InvalidObjectException, MetaException, NoSuchObjectException { + MRole nameCheck = this.getMRole(roleName); + if (nameCheck != null) { + throw new InvalidObjectException("Role " + roleName + " already exists."); + } + int now = (int) (System.currentTimeMillis() / 1000); + MRole mRole = new MRole(roleName, now, ownerName); + pm.makePersistent(mRole); + return true; + } + + @Override + public boolean grantRole(Role role, String userName, + PrincipalType principalType, String grantor, PrincipalType grantorType, + boolean grantOption) throws MetaException, NoSuchObjectException,InvalidObjectException { + MRoleMap roleMap = null; + try { + roleMap = this.getMSecurityUserRoleMap(userName, principalType, role + 
.getRoleName()); + } catch (Exception e) { + } + if (roleMap != null) { + throw new InvalidObjectException("Principal " + userName + + " already has the role " + role.getRoleName()); + } + if (principalType == PrincipalType.ROLE) { + validateRole(userName); + } + MRole mRole = getMRole(role.getRoleName()); + long now = System.currentTimeMillis()/1000; + MRoleMap roleMember = new MRoleMap(userName, principalType.toString(), + mRole, (int) now, grantor, grantorType.toString(), grantOption); + pm.makePersistent(roleMember); + return true; + } + + /** + * Verify that role with given name exists, if not throw exception + */ + private void validateRole(String roleName) throws NoSuchObjectException { + // if grantee is a role, check if it exists + MRole granteeRole = getMRole(roleName); + if (granteeRole == null) { + throw new NoSuchObjectException("Role " + roleName + " does not exist"); + } + } + + @Override + public boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean grantOption) throws MetaException, NoSuchObjectException { + MRoleMap roleMember = getMSecurityUserRoleMap(userName, principalType, + role.getRoleName()); + if (grantOption) { + // Revoke with grant option - only remove the grant option but keep the role. + if (roleMember.getGrantOption()) { + roleMember.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with role " + role.getRoleName()); + } + } else { + // No grant option in revoke, remove the whole role. 
+ pm.deletePersistent(roleMember); + } + return true; + } + + private MRoleMap getMSecurityUserRoleMap(String userName, PrincipalType principalType, + String roleName) { + MRoleMap mRoleMember = null; + Query query = + pm.newQuery(MRoleMap.class, + "principalName == t1 && principalType == t2 && role.roleName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setUnique(true); + mRoleMember = (MRoleMap) query.executeWithArray(userName, principalType.toString(), roleName); + pm.retrieve(mRoleMember);; + return mRoleMember; + } + + @Override + public boolean removeRole(String roleName) throws MetaException, + NoSuchObjectException { + try { + MRole mRol = getMRole(roleName); + pm.retrieve(mRol); + if (mRol != null) { + // first remove all the membership, the membership that this role has + // been granted + List roleMap = listMRoleMembers(mRol.getRoleName()); + if (CollectionUtils.isNotEmpty(roleMap)) { + pm.deletePersistentAll(roleMap); + } + List roleMember = listMSecurityPrincipalMembershipRole(mRol + .getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(roleMember)) { + pm.deletePersistentAll(roleMember); + } + + // then remove all the grants + List userGrants = listPrincipalMGlobalGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(userGrants)) { + pm.deletePersistentAll(userGrants); + } + + List dbGrants = listPrincipalAllDBGrant(mRol + .getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(dbGrants)) { + pm.deletePersistentAll(dbGrants); + } + + List dcGrants = listPrincipalAllDCGrant(mRol + .getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(dcGrants)) { + pm.deletePersistentAll(dcGrants); + } + + List tabPartGrants = listPrincipalAllTableGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(tabPartGrants)) { + pm.deletePersistentAll(tabPartGrants); + } + + List partGrants = 
listPrincipalAllPartitionGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(partGrants)) { + pm.deletePersistentAll(partGrants); + } + + List tblColumnGrants = listPrincipalAllTableColumnGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(tblColumnGrants)) { + pm.deletePersistentAll(tblColumnGrants); + } + + List partColumnGrants = listPrincipalAllPartitionColumnGrants( + mRol.getRoleName(), PrincipalType.ROLE); + if (CollectionUtils.isNotEmpty(partColumnGrants)) { + pm.deletePersistentAll(partColumnGrants); + } + + // finally remove the role + pm.deletePersistent(mRol); + } + return true; + } catch (Exception e) { + throw new MetaException(e.getMessage()); + } + } + + /** + * Get all the roles in the role hierarchy that this user and groupNames belongs to + */ + private Set listAllRolesInHierarchy(String userName, + List groupNames) { + List ret = new ArrayList<>(); + if(userName != null) { + ret.addAll(listMRoles(userName, PrincipalType.USER)); + } + if (groupNames != null) { + for (String groupName: groupNames) { + ret.addAll(listMRoles(groupName, PrincipalType.GROUP)); + } + } + // get names of these roles and its ancestors + Set roleNames = new HashSet<>(); + getAllRoleAncestors(roleNames, ret); + return roleNames; + } + + /** + * Add role names of parentRoles and its parents to processedRoles + */ + private void getAllRoleAncestors(Set processedRoleNames, List parentRoles) { + for (MRoleMap parentRole : parentRoles) { + String parentRoleName = parentRole.getRole().getRoleName(); + if (!processedRoleNames.contains(parentRoleName)) { + // unprocessed role: get its parents, add it to processed, and call this + // function recursively + List nextParentRoles = listMRoles(parentRoleName, PrincipalType.ROLE); + processedRoleNames.add(parentRoleName); + getAllRoleAncestors(processedRoleNames, nextParentRoles); + } + } + } + + public List listMRoles(String principalName, + PrincipalType principalType) { 
+ Query query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.setUnique(false); + List mRoles = + (List) query.executeWithArray(principalName, principalType.toString()); + pm.retrieveAll(mRoles);; + List mRoleMember = new ArrayList<>(mRoles); + + if (principalType == PrincipalType.USER) { + // All users belong to public role implicitly, add that role + // TODO MS-SPLIT Change this back to HMSHandler.PUBLIC once HiveMetaStore has moved to + // stand-alone metastore. + //MRole publicRole = new MRole(HMSHandler.PUBLIC, 0, HMSHandler.PUBLIC); + MRole publicRole = new MRole("public", 0, "public"); + mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, null, + null, false)); + } + + return mRoleMember; + } + + @Override + public List listRoles(String principalName, PrincipalType principalType) { + List result = new ArrayList<>(); + List roleMaps = listMRoles(principalName, principalType); + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + MRole mrole = roleMap.getRole(); + Role role = new Role(mrole.getRoleName(), mrole.getCreateTime(), mrole.getOwnerName()); + result.add(role); + } + } + return result; + } + + @Override + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { + List result = new ArrayList<>(); + List roleMaps = listMRoles(principalName, principalType); + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( + roleMap.getRole().getRoleName(), + roleMap.getPrincipalName(), + PrincipalType.valueOf(roleMap.getPrincipalType()), + roleMap.getGrantOption(), + roleMap.getAddTime(), + roleMap.getGrantor(), + // no grantor type for public role, hence the null check + roleMap.getGrantorType() == null ? 
null + : PrincipalType.valueOf(roleMap.getGrantorType()) + ); + result.add(rolePrinGrant); + } + } + return result; + } + + private List listMSecurityPrincipalMembershipRole(final String roleName, + final PrincipalType principalType) throws Exception { + LOG.debug("Executing listMSecurityPrincipalMembershipRole"); + Query query = pm.newQuery(MRoleMap.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + final List mRoleMemebership = (List) query.execute(roleName, principalType.toString()); + + LOG.debug("Retrieving all objects for listMSecurityPrincipalMembershipRole"); + pm.retrieveAll(mRoleMemebership); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole: {}", mRoleMemebership); + + return Collections.unmodifiableList(new ArrayList<>(mRoleMemebership)); + } + + @Override + public Role getRole(String roleName) throws NoSuchObjectException { + MRole mRole = this.getMRole(roleName); + if (mRole == null) { + throw new NoSuchObjectException(roleName + " role can not be found."); + } + return new Role(mRole.getRoleName(), mRole.getCreateTime(), mRole + .getOwnerName()); + } + + private MRole getMRole(String roleName) { + MRole mrole = null; + Query query = pm.newQuery(MRole.class, "roleName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(true); + mrole = (MRole) query.execute(roleName); + pm.retrieve(mrole); + return mrole; + } + + @Override + public List listRoleNames() { + LOG.debug("Executing listAllRoleNames"); + Query query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole"); + query.setResult("roleName"); + Collection names = (Collection) query.execute(); + List roleNames = new ArrayList<>(); + for (Iterator i = names.iterator(); i.hasNext();) { + roleNames.add((String) i.next()); + } + return roleNames; + } + + @Override + public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List 
groupNames) throws InvalidObjectException, MetaException { + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + if (userName != null) { + List user = this.listPrincipalMGlobalGrants(userName, PrincipalType.USER); + if(CollectionUtils.isNotEmpty(user)) { + Map> userPriv = new HashMap<>(); + List grantInfos = new ArrayList<>(user.size()); + for (int i = 0; i < user.size(); i++) { + MGlobalPrivilege item = user.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + userPriv.put(userName, grantInfos); + ret.setUserPrivileges(userPriv); + } + } + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> groupPriv = new HashMap<>(); + for(String groupName: groupNames) { + List group = + this.listPrincipalMGlobalGrants(groupName, PrincipalType.GROUP); + if(CollectionUtils.isNotEmpty(group)) { + List grantInfos = new ArrayList<>(group.size()); + for (int i = 0; i < group.size(); i++) { + MGlobalPrivilege item = group.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + groupPriv.put(groupName, grantInfos); + } + } + ret.setGroupPrivileges(groupPriv); + } + return ret; + } + + private List getDBPrivilege(String catName, String dbName, + String principalName, PrincipalType principalType) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + + if (principalName != null) { + List userNameDbPriv = this.listPrincipalMDBGrants( + principalName, principalType, catName, dbName); + if (CollectionUtils.isNotEmpty(userNameDbPriv)) { + List grantInfos = new ArrayList<>( + userNameDbPriv.size()); + for (int i = 0; i < userNameDbPriv.size(); i++) { + MDBPrivilege item = userNameDbPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), 
item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } + return Collections.emptyList(); + } + + + @Override + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + if (userName != null) { + Map> dbUserPriv = new HashMap<>(); + dbUserPriv.put(userName, getDBPrivilege(catName, dbName, userName, + PrincipalType.USER)); + ret.setUserPrivileges(dbUserPriv); + } + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> dbGroupPriv = new HashMap<>(); + for (String groupName : groupNames) { + dbGroupPriv.put(groupName, getDBPrivilege(catName, dbName, groupName, + PrincipalType.GROUP)); + } + ret.setGroupPrivileges(dbGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, groupNames); + if (CollectionUtils.isNotEmpty(roleNames)) { + Map> dbRolePriv = new HashMap<>(); + for (String roleName : roleNames) { + dbRolePriv + .put(roleName, getDBPrivilege(catName, dbName, roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(dbRolePriv); + } + return ret; + } + + private List getConnectorPrivilege(String catName, String connectorName, + String principalName, PrincipalType principalType) { + + // normalize string name + catName = normalizeIdentifier(catName); + connectorName = normalizeIdentifier(connectorName); + + if (principalName != null) { + // get all data connector granted privilege + List userNameDcPriv = this.listPrincipalMDCGrants( + principalName, principalType, catName, connectorName); + + // populate and return grantInfos + if (CollectionUtils.isNotEmpty(userNameDcPriv)) { + List grantInfos = new ArrayList<>( + userNameDcPriv.size()); + for (int i = 0; i < userNameDcPriv.size(); i++) { + MDCPrivilege item = userNameDcPriv.get(i); + 
grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } + + // return empty list if no principalName + return Collections.emptyList(); + } + + @Override + public PrincipalPrivilegeSet getConnectorPrivilegeSet (String catName, String connectorName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + catName = normalizeIdentifier(catName); + connectorName = normalizeIdentifier(connectorName); + + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + // get user privileges + if (userName != null) { + Map> connectorUserPriv = new HashMap<>(); + connectorUserPriv.put(userName, getConnectorPrivilege(catName, connectorName, userName, + PrincipalType.USER)); + ret.setUserPrivileges(connectorUserPriv); + } + + // get group privileges + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> dbGroupPriv = new HashMap<>(); + for (String groupName : groupNames) { + dbGroupPriv.put(groupName, getConnectorPrivilege(catName, connectorName, groupName, + PrincipalType.GROUP)); + } + ret.setGroupPrivileges(dbGroupPriv); + } + + // get role privileges + Set roleNames = listAllRolesInHierarchy(userName, groupNames); + if (CollectionUtils.isNotEmpty(roleNames)) { + Map> dbRolePriv = new HashMap<>(); + for (String roleName : roleNames) { + dbRolePriv.put(roleName, getConnectorPrivilege(catName, connectorName, roleName, + PrincipalType.ROLE)); + } + ret.setRolePrivileges(dbRolePriv); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getPartitionPrivilegeSet(TableName table, String partition, String userName, + List groupNames) throws InvalidObjectException, MetaException { + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + String tableName = normalizeIdentifier(table.getTable()); + String dbName = normalizeIdentifier(table.getDb()); + String catName = 
normalizeIdentifier(table.getCat()); + if (userName != null) { + Map> partUserPriv = new HashMap<>(); + partUserPriv.put(userName, getPartitionPrivilege(catName, dbName, + tableName, partition, userName, PrincipalType.USER)); + ret.setUserPrivileges(partUserPriv); + } + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> partGroupPriv = new HashMap<>(); + for (String groupName : groupNames) { + partGroupPriv.put(groupName, getPartitionPrivilege(catName, dbName, tableName, + partition, groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(partGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, groupNames); + if (CollectionUtils.isNotEmpty(roleNames)) { + Map> partRolePriv = new HashMap<>(); + for (String roleName : roleNames) { + partRolePriv.put(roleName, getPartitionPrivilege(catName, dbName, tableName, + partition, roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(partRolePriv); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(TableName table, String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + String tableName = normalizeIdentifier(table.getTable()); + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + + if (userName != null) { + Map> tableUserPriv = new HashMap<>(); + tableUserPriv.put(userName, getTablePrivilege(catName, dbName, + tableName, userName, PrincipalType.USER)); + ret.setUserPrivileges(tableUserPriv); + } + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> tableGroupPriv = new HashMap<>(); + for (String groupName : groupNames) { + tableGroupPriv.put(groupName, getTablePrivilege(catName, dbName, tableName, + groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(tableGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, groupNames); + if (CollectionUtils.isNotEmpty(roleNames)) 
{ + Map> tableRolePriv = new HashMap<>(); + for (String roleName : roleNames) { + tableRolePriv.put(roleName, getTablePrivilege(catName, dbName, tableName, + roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(tableRolePriv); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(TableName table, String partitionName, String columnName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + String tableName = normalizeIdentifier(table.getTable()); + String dbName = normalizeIdentifier(table.getDb()); + columnName = normalizeIdentifier(columnName); + String catName = normalizeIdentifier(table.getCat()); + + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + if (userName != null) { + Map> columnUserPriv = new HashMap<>(); + columnUserPriv.put(userName, getColumnPrivilege(catName, dbName, tableName, + columnName, partitionName, userName, PrincipalType.USER)); + ret.setUserPrivileges(columnUserPriv); + } + if (CollectionUtils.isNotEmpty(groupNames)) { + Map> columnGroupPriv = new HashMap<>(); + for (String groupName : groupNames) { + columnGroupPriv.put(groupName, getColumnPrivilege(catName, dbName, tableName, + columnName, partitionName, groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(columnGroupPriv); + } + Set roleNames = listAllRolesInHierarchy(userName, groupNames); + if (CollectionUtils.isNotEmpty(roleNames)) { + Map> columnRolePriv = new HashMap<>(); + for (String roleName : roleNames) { + columnRolePriv.put(roleName, getColumnPrivilege(catName, dbName, tableName, + columnName, partitionName, roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(columnRolePriv); + } + return ret; + } + + private List getPartitionPrivilege(String catName, String dbName, + String tableName, String partName, String principalName, + PrincipalType principalType) { + + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + catName = 
normalizeIdentifier(catName); + + if (principalName != null) { + List userNameTabPartPriv = this + .listPrincipalMPartitionGrants(principalName, principalType, + catName, dbName, tableName, partName); + if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { + List grantInfos = new ArrayList<>( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MPartitionPrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), + getPrincipalTypeFromStr(item.getGrantorType()), item.getGrantOption())); + + } + return grantInfos; + } + } + return new ArrayList<>(0); + } + + public static PrincipalType getPrincipalTypeFromStr(String str) { + return str == null ? null : PrincipalType.valueOf(str); + } + + private List getTablePrivilege(String catName, String dbName, + String tableName, String principalName, PrincipalType principalType) { + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); + + if (principalName != null) { + List userNameTabPartPriv = this + .listAllMTableGrants(principalName, principalType, + catName, dbName, tableName); + if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { + List grantInfos = new ArrayList<>( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MTablePrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } + return Collections.emptyList(); + } + + private List getColumnPrivilege(String catName, String dbName, + String tableName, String columnName, String partitionName, + String principalName, PrincipalType principalType) { + + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + 
columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); + + if (partitionName == null) { + List userNameColumnPriv = this + .listPrincipalMTableColumnGrants(principalName, principalType, + catName, dbName, tableName, columnName); + if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { + List grantInfos = new ArrayList<>( + userNameColumnPriv.size()); + for (int i = 0; i < userNameColumnPriv.size(); i++) { + MTableColumnPrivilege item = userNameColumnPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } else { + List userNameColumnPriv = this + .listPrincipalMPartitionColumnGrants(principalName, + principalType, catName, dbName, tableName, partitionName, columnName); + if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { + List grantInfos = new ArrayList<>( + userNameColumnPriv.size()); + for (int i = 0; i < userNameColumnPriv.size(); i++) { + MPartitionColumnPrivilege item = userNameColumnPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), getPrincipalTypeFromStr(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } + return Collections.emptyList(); + } + + @Override + public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, + MetaException, NoSuchObjectException { + int now = (int) (System.currentTimeMillis() / 1000); + List persistentObjs = new ArrayList<>(); + + List privilegeList = privileges.getPrivileges(); + + if (CollectionUtils.isNotEmpty(privilegeList)) { + Iterator privIter = privilegeList.iterator(); + Set privSet = new HashSet<>(); + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); 
+ String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + String authorizer = privDef.getAuthorizer(); + PrincipalType principalType = privDef.getPrincipalType(); + String grantor = privDef.getGrantInfo().getGrantor(); + String grantorType = privDef.getGrantInfo().getGrantorType().toString(); + boolean grantOption = privDef.getGrantInfo().isGrantOption(); + privSet.clear(); + + if(principalType == PrincipalType.ROLE){ + validateRole(userName); + } + + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List globalPrivs = this + .listPrincipalMGlobalGrants(userName, principalType, authorizer); + for (MGlobalPrivilege priv : globalPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted by " + grantor); + } + MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege(userName, + principalType.toString(), privilege, now, grantor, grantorType, grantOption, + authorizer); + persistentObjs.add(mGlobalPrivs); + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + MDatabase dbObj = baseStore.ensureGetMDatabase(catName, hiveObject.getDbName()); + List dbPrivs = this.listPrincipalMDBGrants( + userName, principalType, catName, hiveObject.getDbName(), authorizer); + for (MDBPrivilege priv : dbPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on database " + + hiveObject.getDbName() + " by " + grantor); + } + MDBPrivilege mDb = new MDBPrivilege(userName, principalType + .toString(), dbObj, privilege, now, grantor, grantorType, grantOption, 
authorizer); + persistentObjs.add(mDb); + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATACONNECTOR) { + MDataConnector dcObj = convert(baseStore.getDataConnector(hiveObject.getObjectName())); + List dcPrivs = this.listPrincipalMDCGrants(userName, principalType, + hiveObject.getObjectName(), authorizer); + for (MDCPrivilege priv : dcPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on data connector " + + hiveObject.getDbName() + " by " + grantor); + } + MDCPrivilege mDc = new MDCPrivilege(userName, principalType + .toString(), dcObj, privilege, now, grantor, grantorType, grantOption, authorizer); + persistentObjs.add(mDc); + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + MTable tblObj = baseStore.ensureGetMTable(catName, hiveObject.getDbName(), hiveObject + .getObjectName()); + if (tblObj != null) { + List tablePrivs = this + .listAllMTableGrants(userName, principalType, + catName, hiveObject.getDbName(), hiveObject.getObjectName(), authorizer); + for (MTablePrivilege priv : tablePrivs) { + if (priv.getGrantor() != null + && priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on table [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "] by " + grantor); + } + MTablePrivilege mTab = new MTablePrivilege( + userName, principalType.toString(), tblObj, + privilege, now, grantor, grantorType, grantOption, authorizer); + persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + MPartition partObj = baseStore.ensureGetMPartition(new TableName(catName, hiveObject.getDbName(), + 
hiveObject.getObjectName()), hiveObject.getPartValues()); + String partName = null; + if (partObj != null) { + partName = partObj.getPartitionName(); + List partPrivs = this + .listPrincipalMPartitionGrants(userName, + principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), partObj.getPartitionName(), authorizer); + for (MPartitionPrivilege priv : partPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on partition [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "," + + partName + "] by " + grantor); + } + MPartitionPrivilege mTab = new MPartitionPrivilege(userName, + principalType.toString(), partObj, privilege, now, grantor, + grantorType, grantOption, authorizer); + persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + MTable tblObj = baseStore.ensureGetMTable(catName, hiveObject.getDbName(), hiveObject + .getObjectName()); + if (tblObj != null) { + if (hiveObject.getPartValues() != null) { + MPartition partObj = null; + List colPrivs = null; + partObj = baseStore.ensureGetMPartition(new TableName(catName, hiveObject.getDbName(), hiveObject + .getObjectName()), hiveObject.getPartValues()); + if (partObj == null) { + continue; + } + colPrivs = this.listPrincipalMPartitionColumnGrants( + userName, principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), partObj.getPartitionName(), + hiveObject.getColumnName(), authorizer); + + for (MPartitionColumnPrivilege priv : colPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on column " + + hiveObject.getColumnName() + " 
[" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "," + + partObj.getPartitionName() + "] by " + grantor); + } + MPartitionColumnPrivilege mCol = new MPartitionColumnPrivilege(userName, + principalType.toString(), partObj, hiveObject + .getColumnName(), privilege, now, grantor, grantorType, + grantOption, authorizer); + persistentObjs.add(mCol); + } + + } else { + List colPrivs = null; + colPrivs = this.listPrincipalMTableColumnGrants( + userName, principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), hiveObject.getColumnName(), authorizer); + + for (MTableColumnPrivilege priv : colPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on column " + + hiveObject.getColumnName() + " [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "] by " + grantor); + } + MTableColumnPrivilege mCol = new MTableColumnPrivilege(userName, + principalType.toString(), tblObj, hiveObject + .getColumnName(), privilege, now, grantor, grantorType, + grantOption, authorizer); + persistentObjs.add(mCol); + } + } + } + } + } + } + if (CollectionUtils.isNotEmpty(persistentObjs)) { + pm.makePersistentAll(persistentObjs); + } + return true; + } + + @Override + public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) + throws InvalidObjectException, MetaException, NoSuchObjectException { + List persistentObjs = new ArrayList<>(); + + List privilegeList = privileges.getPrivileges(); + + if (CollectionUtils.isNotEmpty(privilegeList)) { + Iterator privIter = privilegeList.iterator(); + + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + if (privilegeStr == null || 
privilegeStr.trim().equals("")) { + continue; + } + String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List mSecUser = this.listPrincipalMGlobalGrants( + userName, principalType); + boolean found = false; + for (String privilege : privs) { + for (MGlobalPrivilege userGrant : mSecUser) { + String userGrantPrivs = userGrant.getPrivilege(); + if (privilege.equals(userGrantPrivs)) { + found = true; + if (grantOption) { + if (userGrant.getGrantOption()) { + userGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(userGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No user grant found for privileges " + privilege); + } + } + + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + String db = hiveObject.getDbName(); + boolean found = false; + List dbGrants = this.listPrincipalMDBGrants( + userName, principalType, catName, db); + for (String privilege : privs) { + for (MDBPrivilege dbGrant : dbGrants) { + String dbGrantPriv = dbGrant.getPrivilege(); + if (privilege.equals(dbGrantPriv)) { + found = true; + if (grantOption) { + if (dbGrant.getGrantOption()) { + dbGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(dbGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No database grant found for privileges " + privilege + + " on database " + db); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATACONNECTOR) { + String dc = hiveObject.getObjectName(); + boolean found = false; + 
List dcGrants = this.listPrincipalMDCGrants( + userName, principalType, catName, dc); + for (String privilege : privs) { + for (MDCPrivilege dcGrant : dcGrants) { + String dcGrantPriv = dcGrant.getPrivilege(); + if (privilege.equals(dcGrantPriv)) { + found = true; + if (grantOption) { + if (dcGrant.getGrantOption()) { + dcGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(dcGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No dataconnector grant found for privileges " + privilege + + " on data connector " + dc); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + boolean found = false; + List tableGrants = this + .listAllMTableGrants(userName, principalType, + catName, hiveObject.getDbName(), hiveObject.getObjectName()); + for (String privilege : privs) { + for (MTablePrivilege tabGrant : tableGrants) { + String tableGrantPriv = tabGrant.getPrivilege(); + if (privilege.equalsIgnoreCase(tableGrantPriv)) { + found = true; + if (grantOption) { + if (tabGrant.getGrantOption()) { + tabGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(tabGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + hiveObject.getObjectName() + + ", database is " + hiveObject.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + boolean found = false; + Table tabObj = baseStore.unwrap(TableStore.class).getTable( + new TableName(catName, hiveObject.getDbName(), hiveObject.getObjectName()), null, -1); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); + } + List 
partitionGrants = this + .listPrincipalMPartitionGrants(userName, principalType, + catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName); + for (String privilege : privs) { + for (MPartitionPrivilege partGrant : partitionGrants) { + String partPriv = partGrant.getPrivilege(); + if (partPriv.equalsIgnoreCase(privilege)) { + found = true; + if (grantOption) { + if (partGrant.getGrantOption()) { + partGrant.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(partGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + tabObj.getTableName() + + ", partition is " + partName + ", database is " + tabObj.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + Table tabObj = baseStore.unwrap(TableStore.class).getTable( + new TableName(catName, hiveObject.getDbName(), hiveObject.getObjectName()), null, -1); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); + } + + if (partName != null) { + List mSecCol = listPrincipalMPartitionColumnGrants( + userName, principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), partName, hiveObject.getColumnName()); + boolean found = false; + for (String privilege : privs) { + for (MPartitionColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + if (grantOption) { + if (col.getGrantOption()) { + col.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(col); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on 
table " + tabObj.getTableName() + + ", partition is " + partName + ", column name = " + + hiveObject.getColumnName() + ", database is " + + tabObj.getDbName()); + } + } + } else { + List mSecCol = listPrincipalMTableColumnGrants( + userName, principalType, catName, hiveObject.getDbName(), hiveObject + .getObjectName(), hiveObject.getColumnName()); + boolean found = false; + for (String privilege : privs) { + for (MTableColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + if (grantOption) { + if (col.getGrantOption()) { + col.setGrantOption(false); + } else { + throw new MetaException("User " + userName + + " does not have grant option with privilege " + privilege); + } + } + persistentObjs.add(col); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + tabObj.getTableName() + + ", column name = " + + hiveObject.getColumnName() + ", database is " + + tabObj.getDbName()); + } + } + } + + } + } + } + + if (CollectionUtils.isNotEmpty(persistentObjs)) { + if (grantOption) { + // If grant option specified, only update the privilege, don't remove it. 
+ // Grant option has already been removed from the privileges in the section above + } else { + pm.deletePersistentAll(persistentObjs); + } + } + return true; + } + + class PrivilegeWithoutCreateTimeComparator implements Comparator { + @Override + public int compare(HiveObjectPrivilege o1, HiveObjectPrivilege o2) { + int createTime1 = o1.getGrantInfo().getCreateTime(); + int createTime2 = o2.getGrantInfo().getCreateTime(); + o1.getGrantInfo().setCreateTime(0); + o2.getGrantInfo().setCreateTime(0); + int result = o1.compareTo(o2); + o1.getGrantInfo().setCreateTime(createTime1); + o2.getGrantInfo().setCreateTime(createTime2); + return result; + } + } + + @Override + public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + Set revokePrivilegeSet + = new TreeSet<>(new PrivilegeWithoutCreateTimeComparator()); + Set grantPrivilegeSet + = new TreeSet<>(new PrivilegeWithoutCreateTimeComparator()); + + List grants = null; + String catName = objToRefresh.isSetCatName() ? 
objToRefresh.getCatName() : + getDefaultCatalog(conf); + switch (objToRefresh.getObjectType()) { + case DATABASE: + try { + grants = this.listDBGrantsAll(catName, objToRefresh.getDbName(), authorizer); + } catch (Exception e) { + throw new MetaException(e.getMessage()); + } + break; + case DATACONNECTOR: + try { + grants = this.listDCGrantsAll(objToRefresh.getObjectName(), authorizer); + } catch (Exception e) { + throw new MetaException(e.getMessage()); + } + break; + case TABLE: + grants = listTableGrantsAll(new TableName(catName, objToRefresh.getDbName(), objToRefresh.getObjectName()), authorizer); + break; + case COLUMN: + Preconditions.checkArgument(objToRefresh.getColumnName()==null, "columnName must be null"); + grants = getTableAllColumnGrants(catName, objToRefresh.getDbName(), + objToRefresh.getObjectName(), authorizer); + break; + default: + throw new MetaException("Unexpected object type " + objToRefresh.getObjectType()); + } + revokePrivilegeSet.addAll(grants); + + // Optimize revoke/grant list, remove the overlapping + if (grantPrivileges.getPrivileges() != null) { + for (HiveObjectPrivilege grantPrivilege : grantPrivileges.getPrivileges()) { + if (revokePrivilegeSet.contains(grantPrivilege)) { + revokePrivilegeSet.remove(grantPrivilege); + } else { + grantPrivilegeSet.add(grantPrivilege); + } + } + } + if (!revokePrivilegeSet.isEmpty()) { + LOG.debug("Found " + revokePrivilegeSet.size() + " new revoke privileges to be synced."); + PrivilegeBag remainingRevokePrivileges = new PrivilegeBag(); + for (HiveObjectPrivilege revokePrivilege : revokePrivilegeSet) { + remainingRevokePrivileges.addToPrivileges(revokePrivilege); + } + revokePrivileges(remainingRevokePrivileges, false); + } else { + LOG.debug("No new revoke privileges are required to be synced."); + } + if (!grantPrivilegeSet.isEmpty()) { + LOG.debug("Found " + grantPrivilegeSet.size() + " new grant privileges to be synced."); + PrivilegeBag remainingGrantPrivileges = new PrivilegeBag(); + for 
(HiveObjectPrivilege grantPrivilege : grantPrivilegeSet) { + remainingGrantPrivileges.addToPrivileges(grantPrivilege); + } + grantPrivileges(remainingGrantPrivileges); + } else { + LOG.debug("No new grant privileges are required to be synced."); + } + return true; + } + + private List getTableAllColumnGrants(String catalog, String db, + String tableName, String authorizer) + throws MetaException, NoSuchObjectException { + String catName = normalizeIdentifier(catalog); + String dbName = normalizeIdentifier(db); + String tblName = normalizeIdentifier(tableName); + return new GetListHelper(this, new TableName(catName, dbName, tableName)) { + + @Override + protected String describeResult() { + return "Table column privileges."; + } + + @Override + protected List getSqlResult() + throws MetaException { + return getDirectSql().getTableAllColumnGrants(catName, dbName, tblName, authorizer); + } + + @Override + protected List getJdoResult() { + return convertTableCols(listTableAllColumnGrants(catName, dbName, tblName, authorizer)); + } + }.run(false); + } + + public List listMRoleMembers(String roleName) { + Query query = null; + List mRoleMemeberList = new ArrayList<>(); + query = pm.newQuery(MRoleMap.class, "role.roleName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(false); + List mRoles = (List) query.execute(roleName); + pm.retrieveAll(mRoles); + mRoleMemeberList.addAll(mRoles); + return mRoleMemeberList; + } + + @Override + public List listRoleMembers(String roleName) { + List roleMaps = listMRoleMembers(roleName); + List rolePrinGrantList = new ArrayList<>(); + + if (roleMaps != null) { + for (MRoleMap roleMap : roleMaps) { + RolePrincipalGrant rolePrinGrant = new RolePrincipalGrant( + roleMap.getRole().getRoleName(), + roleMap.getPrincipalName(), + PrincipalType.valueOf(roleMap.getPrincipalType()), + roleMap.getGrantOption(), + roleMap.getAddTime(), + roleMap.getGrantor(), + // no grantor type for public role, hence the null check + 
roleMap.getGrantorType() == null ? null + : PrincipalType.valueOf(roleMap.getGrantorType()) + ); + rolePrinGrantList.add(rolePrinGrant); + + } + } + return rolePrinGrantList; + } + + private List listPrincipalMGlobalGrants(String principalName, + PrincipalType principalType) { + return listPrincipalMGlobalGrants(principalName, principalType, null); + } + + private List listPrincipalMGlobalGrants(String principalName, + PrincipalType principalType, String authorizer) { + Query query; + List userNameDbPriv = new ArrayList<>(); + List mPrivs = null; + if (principalName != null) { + if (authorizer != null) { + query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2 " + + "&& authorizer == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mPrivs = (List) query + .executeWithArray(principalName, principalType.toString(), authorizer); + } else { + query = pm.newQuery(MGlobalPrivilege.class, "principalName == t1 && principalType == t2 "); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mPrivs = (List) query + .executeWithArray(principalName, principalType.toString()); + } + pm.retrieveAll(mPrivs); + } + if (mPrivs != null) { + userNameDbPriv.addAll(mPrivs); + } + return userNameDbPriv; + } + + @Override + public List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { + List mUsers = + listPrincipalMGlobalGrants(principalName, principalType); + if (mUsers.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mUsers.size(); i++) { + MGlobalPrivilege sUsr = mUsers.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.GLOBAL, null, null, null, null); + HiveObjectPrivilege secUser = new HiveObjectPrivilege( + objectRef, sUsr.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr + .getCreateTime(), sUsr.getGrantor(), PrincipalType + 
.valueOf(sUsr.getGrantorType()), sUsr.getGrantOption()), + sUsr.getAuthorizer()); + result.add(secUser); + } + return result; + } + + @Override + public List listGlobalGrantsAll() { + Query query = pm.newQuery(MGlobalPrivilege.class); + List userNameDbPriv = (List) query.execute(); + pm.retrieveAll(userNameDbPriv); + return convertGlobal(userNameDbPriv); + } + + private List convertGlobal(List privs) { + List result = new ArrayList<>(); + for (MGlobalPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalMDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName) { + return listPrincipalMDBGrants(principalName, principalType, catName, dbName, null); + } + + private List listPrincipalMDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName, String authorizer) { + Query query = null; + List mSecurityDBList = new ArrayList<>(); + dbName = normalizeIdentifier(dbName); + List mPrivs; + if (authorizer != null) { + query = pm.newQuery(MDBPrivilege.class, + "principalName == t1 && principalType == t2 && database.name == t3 && " + + "database.catalogName == t4 && authorizer == t5"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " + + "java.lang.String t5"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), + dbName, catName, authorizer); + } else { + query = 
pm.newQuery(MDBPrivilege.class, + "principalName == t1 && principalType == t2 && database.name == t3 && database.catalogName == t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), + dbName, catName); + } + pm.retrieveAll(mPrivs); + mSecurityDBList.addAll(mPrivs); + return mSecurityDBList; + } + + private List listPrincipalMDCGrants(String principalName, + PrincipalType principalType, String dcName) { + return listPrincipalMDCGrants(principalName, principalType, dcName, null); + } + + private List listPrincipalMDCGrants(String principalName, + PrincipalType principalType, String dcName, String authorizer) { + Query query = null; + List mSecurityDCList = new ArrayList<>(); + dcName = normalizeIdentifier(dcName); + List mPrivs; + if (authorizer != null) { + query = pm.newQuery(MDCPrivilege.class, + "principalName == t1 && principalType == t2 && dataConnector.name == t3 && " + + "authorizer == t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), + dcName, authorizer); + } else { + query = pm.newQuery(MDCPrivilege.class, + "principalName == t1 && principalType == t2 && dataConnector.name == t3"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), dcName); + } + pm.retrieveAll(mPrivs); + mSecurityDCList.addAll(mPrivs); + return mSecurityDCList; + } + + @Override + public List listPrincipalDBGrants(String principalName, + PrincipalType principalType, + String catName, String dbName) { + List mDbs = listPrincipalMDBGrants(principalName, principalType, catName, dbName); + if (mDbs.isEmpty()) { + return Collections.emptyList(); + } + List result = 
new ArrayList<>(); + for (int i = 0; i < mDbs.size(); i++) { + MDBPrivilege sDB = mDbs.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.DATABASE, dbName, null, null, null); + objectRef.setCatName(catName); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sDB.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sDB.getPrivilege(), sDB + .getCreateTime(), sDB.getGrantor(), PrincipalType + .valueOf(sDB.getGrantorType()), sDB.getGrantOption()), sDB.getAuthorizer()); + result.add(secObj); + } + return result; + } + + @Override + public List listPrincipalDBGrantsAll(String principalName, PrincipalType principalType) { + return convertDB(listPrincipalAllDBGrant(principalName, principalType)); + } + + @Override + public List listDBGrantsAll(String catName, String dbName) { + return listDBGrantsAll(catName, dbName, null); + } + + private List listDBGrantsAll(String catName, String dbName, String authorizer) { + return convertDB(listDatabaseGrants(catName, dbName, authorizer)); + } + + private List convertDB(List privs) { + List result = new ArrayList<>(); + for (MDBPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + String database = priv.getDatabase().getName(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATABASE, database, + null, null, null); + objectRef.setCatName(priv.getDatabase().getCatalogName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllDBGrant(String principalName, PrincipalType principalType) { + final List mSecurityDBList; + + LOG.debug("Executing listPrincipalAllDBGrant"); + 
Query query; + if (principalName != null && principalType != null) { + query = pm.newQuery(MDBPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityDBList = (List) query.execute(principalName, principalType.toString()); + pm.retrieveAll(mSecurityDBList); + LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant: {}", mSecurityDBList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList)); + } else { + query = pm.newQuery(MDBPrivilege.class); + mSecurityDBList = (List) query.execute(); + pm.retrieveAll(mSecurityDBList); + LOG.debug("Done retrieving all objects for listPrincipalAllDBGrant: {}", mSecurityDBList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList)); + } + } + + @Override + public List listPrincipalDCGrants(String principalName, + PrincipalType principalType, + String dcName) { + List mDcs = listPrincipalMDCGrants(principalName, principalType, dcName); + if (mDcs.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mDcs.size(); i++) { + MDCPrivilege sDC = mDcs.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.DATACONNECTOR, null, dcName, null, null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sDC.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sDC.getPrivilege(), sDC + .getCreateTime(), sDC.getGrantor(), PrincipalType + .valueOf(sDC.getGrantorType()), sDC.getGrantOption()), sDC.getAuthorizer()); + result.add(secObj); + } + return result; + } + + @Override + public List listPrincipalDCGrantsAll(String principalName, PrincipalType principalType) { + return convertDC(listPrincipalAllDCGrant(principalName, principalType)); + } + + @Override + public List listDCGrantsAll(String dcName) { + return listDCGrantsAll(dcName, null); + } + + private List listDCGrantsAll(String dcName, String authorizer) { + return 
convertDC(listDataConnectorGrants(dcName, authorizer)); + } + + private List convertDC(List privs) { + List result = new ArrayList<>(); + for (MDCPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + String dataConnectorName = priv.getDataConnector().getName(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATACONNECTOR, null, + dataConnectorName, null, null); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllDCGrant(String principalName, PrincipalType principalType) { + final List mSecurityDCList; + + LOG.debug("Executing listPrincipalAllDCGrant"); + + if (principalName != null && principalType != null) { + Query query = pm.newQuery(MDCPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityDCList = (List) query.execute(principalName, principalType.toString()); + pm.retrieveAll(mSecurityDCList); + LOG.debug("Done retrieving all objects for listPrincipalAllDCGrant: {}", mSecurityDCList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDCList)); + } else { + Query query = pm.newQuery(MDCPrivilege.class); + mSecurityDCList = (List) query.execute(); + pm.retrieveAll(mSecurityDCList); + LOG.debug("Done retrieving all objects for listPrincipalAllDCGrant: {}", mSecurityDCList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDCList)); + } + } + + private List listTableAllColumnGrants( + String catName, String dbName, String tableName, String authorizer) { + boolean success = false; + Query query = null; + List 
mTblColPrivilegeList = new ArrayList<>(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); + List mPrivs = null; + if (authorizer != null) { + String queryStr = "table.tableName == t1 && table.database.name == t2 &&" + + "table.database.catalogName == t3 && authorizer == t4"; + query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + mPrivs = (List) query.executeWithArray(tableName, dbName, catName, authorizer); + } else { + String queryStr = "table.tableName == t1 && table.database.name == t2 &&" + + "table.database.catalogName == t3"; + query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mPrivs = (List) query.executeWithArray(tableName, dbName, catName); + } + LOG.debug("Query to obtain objects for listTableAllColumnGrants finished"); + pm.retrieveAll(mPrivs); + LOG.debug("RetrieveAll on all the objects for listTableAllColumnGrants finished"); + mTblColPrivilegeList.addAll(mPrivs); + return mTblColPrivilegeList; + } + + @Override + public List listDatabaseGrants(String catName, String dbName, String authorizer) { + LOG.debug("Executing listDatabaseGrants"); + + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); + + final Query query; + final String[] args; + + if (authorizer != null) { + query = pm.newQuery(MDBPrivilege.class, "database.name == t1 && database.catalogName == t2 && authorizer == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + args = new String[] { dbName, catName, authorizer }; + } else { + query = pm.newQuery(MDBPrivilege.class, "database.name == t1 && database.catalogName == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + args = new 
String[] { dbName, catName }; + } + + final List mSecurityDBList = (List) query.executeWithArray(args); + pm.retrieveAll(mSecurityDBList); + LOG.debug("Done retrieving all objects for listDatabaseGrants: {}", mSecurityDBList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDBList)); + } + + @Override + public List listDataConnectorGrants(String dcName, String authorizer) { + LOG.debug("Executing listDataConnectorGrants"); + + dcName = normalizeIdentifier(dcName); + + final Query query; + String[] args = null; + final List mSecurityDCList; + + if (authorizer != null) { + query = pm.newQuery(MDCPrivilege.class, "dataConnector.name == t1 && authorizer == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + args = new String[] { dcName, authorizer }; + } else { + query = pm.newQuery(MDCPrivilege.class, "dataConnector.name == t1"); + query.declareParameters("java.lang.String t1"); + } + if (args != null) { + mSecurityDCList = (List) query.executeWithArray(args); + } else { + mSecurityDCList = (List) query.execute(dcName); + } + pm.retrieveAll(mSecurityDCList); + LOG.debug("Done retrieving all objects for listDataConnectorGrants: {}", mSecurityDCList); + return Collections.unmodifiableList(new ArrayList<>(mSecurityDCList)); + } + + private List listAllMTableGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName) { + return listAllMTableGrants(principalName, principalType, catName, dbName, tableName, null); + } + + private List listAllMTableGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String authorizer) { + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); + Query query = null; + List mSecurityTabPartList = new ArrayList<>(); + LOG.debug("Executing listAllTableGrants"); + List mPrivs; + if (authorizer != null) { + query = 
pm.newQuery(MTablePrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 &&" + + "table.database.name == t4 && table.database.catalogName == t5 && authorizer == t6"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3," + + "java.lang.String t4, java.lang.String t5, java.lang.String t6"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), + tableName, dbName, catName, authorizer); + } else { + query = pm.newQuery(MTablePrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 &&" + + "table.database.name == t4 && table.database.catalogName == t5"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3," + + "java.lang.String t4, java.lang.String t5"); + mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), + tableName, dbName, catName); + } + pm.retrieveAll(mPrivs); + mSecurityTabPartList.addAll(mPrivs); + return mSecurityTabPartList; + } + + @Override + public List listAllTableGrants(String principalName, + PrincipalType principalType, TableName table) { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + List mTbls = + listAllMTableGrants(principalName, principalType, catName, dbName, tableName); + if (mTbls.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mTbls.size(); i++) { + MTablePrivilege sTbl = mTbls.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.TABLE, dbName, tableName, null, null); + objectRef.setCatName(catName); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sTbl.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl + .getGrantor(), PrincipalType.valueOf(sTbl + .getGrantorType()), 
sTbl.getGrantOption()), sTbl.getAuthorizer()); + result.add(secObj); + } + return result; + } + + private List listPrincipalMPartitionGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String partName) { + return listPrincipalMPartitionGrants(principalName, principalType, catName, dbName, tableName, partName, null); + } + + private List listPrincipalMPartitionGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String partName, String authorizer) { + Query query; + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); + List mSecurityTabPartList = new ArrayList<>(); + List mPrivs; + if (authorizer != null) { + query = pm.newQuery(MPartitionPrivilege.class, + "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + + "&& partition.partitionName == t6 && authorizer == t7"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " + + "java.lang.String t5, java.lang.String t6, java.lang.String t7"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, partName, authorizer); + } else { + query = pm.newQuery(MPartitionPrivilege.class, + "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + + "&& partition.partitionName == t6"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, " + + "java.lang.String t5, java.lang.String t6"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, partName); + } + pm.retrieveAll(mPrivs); 
+ mSecurityTabPartList.addAll(mPrivs); + return mSecurityTabPartList; + } + + @Override + public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, + TableName table, + List partValues, + String partName) { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + List mParts = listPrincipalMPartitionGrants(principalName, + principalType, catName, dbName, tableName, partName); + if (mParts.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mParts.size(); i++) { + MPartitionPrivilege sPart = mParts.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.PARTITION, dbName, tableName, partValues, null); + objectRef.setCatName(catName); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sPart.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sPart.getPrivilege(), sPart + .getCreateTime(), sPart.getGrantor(), PrincipalType + .valueOf(sPart.getGrantorType()), sPart + .getGrantOption()), sPart.getAuthorizer()); + + result.add(secObj); + } + return result; + } + + private List listPrincipalMTableColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String columnName) { + return listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, + columnName, null); + } + + private List listPrincipalMTableColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String columnName, String authorizer) { + Query query; + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + List mSecurityColList = new ArrayList<>(); + List mPrivs; + if (authorizer != null) { + String queryStr = + "principalName == t1 && 
principalType == t2 && " + + "table.tableName == t3 && table.database.name == t4 && " + + "table.database.catalogName == t5 && columnName == t6 && authorizer == t7"; + query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, columnName, authorizer); + } else { + String queryStr = + "principalName == t1 && principalType == t2 && " + + "table.tableName == t3 && table.database.name == t4 && " + + "table.database.catalogName == t5 && columnName == t6 "; + query = pm.newQuery(MTableColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5, java.lang.String t6"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, columnName); + } + pm.retrieveAll(mPrivs); + mSecurityColList.addAll(mPrivs); + return mSecurityColList; + } + + @Override + public List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, + TableName table, + String columnName) { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + List mTableCols = + listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); + if (mTableCols.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mTableCols.size(); i++) { + MTableColumnPrivilege sCol = mTableCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, null, sCol.getColumnName()); + objectRef.setCatName(catName); 
+ HiveObjectPrivilege secObj = new HiveObjectPrivilege( + objectRef, sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol + .getGrantOption()), sCol.getAuthorizer()); + result.add(secObj); + } + return result; + } + + private List listPrincipalMPartitionColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String partitionName, String columnName) { + return listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, + tableName, partitionName, columnName, null); + } + + private List listPrincipalMPartitionColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, + String tableName, String partitionName, String columnName, String authorizer) { + Query query = null; + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); + List mSecurityColList = new ArrayList<>(); + List mPrivs; + if (authorizer != null) { + query = pm.newQuery( + MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2 && partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + + " && partition.partitionName == t6 && columnName == t7 && authorizer == t8"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7, " + + "java.lang.String t8"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, partitionName, columnName, authorizer); + } else { + query = pm.newQuery( + MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2 && 
partition.table.tableName == t3 " + + "&& partition.table.database.name == t4 && partition.table.database.catalogName == t5" + + " && partition.partitionName == t6 && columnName == t7"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7"); + mPrivs = (List) query.executeWithArray(principalName, + principalType.toString(), tableName, dbName, catName, partitionName, columnName); + } + pm.retrieveAll(mPrivs); + mSecurityColList.addAll(mPrivs); + return mSecurityColList; + } + + @Override + public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, + TableName table, + List partValues, + String partitionName, + String columnName) { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + List mPartitionCols = + listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, + partitionName, columnName); + if (mPartitionCols.isEmpty()) { + return Collections.emptyList(); + } + List result = new ArrayList<>(); + for (int i = 0; i < mPartitionCols.size(); i++) { + MPartitionColumnPrivilege sCol = mPartitionCols.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.COLUMN, dbName, tableName, partValues, sCol.getColumnName()); + objectRef.setCatName(catName); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sCol.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sCol.getPrivilege(), sCol + .getCreateTime(), sCol.getGrantor(), PrincipalType + .valueOf(sCol.getGrantorType()), sCol.getGrantOption()), sCol.getAuthorizer()); + result.add(secObj); + } + return result; + } + + @Override + public List listPrincipalPartitionColumnGrantsAll( + String principalName, PrincipalType principalType) { + Query query = null; + 
LOG.debug("Executing listPrincipalPartitionColumnGrantsAll"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + query = + pm.newQuery(MPartitionColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = + (List) query.executeWithArray(principalName, + principalType.toString()); + } else { + query = pm.newQuery(MPartitionColumnPrivilege.class); + mSecurityTabPartList = (List) query.execute(); + } + LOG.debug("Done executing query for listPrincipalPartitionColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartCols(mSecurityTabPartList); + return result; + } + + @Override + public List listPartitionColumnGrantsAll( + TableName table, String partitionName, String columnName) { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + LOG.debug("Executing listPartitionColumnGrantsAll"); + Query query = + pm.newQuery(MPartitionColumnPrivilege.class, + "partition.table.tableName == t3 && partition.table.database.name == t4 && " + + "partition.table.database.catalogName == t5 && " + + "partition.partitionName == t6 && columnName == t7"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5," + + "java.lang.String t6, java.lang.String t7"); + List mSecurityTabPartList = + (List) query.executeWithArray(tableName, dbName, catName, + partitionName, columnName); + LOG.debug("Done executing query for listPartitionColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartCols(mSecurityTabPartList); + return result; + } + + private List convertPartCols(List privs) { + List result = new ArrayList<>(); + for (MPartitionColumnPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); +
PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + + MPartition mpartition = priv.getPartition(); + MTable mtable = mpartition.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, + mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), priv.getColumnName()); + objectRef.setCatName(mdatabase.getCatalogName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllTableGrants(String principalName, PrincipalType principalType) { + LOG.debug("Executing listPrincipalAllTableGrants"); + Query query = pm.newQuery(MTablePrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + final List mSecurityTabPartList = + (List) query.execute(principalName, principalType.toString()); + + pm.retrieveAll(mSecurityTabPartList); + + LOG.debug("Done retrieving all objects for listPrincipalAllTableGrants"); + + return Collections.unmodifiableList(new ArrayList<>(mSecurityTabPartList)); + } + + @Override + public List listPrincipalTableGrantsAll(String principalName, + PrincipalType principalType) { + Query query; + LOG.debug("Executing listPrincipalAllTableGrants"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + query = pm.newQuery(MTablePrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = + (List) query.execute(principalName, principalType.toString()); + } else { + query = pm.newQuery(MTablePrivilege.class); + mSecurityTabPartList = (List) query.execute(); + } + List result = 
convertTable(mSecurityTabPartList); + return result; + } + + @Override + public List listTableGrantsAll(TableName table) { + return listTableGrantsAll(table, null); + } + + private List listTableGrantsAll(TableName table, + String authorizer) { + Query query; + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + LOG.debug("Executing listTableGrantsAll"); + List mSecurityTabPartList = null; + if (authorizer != null) { + query = pm.newQuery(MTablePrivilege.class, + "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3" + + " && authorizer == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + mSecurityTabPartList = (List) query.executeWithArray(tableName, dbName, catName, authorizer); + } else { + query = pm.newQuery(MTablePrivilege.class, + "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityTabPartList = (List) query.executeWithArray(tableName, dbName, catName); + } + LOG.debug("Done executing query for listTableGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTable(mSecurityTabPartList); + return result; + } + + private List convertTable(List privs) { + List result = new ArrayList<>(); + for (MTablePrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + + String table = priv.getTable().getTableName(); + String database = priv.getTable().getDatabase().getName(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.TABLE, database, table, + null, null); + objectRef.setCatName(priv.getTable().getDatabase().getCatalogName()); + 
PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllPartitionGrants(String principalName, PrincipalType principalType) { + LOG.debug("Executing listPrincipalAllPartitionGrants"); + + Query query = pm.newQuery(MPartitionPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + final List mSecurityTabPartList = + (List) query.execute(principalName, principalType.toString()); + + pm.retrieveAll(mSecurityTabPartList); + LOG.debug("Done retrieving all objects for listPrincipalAllPartitionGrants"); + + return Collections.unmodifiableList(new ArrayList<>(mSecurityTabPartList)); + } + + @Override + public List listPrincipalPartitionGrantsAll(String principalName, + PrincipalType principalType) { + Query query = null; + LOG.debug("Executing listPrincipalPartitionGrantsAll"); + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + query = + pm.newQuery(MPartitionPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = + (List) query.execute(principalName, principalType.toString()); + } else { + query = pm.newQuery(MPartitionPrivilege.class); + mSecurityTabPartList = (List) query.execute(); + } + LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartition(mSecurityTabPartList); + return result; + } + + @Override + public List listPartitionGrantsAll(TableName table, String partitionName) { + String tableName = normalizeIdentifier(table.getTable()); + String dbName = normalizeIdentifier(table.getDb()); + String 
catName = normalizeIdentifier(table.getCat()); + LOG.debug("Executing listPrincipalPartitionGrantsAll"); + Query query = + pm.newQuery(MPartitionPrivilege.class, + "partition.table.tableName == t3 && partition.table.database.name == t4 && " + + "partition.table.database.catalogName == t5 && partition.partitionName == t6"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, " + + "java.lang.String t6"); + List mSecurityTabPartList = + (List) query.executeWithArray(tableName, dbName, catName, partitionName); + LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertPartition(mSecurityTabPartList); + return result; + } + + private List convertPartition(List privs) { + List result = new ArrayList<>(); + for (MPartitionPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + + MPartition mpartition = priv.getPartition(); + MTable mtable = mpartition.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.PARTITION, + mdatabase.getName(), mtable.getTableName(), mpartition.getValues(), null); + objectRef.setCatName(mdatabase.getCatalogName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllTableColumnGrants(String principalName, + PrincipalType principalType) { + + LOG.debug("Executing listPrincipalAllTableColumnGrants"); + + + Query query = pm.newQuery(MTableColumnPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, 
java.lang.String t2"); + final List mSecurityColumnList = + (List) query.execute(principalName, principalType.toString()); + + pm.retrieveAll(mSecurityColumnList); + LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + + return Collections.unmodifiableList(new ArrayList<>(mSecurityColumnList)); + } + + @Override + public List listPrincipalTableColumnGrantsAll(String principalName, + PrincipalType principalType) { + Query query = null; + LOG.debug("Executing listPrincipalTableColumnGrantsAll"); + + List mSecurityTabPartList; + if (principalName != null && principalType != null) { + query = + pm.newQuery(MTableColumnPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = + (List) query.execute(principalName, principalType.toString()); + } else { + query = pm.newQuery(MTableColumnPrivilege.class); + mSecurityTabPartList = (List) query.execute(); + } + LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTableCols(mSecurityTabPartList); + return result; + } + + @Override + public List listTableColumnGrantsAll(TableName table, + String columnName) { + Query query = null; + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + LOG.debug("Executing listPrincipalTableColumnGrantsAll"); + query = + pm.newQuery(MTableColumnPrivilege.class, + "table.tableName == t3 && table.database.name == t4 && " + + "table.database.catalogName == t5 && columnName == t6"); + query.declareParameters("java.lang.String t3, java.lang.String t4, java.lang.String t5, " + + "java.lang.String t6"); + List mSecurityTabPartList = + (List) query.executeWithArray(tableName, dbName, + catName, columnName); + LOG.debug("Done executing query for 
listPrincipalTableColumnGrantsAll"); + pm.retrieveAll(mSecurityTabPartList); + List result = convertTableCols(mSecurityTabPartList); + return result; + } + + private List convertTableCols(List privs) { + List result = new ArrayList<>(); + for (MTableColumnPrivilege priv : privs) { + String pname = priv.getPrincipalName(); + String authorizer = priv.getAuthorizer(); + PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); + + MTable mtable = priv.getTable(); + MDatabase mdatabase = mtable.getDatabase(); + + HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.COLUMN, + mdatabase.getName(), mtable.getTableName(), null, priv.getColumnName()); + objectRef.setCatName(mdatabase.getCatalogName()); + PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), + priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); + + result.add(new HiveObjectPrivilege(objectRef, pname, ptype, grantor, authorizer)); + } + return result; + } + + private List listPrincipalAllPartitionColumnGrants(String principalName, + PrincipalType principalType) { + LOG.debug("Executing listPrincipalAllTableColumnGrants"); + + Query query = pm.newQuery(MPartitionColumnPrivilege.class, "principalName == t1 && principalType == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + final List mSecurityColumnList = + (List) query.execute(principalName, principalType.toString()); + + pm.retrieveAll(mSecurityColumnList); + LOG.debug("Done retrieving all objects for listPrincipalAllTableColumnGrants"); + + return Collections.unmodifiableList(new ArrayList<>(mSecurityColumnList)); + } + + @Override + public void setBaseStore(RawStore store) { + super.setBaseStore(store); + this.conf = baseStore.getConf(); + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/TableStoreImpl.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/TableStoreImpl.java new file mode 100644 index 000000000000..805e889d94f6 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metastore/impl/TableStoreImpl.java @@ -0,0 +1,2966 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.metastore.impl; + +import com.google.common.base.Joiner; + +import javax.jdo.Query; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.metastore.Batchable; +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.apache.hadoop.hive.metastore.directsql.MetaStoreDirectSql; +import org.apache.hadoop.hive.metastore.PartFilterExprUtil; +import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; +import org.apache.hadoop.hive.metastore.PartitionProjectionEvaluator; +import org.apache.hadoop.hive.metastore.PersistenceManagerProvider; +import org.apache.hadoop.hive.metastore.QueryWrapper; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.TableFields; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; +import 
org.apache.hadoop.hive.metastore.api.GetProjectionsSpec; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PartitionFilterMode; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.PartitionValuesRow; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.metastore.RawStoreAware; +import org.apache.hadoop.hive.metastore.model.FetchGroups; +import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; +import org.apache.hadoop.hive.metastore.model.MConstraint; +import org.apache.hadoop.hive.metastore.model.MCreationMetadata; +import org.apache.hadoop.hive.metastore.model.MMVSource; +import org.apache.hadoop.hive.metastore.model.MPartition; +import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; +import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; +import 
org.apache.hadoop.hive.metastore.model.MPartitionEvent; +import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; +import org.apache.hadoop.hive.metastore.model.MStorageDescriptor; +import org.apache.hadoop.hive.metastore.model.MTable; +import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; +import org.apache.hadoop.hive.metastore.model.MTablePrivilege; +import org.apache.hadoop.hive.metastore.parser.ExpressionTree; +import org.apache.hadoop.hive.metastore.metastore.GetHelper; +import org.apache.hadoop.hive.metastore.metastore.GetListHelper; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.commons.lang3.StringUtils.join; +import static org.apache.hadoop.hive.metastore.Batchable.NO_BATCHING; +import static org.apache.hadoop.hive.metastore.ObjectStore.appendPatternCondition; +import static org.apache.hadoop.hive.metastore.ObjectStore.appendSimpleCondition; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToCreationMetadata; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToFieldSchemas; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToMCreationMetadata; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToMPart; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToMTable; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToPart; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToParts; +import static org.apache.hadoop.hive.metastore.ObjectStore.convertToTable; +import static 
org.apache.hadoop.hive.metastore.ObjectStore.getJDOFilterStrForPartitionNames;
import static org.apache.hadoop.hive.metastore.ObjectStore.getPartQueryWithParams;
import static org.apache.hadoop.hive.metastore.ObjectStore.makeParameterDeclarationString;
import static org.apache.hadoop.hive.metastore.ObjectStore.putPersistentPrivObjects;
import static org.apache.hadoop.hive.metastore.ObjectStore.verifyStatsChangeCtx;
import static org.apache.hadoop.hive.metastore.metastore.impl.PrivilegeStoreImpl.getPrincipalTypeFromStr;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.newMetaException;
import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;

/**
 * Table-centric slice of the metastore persistence layer, factored out of ObjectStore.
 * Handles table drop, partition add/drop/list and the related privilege, constraint,
 * column-descriptor and creation-metadata bookkeeping, using JDO with a direct-SQL
 * fast path where available.
 *
 * NOTE(review): {@code pm} used throughout appears to be the JDO PersistenceManager
 * inherited from {@link RawStoreAware} — confirm against that base class.
 */
@SuppressWarnings("unchecked")
public class TableStoreImpl extends RawStoreAware implements TableStore {
  private final static Logger LOG = LoggerFactory.getLogger(TableStoreImpl.class);
  // Backend database product; drives dialect-specific query choices (see hasRemainingCDReference).
  private DatabaseProduct dbType;
  // Partition batch size for bulk operations; NO_BATCHING means process everything in one pass.
  protected int batchSize = NO_BATCHING;
  // Whether transactional-table statistics are enabled (HIVE_TXN_STATS_ENABLED).
  private boolean areTxnStatsSupported = false;
  // Proxy used to evaluate partition filter expressions pushed down from clients.
  private PartitionExpressionProxy expressionProxy = null;
  // Configuration snapshot taken from the base RawStore at wiring time.
  private Configuration conf;

  /**
   * Wires this store to its backing RawStore and caches configuration-derived settings.
   * NOTE(review): mixes {@code store.getConf()} and {@code baseStore.getConf()} — presumably
   * the same object once super.setBaseStore(store) has run, but worth unifying.
   */
  @Override
  public void setBaseStore(RawStore store) {
    super.setBaseStore(store);
    this.dbType = PersistenceManagerProvider.getDatabaseProduct();
    this.batchSize = MetastoreConf.getIntVar(store.getConf(),
        MetastoreConf.ConfVars.RAWSTORE_PARTITION_BATCH_SIZE);
    this.areTxnStatsSupported = MetastoreConf.getBoolVar(baseStore.getConf(),
        MetastoreConf.ConfVars.HIVE_TXN_STATS_ENABLED);
    this.expressionProxy = PartFilterExprUtil.createExpressionProxy(store.getConf());
    this.conf = store.getConf();
  }

  /**
   * Drops a table and everything hanging off it: table/column grants, partition and
   * partition-column grants, column statistics, constraints, the storage descriptor's
   * column descriptor (if unreferenced) and — for materialized views — creation metadata.
   * Deletion order matters: dependents are removed before the MTable itself to satisfy
   * foreign-key constraints.
   *
   * @param table fully-qualified table name (catalog, database, table)
   * @return always true; a missing table is a silent no-op
   */
  @Override
  public boolean dropTable(TableName table)
      throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
    String catName = normalizeIdentifier(table.getCat());
    String dbName = normalizeIdentifier(table.getDb());
    String tableName = normalizeIdentifier(table.getTable());
    boolean materializedView = false;
    MTable tbl = getMTable(catName, dbName, tableName);
    pm.retrieve(tbl);
    if (tbl != null) {
      materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType());
      // First remove all the grants attached to the table and its columns.
      List tabGrants = listAllTableGrants(catName, dbName, tableName);
      if (CollectionUtils.isNotEmpty(tabGrants)) {
        pm.deletePersistentAll(tabGrants);
      }
      List tblColGrants = listTableAllColumnGrants(catName, dbName,
          tableName, null);
      if (CollectionUtils.isNotEmpty(tblColGrants)) {
        pm.deletePersistentAll(tblColGrants);
      }

      // Then partition-level and partition-column-level grants.
      List partGrants = listTableAllPartitionGrants(catName, dbName, tableName);
      if (CollectionUtils.isNotEmpty(partGrants)) {
        pm.deletePersistentAll(partGrants);
      }

      List partColGrants = listTableAllPartitionColumnGrants(catName, dbName,
          tableName);
      if (CollectionUtils.isNotEmpty(partColGrants)) {
        pm.deletePersistentAll(partColGrants);
      }

      // Delete column statistics if present (delegated to the base store).
      baseStore.deleteTableColumnStatistics(catName, dbName, tableName, null, null);

      // Drop every constraint this table participates in, as parent or child.
      List tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
          catName, dbName, tableName, null);
      if (CollectionUtils.isNotEmpty(tabConstraints)) {
        pm.deletePersistentAll(tabConstraints);
      }

      // Detach the column descriptor from the SD and delete it if unreferenced elsewhere.
      preDropStorageDescriptor(tbl.getSd());

      if (materializedView) {
        dropCreationMetadata(tbl.getDatabase().getCatalogName(),
            tbl.getDatabase().getName(), tbl.getTableName());
      }

      // Finally remove the table row itself.
      pm.deletePersistentAll(tbl);
    }
    return true;
  }

  /**
   * Lists all MConstraint rows referencing the given table as parent or child,
   * optionally restricted to a single constraint name. Runs two queries: first a
   * projection of matching constraint names, then a fetch of the MConstraint
   * objects whose name is in that set.
   */
  private List listAllTableConstraintsWithOptionalConstraintName(
      String catName, String dbName, String tableName, String constraintname) {
    catName = normalizeIdentifier(catName);
    dbName = normalizeIdentifier(dbName);
    tableName = normalizeIdentifier(tableName);
    constraintname = constraintname!=null?normalizeIdentifier(constraintname):null;
    List mConstraints = null;
    List constraintNames = new ArrayList<>();

    // Query 1: project the names of constraints where this table is parent or child.
    Query queryForConstraintName =
        pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where " +
        "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " +
        "parentTable.database.catalogName == pcatname) || " +
        "(childTable != null && childTable.tableName == ctblname &&" +
        "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " +
        (constraintname != null ? " && constraintName == constraintname" : ""));
    Query queryForMConstraint = pm.newQuery(MConstraint.class);
    queryForConstraintName.declareParameters("java.lang.String ptblname, java.lang.String pdbname," +
        "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," +
        "java.lang.String ccatname" +
        (constraintname != null ? ", java.lang.String constraintname" : ""));
    Collection constraintNamesColl =
        constraintname != null ?
            ((Collection) queryForConstraintName.
                executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)):
            ((Collection) queryForConstraintName.
                executeWithArray(tableName, dbName, catName, tableName, dbName, catName));
    for (Iterator i = constraintNamesColl.iterator(); i.hasNext();) {
      String currName = (String) i.next();
      constraintNames.add(currName);
    }

    // Query 2: fetch the MConstraint objects whose name is in the projected set.
    queryForMConstraint.setFilter("param.contains(constraintName)");
    queryForMConstraint.declareParameters("java.util.Collection param");
    Collection constraints = (Collection)queryForMConstraint.execute(constraintNames);
    mConstraints = new ArrayList<>();
    for (Iterator i = constraints.iterator(); i.hasNext();) {
      MConstraint currConstraint = (MConstraint) i.next();
      mConstraints.add(currConstraint);
    }
    return mConstraints;
  }

  /**
   * Lists every partition-column privilege for all partitions of the given table.
   * Used by dropTable to wipe partition-column grants in bulk.
   */
  private List listTableAllPartitionColumnGrants(
      String catName, String dbName, String tableName) {
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    List mSecurityColList = new ArrayList<>();
    LOG.debug("Executing listTableAllPartitionColumnGrants");

    String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " +
        "&& partition.table.database.catalogName == t3";
    Query query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr);
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
    List mPrivs =
        (List) query.executeWithArray(tableName, dbName, catName);
    // retrieveAll materializes the lazily-loaded JDO objects before the query is closed.
    pm.retrieveAll(mPrivs);
    mSecurityColList.addAll(mPrivs);

    LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants");
    return mSecurityColList;
  }


  /**
   * Lists every partition-level privilege for all partitions of the given table.
   * Used by dropTable to wipe partition grants in bulk.
   */
  private List listTableAllPartitionGrants(String catName, String dbName, String tableName) {
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    List mSecurityTabPartList = new ArrayList<>();
    LOG.debug("Executing listTableAllPartitionGrants");

    String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " +
        "&& partition.table.database.catalogName == t3";
    Query query = pm.newQuery(MPartitionPrivilege.class, queryStr);
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
    List mPrivs =
        (List) query.executeWithArray(tableName, dbName, catName);
    pm.retrieveAll(mPrivs);
    mSecurityTabPartList.addAll(mPrivs);

    LOG.debug("Done retrieving all objects for listTableAllPartitionGrants");
    return mSecurityTabPartList;
  }

  /**
   * Removes the creation-metadata row of a materialized view, if one exists.
   * Silent no-op when no metadata is found.
   */
  private void dropCreationMetadata(String catName, String dbName, String tableName) {
    MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
    pm.retrieve(mcm);
    if (mcm != null) {
      pm.deletePersistentAll(mcm);
    }
  }

  /**
   * Called right before an action that would drop a storage descriptor.
   * This function makes the SD's reference to a CD null, and then deletes the CD
   * if it no longer is referenced in the table.
   * @param msd the storage descriptor to drop
   */
  private void preDropStorageDescriptor(MStorageDescriptor msd) {
    if (msd == null || msd.getCD() == null) {
      return;
    }

    MColumnDescriptor mcd = msd.getCD();
    // Because there is a 1-N relationship between CDs and SDs,
    // we must set the SD's CD to null first before dropping the storage descriptor
    // to satisfy foreign key constraints.
+ msd.setCD(null); + removeUnusedColumnDescriptor(mcd); + } + + + @Override + public List dropAllPartitionsAndGetLocations(TableName table, String baseLocationToNotShow, + AtomicReference message) + throws MetaException, InvalidInputException, NoSuchObjectException, InvalidObjectException { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tblName = normalizeIdentifier(table.getTable()); + return new GetHelper>(this, new TableName(catName, dbName, tblName)) { + @Override + protected String describeResult() { + return "delete all partitions from " + table; + } + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql() + .dropAllPartitionsAndGetLocations(getTable().getId(), baseLocationToNotShow, message); + } + + @Override + protected List getJdoResult() + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + Map partitionLocations = + getPartitionLocations(table, baseLocationToNotShow, -1); + dropPartitionsViaJdo(catName, dbName, tblName, new ArrayList<>(partitionLocations.keySet()), message); + return partitionLocations.values().stream().filter(Objects::nonNull).toList(); + } + }.run(true); + } + + @Override + public Map getPartitionLocations(TableName tableName, + String baseLocationToNotShow, int max) { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + + Map partLocations = new HashMap<>(); + LOG.debug("Executing getPartitionLocations"); + + Query query = pm.newQuery(MPartition.class); + query.setFilter( + "this.table.database.catalogName == t1 && this.table.database.name == t2 " + + "&& this.table.tableName == t3"); + query.declareParameters("String t1, String t2, String t3"); + query.setResult("this.partitionName, this.sd.location"); + if (max >= 0) { + //Row limit specified, set it on the 
Query + query.setRange(0, max); + } + + List result = (List) query.execute(catName, dbName, tblName); + for (Object[] row : result) { + String location = (String) row[1]; + if (baseLocationToNotShow != null && location != null && + FileUtils.isSubdirectory(baseLocationToNotShow, location)) { + location = null; + } + partLocations.put((String) row[0], location); + } + LOG.debug("Done executing query for getPartitionLocations"); + return partLocations; + } + + private void dropPartitionsViaJdo(String catName, String dbName, String tblName, List partNames, + AtomicReference message) throws MetaException { + if (partNames.isEmpty()) { + return; + } + int batch = batchSize == NO_BATCHING ? 1 : (partNames.size() + batchSize) / batchSize; + AtomicLong batchIdx = new AtomicLong(1); + AtomicLong timeSpent = new AtomicLong(0); + Batchable.runBatched(batchSize, partNames, new Batchable() { + @Override + public List run(List input) throws MetaException { + StringBuilder progress = new StringBuilder("Dropping partitions, batch: "); + long start = System.currentTimeMillis(); + progress.append(batchIdx.get()).append("/").append(batch); + if (batchIdx.get() > 1) { + long leftTime = (batch - batchIdx.get()) * timeSpent.get() / batchIdx.get(); + progress.append(", time left: ").append(leftTime).append("ms"); + } + message.set(progress.toString()); + // Delete all things. + dropPartitionGrantsNoTxn(catName, dbName, tblName, input); + dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, input); + dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, input); + + // CDs are reused; go try partition SDs, detach all CDs from SDs, then remove unused CDs. 
        // Detach CDs from this batch's SDs, then delete any CD no longer referenced anywhere.
        for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, input)) {
          removeUnusedColumnDescriptor(mcd);
        }
        dropPartitionsNoTxn(catName, dbName, tblName, input);
        timeSpent.addAndGet(System.currentTimeMillis() - start);
        batchIdx.incrementAndGet();
        return Collections.emptyList();
      }
    });
  }

  /**
   * Checks if a column descriptor has any remaining references by storage descriptors
   * in the db. If it does not, then delete the CD. If it does, then do nothing.
   *
   * @param oldCD the column descriptor to delete if it is no longer referenced anywhere
   */
  private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) {
    if (oldCD == null) {
      return;
    }
    LOG.debug("execute removeUnusedColumnDescriptor");
    if (!hasRemainingCDReference(oldCD)) {
      // First remove any constraints that may be associated with this CD
      Query query = pm.newQuery(MConstraint.class, "parentColumn == inCD || childColumn == inCD");
      query.declareParameters("MColumnDescriptor inCD");
      List mConstraintsList = (List) query.execute(oldCD);
      if (CollectionUtils.isNotEmpty(mConstraintsList)) {
        pm.deletePersistentAll(mConstraintsList);
      }
      // Finally remove CD
      pm.retrieve(oldCD);
      pm.deletePersistent(oldCD);
      LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor");
    }
  }

  /**
   * Checks if a column descriptor has any remaining references by storage descriptors
   * in the db.
   *
   * @param oldCD the column descriptor to check if it has references or not
   * @return true if has references
   */
  private boolean hasRemainingCDReference(MColumnDescriptor oldCD) {
    assert oldCD != null;
    Query query;
    /**
     * In order to workaround oracle not supporting limit statement caused performance issue, HIVE-9447 makes
     * all the backend DB run select count(1) from SDS where SDS.CD_ID=? to check if the specific CD_ID is
     * referenced in SDS table before drop a partition. This select count(1) statement does not scale well in
     * Postgres, and there is no index for CD_ID column in SDS table.
     * For a SDS table with with 1.5 million rows, select count(1) has average 700ms without index, while in
     * 10-20ms with index. But the statement before
     * HIVE-9447( SELECT * FROM "SDS" "A0" WHERE "A0"."CD_ID" = $1 limit 1) uses less than 10ms .
     */
    // HIVE-21075: Fix Postgres performance regression caused by HIVE-9447
    LOG.debug("The dbType is {} ", dbType.getHiveSchemaPostfix());
    if (dbType.isPOSTGRES() || dbType.isMYSQL()) {
      // Postgres/MySQL path: LIMIT 1 probe instead of a full count(1) scan.
      query = pm.newQuery(MStorageDescriptor.class, "this.cd == inCD");
      query.declareParameters("MColumnDescriptor inCD");
      List referencedSDs = null;
      LOG.debug("Executing listStorageDescriptorsWithCD");
      // User specified a row limit, set it on the Query
      query.setRange(0L, 1L);
      referencedSDs = (List) query.execute(oldCD);
      LOG.debug("Done executing query for listStorageDescriptorsWithCD");
      pm.retrieveAll(referencedSDs);
      LOG.debug("Done retrieving all objects for listStorageDescriptorsWithCD");
      //if no other SD references this CD, we can throw it out.
      return referencedSDs != null && !referencedSDs.isEmpty();
    } else {
      // Other backends (e.g. Oracle, which lacks LIMIT): count(1) probe.
      query = pm.newQuery(
          "select count(1) from org.apache.hadoop.hive.metastore.model.MStorageDescriptor where (this.cd == inCD)");
      query.declareParameters("MColumnDescriptor inCD");
      long count = (Long) query.execute(oldCD);
      //if no other SD references this CD, we can throw it out.
      return count != 0;
    }
  }

  /**
   * Detaches column descriptors from storage descriptors; returns the set of unique CDs
   * thus detached. This is done before dropping partitions because CDs are reused between
   * SDs; so, we remove the links to delete SDs and then check the returned CDs to see if
   * they are referenced by other SDs.
   */
  private Set detachCdsFromSdsNoTxn(String catName, String dbName, String tblName,
      List partNames) {
    Pair> queryWithParams = getPartQueryWithParams(pm, catName, dbName, tblName, partNames);
    Query query = queryWithParams.getLeft();
    query.setClass(MPartition.class);
    // Projection: fetch only the SD of each matching partition.
    query.setResult("sd");
    List sds =
        (List) query.executeWithMap(queryWithParams.getRight());
    HashSet candidateCds = new HashSet<>();
    for (MStorageDescriptor sd : sds) {
      if (sd != null && sd.getCD() != null) {
        candidateCds.add(sd.getCD());
        // Break the SD -> CD link so the SD can be deleted without FK violations.
        sd.setCD(null);
      }
    }
    return candidateCds;
  }

  /**
   * Bulk-deletes the MPartition rows matching the given partition names.
   * Grants, stats and CDs must already have been cleaned up by the caller.
   */
  private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List partNames) {
    Pair> queryWithParams = getPartQueryWithParams(pm, catName, dbName, tblName, partNames);
    Query query = queryWithParams.getLeft();
    query.setClass(MPartition.class);
    long deleted = query.deletePersistentAll(queryWithParams.getRight());
    LOG.debug("Deleted {} partition from store", deleted);
  }

  // Bulk-deletes partition-level privileges for the named partitions.
  private void dropPartitionGrantsNoTxn(String catName, String dbName, String tableName, List partNames) {
    Pair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames,
        MPartitionPrivilege.class, "partition.table.tableName", "partition.table.database.name",
        "partition.partitionName", "partition.table.database.catalogName");
    Query query = queryWithParams.getLeft();
    query.deletePersistentAll(queryWithParams.getRight());
  }

  /**
   * Builds a JDO query of the form
   * "tbCol == t1 && dbCol == t2 && catCol == t3 && (partCol == p0 || partCol == p1 ...)"
   * plus its positional parameter array, for the given model class.
   *
   * NOTE(review): with an empty partNames list the trailing ")" is appended without a
   * matching "(" — callers are expected to guard against empty lists (dropPartitionsViaJdo
   * does); confirm all call sites.
   */
  private Pair makeQueryByPartitionNames(String catName, String dbName, String tableName,
      List partNames, Class clazz, String tbCol, String dbCol, String partCol, String catCol) {
    StringBuilder queryStr = new StringBuilder(tbCol + " == t1 && " + dbCol + " == t2 && " + catCol + " == t3");
    StringBuilder paramStr = new StringBuilder("java.lang.String t1, java.lang.String t2, java.lang.String t3");
    Object[] params = new Object[3 + partNames.size()];
    params[0] = normalizeIdentifier(tableName);
    params[1] = normalizeIdentifier(dbName);
    params[2] = normalizeIdentifier(catName);
    int index = 0;
    for (String partName : partNames) {
      params[index + 3] = partName;
      queryStr.append(((index == 0) ? " && (" : " || ") + partCol + " == p" + index);
      paramStr.append(", java.lang.String p" + index);
      ++index;
    }
    queryStr.append(")");
    Query query = pm.newQuery(clazz, queryStr.toString());
    query.declareParameters(paramStr.toString());
    return Pair.of(query, params);
  }

  // Bulk-deletes partition-column privileges for the named partitions.
  private void dropPartitionAllColumnGrantsNoTxn(String catName, String dbName, String tableName,
      List partNames) {
    Pair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames,
        MPartitionColumnPrivilege.class, "partition.table.tableName", "partition.table.database.name",
        "partition.partitionName", "partition.table.database.catalogName");
    Query query = queryWithParams.getLeft();
    query.deletePersistentAll(queryWithParams.getRight());
  }

  // Bulk-deletes partition-column statistics for the named partitions.
  private void dropPartitionColumnStatisticsNoTxn(String catName, String dbName, String tableName,
      List partNames) {
    Pair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames,
        MPartitionColumnStatistics.class, "partition.table.tableName", "partition.table.database.name",
        "partition.partitionName", "partition.table.database.catalogName");
    Query query = queryWithParams.getLeft();
    query.deletePersistentAll(queryWithParams.getRight());
  }

  /**
   * Simple holder pairing an MTable with its (optionally retrieved) column descriptor.
   */
  class AttachedMTableInfo {
    MTable mtbl;
    MColumnDescriptor mcd;

    public AttachedMTableInfo() {}

    public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) {
      this.mtbl = mtbl;
      this.mcd = mcd;
    }
  }

  // Convenience overload: fetch the MTable without its column descriptor.
  private MTable getMTable(String catName, String db, String table) {
    AttachedMTableInfo nmtbl = getMTable(catName, db, table, false);
    return nmtbl.mtbl;
  }

  /**
   * Fetches the MTable (and optionally its column descriptor, which is expensive to
   * load) by catalog/db/table name. A null catalog falls back to the default catalog.
   */
  private AttachedMTableInfo getMTable(String catName, String db, String table,
      boolean retrieveCD) {
    AttachedMTableInfo nmtbl = new AttachedMTableInfo();
    catName =
normalizeIdentifier(Optional.ofNullable(catName).orElse(getDefaultCatalog(baseStore.getConf())));
    db = normalizeIdentifier(db);
    table = normalizeIdentifier(table);
    Query query = pm.newQuery(MTable.class,
        "tableName == table && database.name == db && database.catalogName == catname");
    query.declareParameters(
        "java.lang.String table, java.lang.String db, java.lang.String catname");
    query.setUnique(true);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Executing getMTable for {}",
          TableName.getQualified(catName, db, table));
    }
    MTable mtbl = (MTable) query.execute(table, db, catName);
    pm.retrieve(mtbl);
    // Retrieving CD can be expensive and unnecessary, so do it only when required.
    if (mtbl != null && retrieveCD) {
      pm.retrieve(mtbl.getSd());
      pm.retrieveAll(mtbl.getSd().getCD());
      nmtbl.mcd = mtbl.getSd().getCD();
    }
    nmtbl.mtbl = mtbl;
    return nmtbl;
  }

  /**
   * Fetches a table as a Thrift object, attaching creation metadata for materialized
   * views and reconciling transactional-table statistics against the caller's
   * snapshot (writeIdList). Returns null semantics follow convertToTable for a
   * missing table.
   *
   * @param table       qualified table name
   * @param writeIdList caller's valid-write-id list; null skips the stats check
   * @param tableId     unused here — presumably for cross-checking identity; confirm with interface
   */
  @Override
  public Table getTable(TableName table, String writeIdList, long tableId)
      throws MetaException {
    Table tbl;
    String catName = normalizeIdentifier(table.getCat());
    String dbName = normalizeIdentifier(table.getDb());
    String tableName = normalizeIdentifier(table.getTable());
    MTable mtable = getMTable(catName, dbName, tableName);
    tbl = convertToTable(mtable, conf);
    // Retrieve creation metadata if needed
    if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
      tbl.setCreationMetadata(
          convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName), baseStore));
    }

    // If transactional non partitioned table,
    // check whether the current version table statistics
    // in the metastore comply with the client query's snapshot isolation.
    // Note: a partitioned table has table stats and table snapshot in MPartition.
    if (writeIdList != null) {
      boolean isTxn = TxnUtils.isTransactionalTable(tbl);
      if (isTxn && !areTxnStatsSupported) {
        // Txn stats disabled globally: mark stats stale on the returned copy only.
        StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
        LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
      } else if (isTxn && tbl.getPartitionKeysSize() == 0) {
        if (isCurrentStatsValidForTheQuery(mtable.getParameters(),
            mtable.getWriteId(), writeIdList, false)) {
          tbl.setIsStatsCompliant(true);
        } else {
          tbl.setIsStatsCompliant(false);
          // Do not make persistent the following state since it is the query specific (not global).
          StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
          LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
        }
      }
    }
    return tbl;
  }

  /**
   * Fetches the creation-metadata row of a materialized view, or null if absent.
   */
  private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) {
    Query query = pm.newQuery(
        MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat");
    query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat");
    query.setUnique(true);
    MCreationMetadata mcm = (MCreationMetadata) query.execute(tblName, dbName, catName);
    pm.retrieve(mcm);
    return mcm;
  }

  /**
   * Decides whether stored statistics (stamped with statsWriteId) are visible and
   * valid under the querying transaction's snapshot.
   *
   * @param statsParams           entity parameters holding the COLUMN_STATS_ACCURATE state
   * @param statsWriteId          write ID the stats were written under; < 1 means non-transactional
   * @param queryValidWriteIdList serialized ValidWriteIdList of the query; may be null
   * @param isCompleteStatsWriter true when the caller is writing complete stats and may
   *                              overwrite stats left by aborted transactions
   */
  // TODO: move to somewhere else
  public static boolean isCurrentStatsValidForTheQuery(
      Map statsParams, long statsWriteId, String queryValidWriteIdList,
      boolean isCompleteStatsWriter) throws MetaException {

    // Note: can be changed to debug/info to verify the calls.
    LOG.debug("isCurrentStatsValidForTheQuery with stats write ID {}; query {}; writer: {} params {}",
        statsWriteId, queryValidWriteIdList, isCompleteStatsWriter, statsParams);
    // Return true since the stats do not seem to be transactional.
    if (statsWriteId < 1) {
      return true;
    }
    // This COLUMN_STATS_ACCURATE(CSA) state checking also includes the case that the stats is
    // written by an aborted transaction but TXNS has no entry for the transaction
    // after compaction. Don't check for a complete stats writer - it may replace invalid stats.
    if (!isCompleteStatsWriter && !StatsSetupConst.areBasicStatsUptoDate(statsParams)) {
      return false;
    }

    if (queryValidWriteIdList != null) { // Can be null when stats are being reset to invalid.
      ValidWriteIdList list4TheQuery = ValidReaderWriteIdList.fromValue(queryValidWriteIdList);
      // Just check if the write ID is valid. If it's valid (i.e. we are allowed to see it),
      // that means it cannot possibly be a concurrent write. If it's not valid (we are not
      // allowed to see it), that means it's either concurrent or aborted, same thing for us.
      if (list4TheQuery.isWriteIdValid(statsWriteId)) {
        return true;
      }
      // Updater is also allowed to overwrite stats from aborted txns, as long as they are not concurrent.
      if (isCompleteStatsWriter && list4TheQuery.isWriteIdAborted(statsWriteId)) {
        return true;
      }
    }

    return false;
  }

  /**
   * Adds a batch of partitions to a table, propagating table-level and table-column
   * grants to each new partition when the table has PARTITION_LEVEL_PRIVILEGE set.
   * Uses the direct-SQL path when possible, falling back to JDO persistence.
   *
   * @param tableName target table
   * @param parts     partitions to add; each must name the same db/table
   * @return always true on success
   * @throws InvalidObjectException if the table does not exist
   * @throws MetaException          if a partition names a different db/table, or persistence fails
   */
  @Override
  public boolean addPartitions(TableName tableName, List parts) throws InvalidObjectException, MetaException {
    String catName = normalizeIdentifier(tableName.getCat());
    String dbName = normalizeIdentifier(tableName.getDb());
    String tblName = normalizeIdentifier(tableName.getTable());
    List tabGrants = null;
    List tabColumnGrants = null;
    MTable table = this.getMTable(catName, dbName, tblName);
    if (table == null) {
      throw new InvalidObjectException("Unable to add partitions because "
          + tableName + " does not exist");
    }
    // Only copy grants down to partitions when the table opts into partition-level privileges.
    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      tabGrants = listAllTableGrants(catName, dbName, tblName);
      tabColumnGrants = listTableAllColumnGrants(catName, dbName, tblName, null);
    }
    List mParts = new ArrayList<>();
    List> mPartPrivilegesList = new ArrayList<>();
    List> mPartColPrivilegesList = new ArrayList<>();
    for (Partition part : parts) {
      // Guard against partitions addressed at a different table than the target.
      if (!part.getTableName().equalsIgnoreCase(tblName) || !part.getDbName().equalsIgnoreCase(dbName)) {
        throw new MetaException("Partition does not belong to target table "
            + dbName + "." + tblName + ": " + part);
      }
      MPartition mpart = convertToMPart(part, table);
      mParts.add(mpart);
      int now = (int) (System.currentTimeMillis() / 1000);
      // Mirror each table-level grant onto the new partition.
      List mPartPrivileges = new ArrayList<>();
      if (tabGrants != null) {
        for (MTablePrivilege tab: tabGrants) {
          MPartitionPrivilege mPartPrivilege = new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(),
              mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption(),
              tab.getAuthorizer());
          mPartPrivileges.add(mPartPrivilege);
        }
      }

      // Mirror each table-column grant onto the new partition's columns.
      List mPartColumnPrivileges = new ArrayList<>();
      if (tabColumnGrants != null) {
        for (MTableColumnPrivilege col : tabColumnGrants) {
          MPartitionColumnPrivilege mPartColumnPrivilege = new MPartitionColumnPrivilege(col.getPrincipalName(),
              col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now, col.getGrantor(),
              col.getGrantorType(), col.getGrantOption(), col.getAuthorizer());
          mPartColumnPrivileges.add(mPartColumnPrivilege);
        }
      }
      mPartPrivilegesList.add(mPartPrivileges);
      mPartColPrivilegesList.add(mPartColumnPrivileges);
    }
    if (CollectionUtils.isNotEmpty(mParts)) {
      GetHelper helper = new GetHelper<>(this, tableName) {
        @Override
        protected Void getSqlResult() throws MetaException {
          getDirectSql().addPartitions(mParts, mPartPrivilegesList, mPartColPrivilegesList);
          return null;
        }

        @Override
        protected Void getJdoResult() {
          // Persist partitions and their privileges in a single makePersistentAll call.
          List toPersist = new ArrayList<>(mParts);
          mPartPrivilegesList.forEach(toPersist::addAll);
          mPartColPrivilegesList.forEach(toPersist::addAll);
          pm.makePersistentAll(toPersist);
          pm.flush();
          return null;
        }

        @Override
        protected String describeResult() {
          return "add partitions";
        }
      };
      try {
        helper.run(false);
      } catch (NoSuchObjectException e) {
        throw newMetaException(e);
      }
    }
    return true;
  }

  /**
   * Lists all table-level privileges granted on the given table.
   */
  private List listAllTableGrants(String catName, String dbName, String tableName) {
    List mSecurityTabList = new ArrayList<>();
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    LOG.debug("Executing listAllTableGrants");

    String queryStr = "table.tableName == t1 && table.database.name == t2" +
        "&& table.database.catalogName == t3";
    Query query = pm.newQuery(MTablePrivilege.class, queryStr);
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
    List mPrivs =
        (List) query.executeWithArray(tableName, dbName, catName);
    LOG.debug("Done executing query for listAllTableGrants");
    pm.retrieveAll(mPrivs);
    mSecurityTabList.addAll(mPrivs);
    LOG.debug("Done retrieving all objects for listAllTableGrants");
    return mSecurityTabList;
  }

  /**
   * Lists table-column privileges for a table, optionally restricted to a single
   * authorizer. A null authorizer returns grants from all authorizers.
   */
  private List listTableAllColumnGrants(
      String catName, String dbName, String tableName, String authorizer) {
    Query query;
    List mTblColPrivilegeList = new ArrayList<>();
    tableName = normalizeIdentifier(tableName);
    dbName = normalizeIdentifier(dbName);
    catName = normalizeIdentifier(catName);
    LOG.debug("Executing listTableAllColumnGrants");
    List mPrivs = null;
    if (authorizer != null) {
      String queryStr = "table.tableName == t1 && table.database.name == t2 &&" +
          "table.database.catalogName == t3 && authorizer == t4";
      query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " +
          "java.lang.String t4");
      mPrivs = (List) query.executeWithArray(tableName, dbName, catName, authorizer);
    } else {
      String queryStr = "table.tableName == t1 && table.database.name == t2 &&" +
          "table.database.catalogName == t3";
      query = pm.newQuery(MTableColumnPrivilege.class, queryStr);
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
      mPrivs = (List) query.executeWithArray(tableName, dbName, catName);
    }
    LOG.debug("Query to obtain objects for listTableAllColumnGrants finished");
    pm.retrieveAll(mPrivs);
    LOG.debug("RetrieveAll on all the objects for listTableAllColumnGrants finished");
    mTblColPrivilegeList.addAll(mPrivs);
    LOG.debug("Done retrieving " + mPrivs.size() + " objects for listTableAllColumnGrants");
    return mTblColPrivilegeList;
  }

  /**
   * Fetches a single partition by its values, reconciling transactional statistics
   * against the caller's snapshot when validWriteIds is supplied.
   *
   * @param tabName       qualified table name
   * @param part_vals     partition values identifying the partition
   * @param validWriteIds serialized ValidWriteIdList of the query; null skips the stats check
   * @throws NoSuchObjectException when the table or the partition does not exist
   */
  @Override
  public Partition getPartition(TableName tabName, List part_vals, String validWriteIds)
      throws MetaException, NoSuchObjectException {
    String catName = normalizeIdentifier(tabName.getCat());
    String dbName = normalizeIdentifier(tabName.getDb());
    String tableName = normalizeIdentifier(tabName.getTable());
    Partition part = null;
    MTable table = this.getMTable(catName, dbName, tableName);
    if (table == null) {
      throw new NoSuchObjectException("Unable to get partition because "
          + TableName.getQualified(catName, dbName, tableName)
          + " does not exist");
    }
    MPartition mpart = getMPartition(catName, dbName, tableName, part_vals, table);
    part = convertToPart(catName, dbName, tableName, mpart,
        TxnUtils.isAcidTable(table.getParameters()), conf);
    // A null conversion result means the partition row was not found.
    if (part == null) {
      throw new NoSuchObjectException("partition values="
          + part_vals.toString());
    }

    part.setValues(part_vals);
    // If transactional table partition, check whether the current version partition
    // statistics in the metastore comply with the client query's snapshot isolation.
    long statsWriteId = mpart.getWriteId();
    if (TxnUtils.isTransactionalTable(table.getParameters())) {
      if (!areTxnStatsSupported) {
        // Do not make persistent the following state since it is query specific (not global).
+ StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); + } else if (validWriteIds != null) { + if (isCurrentStatsValidForTheQuery(part.getParameters(), statsWriteId, validWriteIds, false)) { + part.setIsStatsCompliant(true); + } else { + part.setIsStatsCompliant(false); + // Do not make persistent the following state since it is query specific (not global). + StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters."); + } + } + } + return part; + } + + /** + * Getting MPartition object. Use this method only if the partition name is not available, + * since then the table will be queried to get the partition keys. + * @param catName The catalogue + * @param dbName The database + * @param tableName The table + * @param part_vals The values defining the partition + * @return The MPartition object in the backend database + */ + private MPartition getMPartition(String catName, String dbName, String tableName, List part_vals, MTable mtbl) + throws MetaException { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (mtbl == null) { + mtbl = getMTable(catName, dbName, tableName); + if (mtbl == null) { + return null; + } + } + // Change the query to use part_vals instead of the name which is + // redundant TODO: callers of this often get part_vals out of name for no reason... 
+ String name = + Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); + MPartition result = getMPartition(catName, dbName, tableName, name); + return result; + } + + @Override + public List getPartitions(TableName table, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tblName = normalizeIdentifier(table.getTable()); + return new GetListHelper(this, table) { + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitions(catName, dbName, tblName, args); + } + @Override + protected List getJdoResult() throws MetaException { + try { + return convertToParts(catName, dbName, tblName, + listMPartitions(catName, dbName, tblName, args.getMax()), false, conf, args); + } catch (Exception e) { + LOG.error("Failed to convert to parts", e); + throw new MetaException(e.getMessage()); + } + } + }.run(false); + } + + private List listMPartitions(String catName, String dbName, String tableName, int max) { + LOG.debug("Executing listMPartitions"); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + + Query query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setOrdering("partitionName ascending"); + if (max >= 0) { + query.setRange(0, max); + } + final List mparts = (List) query.execute(tableName, dbName, catName); + LOG.debug("Done executing query for listMPartitions"); + + pm.retrieveAll(mparts); + pm.makeTransientAll(mparts); + + LOG.debug("Done retrieving all objects for listMPartitions {}", mparts); + + return Collections.unmodifiableList(new ArrayList<>(mparts)); + } + + @Override + public Table alterTable(TableName tableName, Table newTable, String 
queryValidWriteIds) + throws InvalidObjectException, MetaException { + String name = normalizeIdentifier(tableName.getTable()); + String dbname = normalizeIdentifier(tableName.getDb()); + String catName = normalizeIdentifier(tableName.getCat()); + MTable newt = convertToMTable(newTable, baseStore); + if (newt == null) { + throw new InvalidObjectException("new table is invalid"); + } + + MTable oldt = getMTable(catName, dbname, name); + if (oldt == null) { + throw new MetaException("table " + dbname + "." + name + " doesn't exist"); + } + + // For now only alter name, owner, parameters, cols, bucketcols are allowed + oldt.setDatabase(newt.getDatabase()); + oldt.setTableName(normalizeIdentifier(newt.getTableName())); + boolean isTxn = TxnUtils.isTransactionalTable(newTable); + boolean isToTxn = isTxn && !TxnUtils.isTransactionalTable(oldt.getParameters()); + if (!isToTxn && isTxn && areTxnStatsSupported) { + // Transactional table is altered without a txn. Make sure there are no changes to the flag. + String errorMsg = verifyStatsChangeCtx(TableName.getDbTable(name, dbname), oldt.getParameters(), + newTable.getParameters(), newTable.getWriteId(), queryValidWriteIds, false); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + } + oldt.setParameters(newt.getParameters()); + oldt.setOwner(newt.getOwner()); + oldt.setOwnerType(newt.getOwnerType()); + // Fully copy over the contents of the new SD into the old SD, + // so we don't create an extra SD in the metastore db that has no references. 
+ MColumnDescriptor oldCD = null; + MStorageDescriptor oldSD = oldt.getSd(); + if (oldSD != null) { + oldCD = oldSD.getCD(); + } + copyMSD(newt.getSd(), oldt.getSd()); + removeUnusedColumnDescriptor(oldCD); + oldt.setRetention(newt.getRetention()); + oldt.setPartitionKeys(newt.getPartitionKeys()); + oldt.setTableType(newt.getTableType()); + oldt.setLastAccessTime(newt.getLastAccessTime()); + oldt.setViewOriginalText(newt.getViewOriginalText()); + oldt.setViewExpandedText(newt.getViewExpandedText()); + oldt.setRewriteEnabled(newt.isRewriteEnabled()); + + // If transactional, update the stats state for the current Stats updater query. + // Set stats invalid for ACID conversion; it doesn't pass in the write ID. + if (isTxn) { + if (!areTxnStatsSupported || isToTxn) { + StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); + } else if (queryValidWriteIds != null && newTable.getWriteId() > 0) { + // Check concurrent INSERT case and set false to the flag. + if (!isCurrentStatsValidForTheQuery(oldt.getParameters(), oldt.getWriteId(), queryValidWriteIds, true)) { + StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + + dbname + "." + name + ". will be made persistent."); + } + assert newTable.getWriteId() > 0; + oldt.setWriteId(newTable.getWriteId()); + } + } + newTable = convertToTable(oldt, conf); + return newTable; + } + + + private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) { + oldSd.setLocation(newSd.getLocation()); + // If the columns of the old column descriptor != the columns of the new one, + // then change the old storage descriptor's column descriptor. + // Convert the MFieldSchema's to their thrift object counterparts, because we maintain + // datastore identity (i.e., identity of the model objects are managed by JDO, + // not the application). 
+ List oldCols = oldSd.getCD() != null && oldSd.getCD().getCols() != null ? + convertToFieldSchemas(oldSd.getCD().getCols()) : null; + List newCols = newSd.getCD() != null && newSd.getCD().getCols() != null ? + convertToFieldSchemas(newSd.getCD().getCols()) : null; + if (oldCols == null || !oldCols.equals(newCols)) { + // First replace any constraints that may be associated with this CD + // Create mapping from old col indexes to new col indexes + if (oldCols != null && newCols != null) { + Map mapping = new HashMap<>(); + for (int i = 0; i < oldCols.size(); i++) { + FieldSchema oldCol = oldCols.get(i); + //TODO: replace for loop with list.indexOf() + for (int j = 0; j < newCols.size(); j++) { + FieldSchema newCol = newCols.get(j); + if (oldCol.equals(newCol)) { + mapping.put(i, j); + break; + } + } + } + // If we find it, we will change the reference for the CD. + // If we do not find it, i.e., the column will be deleted, we do not change it + // and we let the logic in removeUnusedColumnDescriptor take care of it + try (QueryWrapper query = new QueryWrapper(pm.newQuery(MConstraint.class, "parentColumn == inCD || childColumn == inCD"))) { + query.declareParameters("MColumnDescriptor inCD"); + List mConstraintsList = (List) query.execute(oldSd.getCD()); + pm.retrieveAll(mConstraintsList); + for (MConstraint mConstraint : mConstraintsList) { + if (oldSd.getCD().equals(mConstraint.getParentColumn())) { + Integer newIdx = mapping.get(mConstraint.getParentIntegerIndex()); + if (newIdx != null) { + mConstraint.setParentColumn(newSd.getCD()); + mConstraint.setParentIntegerIndex(newIdx); + } + } + if (oldSd.getCD().equals(mConstraint.getChildColumn())) { + Integer newIdx = mapping.get(mConstraint.getChildIntegerIndex()); + if (newIdx != null) { + mConstraint.setChildColumn(newSd.getCD()); + mConstraint.setChildIntegerIndex(newIdx); + } + } + } + pm.makePersistentAll(mConstraintsList); + } + // Finally replace CD + oldSd.setCD(newSd.getCD()); + } + } + + 
oldSd.setBucketCols(newSd.getBucketCols()); + oldSd.setIsCompressed(newSd.isCompressed()); + oldSd.setInputFormat(newSd.getInputFormat()); + oldSd.setOutputFormat(newSd.getOutputFormat()); + oldSd.setNumBuckets(newSd.getNumBuckets()); + oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName()); + oldSd.getSerDeInfo().setSerializationLib( + newSd.getSerDeInfo().getSerializationLib()); + oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters()); + oldSd.getSerDeInfo().setDescription(newSd.getSerDeInfo().getDescription()); + oldSd.setSkewedColNames(newSd.getSkewedColNames()); + oldSd.setSkewedColValues(newSd.getSkewedColValues()); + oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps()); + oldSd.setSortCols(newSd.getSortCols()); + oldSd.setParameters(newSd.getParameters()); + oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories()); + } + + @Override + public void createTable(Table tbl) throws InvalidObjectException, MetaException { + MTable mtbl = convertToMTable(tbl, baseStore);; + + if (TxnUtils.isTransactionalTable(tbl)) { + mtbl.setWriteId(tbl.getWriteId()); + } + pm.makePersistent(mtbl); + + if (tbl.getCreationMetadata() != null) { + MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata(), baseStore); + pm.makePersistent(mcm); + } + tbl.setId(mtbl.getId()); + + PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); + List toPersistPrivObjs = new ArrayList<>(); + if (principalPrivs != null) { + int now = (int) (System.currentTimeMillis() / 1000); + + Map> userPrivs = principalPrivs.getUserPrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER, "SQL"); + + Map> groupPrivs = principalPrivs.getGroupPrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP, "SQL"); + + Map> rolePrivs = principalPrivs.getRolePrivileges(); + putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, 
PrincipalType.ROLE, "SQL"); + } + pm.makePersistentAll(toPersistPrivObjs); + } + + @Override + public boolean dropPartitions(TableName tableName, List partNames) + throws MetaException, NoSuchObjectException { + if (CollectionUtils.isEmpty(partNames)) { + return false; + } + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + new GetListHelper(this, tableName) { + @Override + protected List getSqlResult() throws MetaException { + getDirectSql().dropPartitionsViaSqlFilter(catName, dbName, tblName, partNames); + return Collections.emptyList(); + } + @Override + protected List getJdoResult() throws MetaException { + dropPartitionsViaJdo(catName, dbName, tblName, partNames, new AtomicReference<>()); + return Collections.emptyList(); + } + }.run(false); + return true; + } + + @Override + public List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) + throws MetaException { + try { + final String db_name = normalizeIdentifier(dbName); + final String cat_name = normalizeIdentifier(catName); + return new GetListHelper(this, null) { + @Override + protected boolean canUseDirectSql() throws MetaException { + return (pattern == null || pattern.equals(".*")); + } + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getTables(cat_name, db_name, tableType, limit); + } + + @Override + protected List getJdoResult() throws MetaException, NoSuchObjectException { + return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType, limit); + } + }.run(false); + } catch (NoSuchObjectException nse) { + throw new MetaException(nse.getMessage()); + } + } + + private List getTablesInternalViaJdo(String catName, String dbName, String pattern, + TableType tableType, int limit) { + dbName = normalizeIdentifier(dbName); + // Take the pattern and split it on the | to get all the composing 
+ // patterns + List parameterVals = new ArrayList<>(); + StringBuilder filterBuilder = new StringBuilder(); + //adds database.name == dbName to the filter + appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); + if(pattern != null) { + appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals); + } + if(tableType != null) { + appendSimpleCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals); + } + + Query query = pm.newQuery(MTable.class, filterBuilder.toString()); + query.setResult("tableName"); + query.setOrdering("tableName ascending"); + if (limit >= 0) { + query.setRange(0, limit); + } + Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[0])); + return new ArrayList<>(names); + } + + + @Override + public List
getTableObjectsByName(String catName, String db, List tbl_names, + GetProjectionsSpec projectionSpec, String tablePattern) throws MetaException, UnknownDBException { + List
tables = new ArrayList<>(); + List mtables = null; + catName = normalizeIdentifier(catName); + + List lowered_tbl_names = new ArrayList<>(); + if(tbl_names != null) { + lowered_tbl_names = new ArrayList<>(tbl_names.size()); + for (String t : tbl_names) { + lowered_tbl_names.add(normalizeIdentifier(t)); + } + } + + StringBuilder filterBuilder = new StringBuilder(); + List parameterVals = new ArrayList<>(); + appendPatternCondition(filterBuilder, "database.name", db, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); + if(tbl_names != null){ + appendSimpleCondition(filterBuilder, "tableName", lowered_tbl_names.toArray(new String[0]), parameterVals); + } + if(tablePattern != null){ + appendPatternCondition(filterBuilder, "tableName", tablePattern, parameterVals); + } + Query query = pm.newQuery(MTable.class, filterBuilder.toString()) ; + List projectionFields = null; + + // If a projection specification has been set, validate it and translate it to JDO columns. + if (projectionSpec != null) { + //Validate the projection fields for multi-valued fields. + projectionFields = TableFields.getMFieldNames(projectionSpec.getFieldList()); + } + + // If the JDO translation resulted in valid JDO columns names, use it to create a projection for the JDO query. + if (projectionFields != null) { + // fetch partially filled tables using result clause + query.setResult(Joiner.on(',').join(projectionFields)); + } + + if (projectionFields == null) { + mtables = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); + } else { + if (projectionFields.size() > 1) { + // Execute the query to fetch the partial results. 
+ List results = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); + // Declare the tables array to return the list of tables + mtables = new ArrayList<>(results.size()); + // Iterate through each row of the result and create the MTable object. + for (Object[] row : results) { + MTable mtable = new MTable(); + int i = 0; + for (Object val : row) { + MetaStoreServerUtils.setNestedProperty(mtable, projectionFields.get(i), val, true); + i++; + } + mtables.add(mtable); + } + } else if (projectionFields.size() == 1) { + // Execute the query to fetch the partial results. + List results = (List) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); + // Iterate through each row of the result and create the MTable object. + mtables = new ArrayList<>(results.size()); + for (Object row : results) { + MTable mtable = new MTable(); + MetaStoreServerUtils.setNestedProperty(mtable, projectionFields.get(0), row, true); + mtables.add(mtable); + } + } + } + + if (mtables == null || mtables.isEmpty()) { + try { + baseStore.ensureGetMDatabase(catName, db); + } catch (NoSuchObjectException nse) { + throw new UnknownDBException(nse.getMessage()); + } + } else { + for (Iterator iter = mtables.iterator(); iter.hasNext(); ) { + Table tbl = convertToTable((MTable) iter.next(), conf); + // Retrieve creation metadata if needed + if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { + tbl.setCreationMetadata( + convertToCreationMetadata( + getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName()), baseStore)); + } + tables.add(tbl); + } + } + return tables; + } + + @Override + public List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); + List params = new ArrayList<>(Arrays.asList(catName, TableType.MATERIALIZED_VIEW.toString(), true)); + if (dbName != null) { + 
params.add(normalizeIdentifier(dbName)); + } + Query query = pm.newQuery(MTable.class, + "database.catalogName == cat && tableType == tt && rewriteEnabled == re" + + (dbName != null ? " && database.name == db" : "")); + query.declareParameters( + "java.lang.String cat, java.lang.String tt, boolean re" + ((dbName != null) ? " , java.lang.String db" : "")); + query.setResult("tableName"); + Collection names = (Collection) query.executeWithArray(params.toArray()); + return new ArrayList<>(names); + } + + @Override + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) + throws MetaException { + List metas = new ArrayList<>(); + try { + // Take the pattern and split it on the | to get all the composing + // patterns + StringBuilder filterBuilder = new StringBuilder(); + List parameterVals = new ArrayList<>(); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); + if (dbNames != null && !dbNames.equals("*")) { + appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals); + } + if (tableNames != null && !tableNames.equals("*")) { + appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals); + } + if (tableTypes != null && !tableTypes.isEmpty()) { + appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("getTableMeta with filter " + filterBuilder + " params: " + + StringUtils.join(parameterVals, ", ")); + } + // Add the fetch group here which retrieves the database object along with the MTable + // objects. If we don't prefetch the database object, we could end up in a situation where + // the database gets dropped while we are looping through the tables throwing a + // JDOObjectNotFoundException. 
This causes HMS to go into a retry loop which greatly degrades + // performance of this function when called with dbNames="*" and tableNames="*" (fetch all + // tables in all databases, essentially a full dump) + pm.getFetchPlan().addGroup(FetchGroups.FETCH_DATABASE_ON_MTABLE); + Query query = pm.newQuery(MTable.class, filterBuilder.toString()) ; + query.setResult("database.name, tableName, tableType, parameters.get(\"comment\"), owner, ownerType"); + List tables = (List) query.executeWithArray(parameterVals.toArray(new String[0])); + for (Object[] table : tables) { + TableMeta metaData = new TableMeta(table[0].toString(), table[1].toString(), table[2].toString()); + metaData.setCatName(catName); + if (table[3] != null) { + metaData.setComments(table[3].toString()); + } + if (table[4] != null) { + metaData.setOwnerName(table[4].toString()); + } + if (table[5] != null) { + metaData.setOwnerType(getPrincipalTypeFromStr(table[5].toString())); + } + metas.add(metaData); + } + } finally { + pm.getFetchPlan().removeGroup(FetchGroups.FETCH_DATABASE_ON_MTABLE); + } + return metas; + } + + @Override + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) + throws MetaException, UnknownDBException { + Query query = null; + List tableNames = new ArrayList<>(); + LOG.debug("Executing listTableNamesByFilter"); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + + try { + baseStore.ensureGetMDatabase(catName, dbName); + } catch (NoSuchObjectException nse) { + throw new UnknownDBException(nse.getMessage()); + } + + Map params = new HashMap<>(); + String queryFilterString = makeQueryFilterString(catName, dbName, null, filter, params); + query = pm.newQuery(MTable.class); + query.declareImports("import java.lang.String"); + query.setResult("tableName"); + query.setResultClass(java.lang.String.class); + if (maxTables >= 0) { + query.setRange(0, maxTables); + } + LOG.debug("filter specified is {}, JDOQL 
filter is {}", filter, queryFilterString); + if (LOG.isDebugEnabled()) { + for (Map.Entry entry : params.entrySet()) { + LOG.debug("key: {} value: {} class: {}", entry.getKey(), entry.getValue(), + entry.getValue().getClass().getName()); + } + } + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setFilter(queryFilterString); + Collection names = (Collection)query.executeWithMap(params); + // have to emulate "distinct", otherwise tables with the same name may be returned + tableNames = new ArrayList<>(new HashSet<>(names)); + LOG.debug("Done executing query for listTableNamesByFilter"); + return tableNames; + } + + @Override + public List listPartitionNamesByFilter(TableName tableName, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException { + + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + + MTable mTable = ensureGetMTable(tableName); + List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); + String filter = args.getFilter(); + final ExpressionTree tree = (filter != null && !filter.isEmpty()) + ? 
PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; + return new GetListHelper(this, tableName) { + private final MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(); + + @Override + protected boolean canUseDirectSql() throws MetaException { + return getDirectSql().generateSqlFilterForPushdown(catName, dbName, tblName, + partitionKeys, tree, null, filter); + } + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitionNamesViaSql(filter, partitionKeys, + getDefaultPartitionName(args.getDefaultPartName()), null, args.getMax()); + } + + @Override + protected List getJdoResult() + throws MetaException, NoSuchObjectException, InvalidObjectException { + return getPartitionNamesViaOrm(catName, dbName, tblName, tree, null, + args.getMax(), true, partitionKeys); + } + }.run(false); + } + + private String makeParameterDeclarationStringObj(Map params) { + //Create the parameter declaration string + StringBuilder paramDecl = new StringBuilder(); + for (Map.Entry entry : params.entrySet()) { + paramDecl.append(", "); + paramDecl.append(entry.getValue().getClass().getName()); + paramDecl.append(' '); + paramDecl.append(entry.getKey()); + } + return paramDecl.toString(); + } + + /** + * Makes a JDO query filter string for tables or partitions. + * @param dbName Database name. + * @param table Table. If null, the query returned is over tables in a database. + * If not null, the query returned is over partitions in a table. + * @param tree The expression tree from which JDOQL filter will be made. + * @param params Parameters for the filter. Some parameters may be added here. + * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown + * by the client; if it was and we fail to create a filter, we will throw. + * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error. 
+ */ + private String makeQueryFilterString(String catName, String dbName, Table table, + ExpressionTree tree, Map params, + boolean isValidatedFilter) throws MetaException { + assert tree != null; + ExpressionTree.FilterBuilder queryBuilder = new ExpressionTree.FilterBuilder(isValidatedFilter); + if (table != null) { + queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + params.put("t1", table.getTableName()); + params.put("t2", table.getDbName()); + params.put("t3", table.getCatName()); + } else { + queryBuilder.append("database.name == dbName && database.catalogName == catName"); + params.put("dbName", dbName); + params.put("catName", catName); + } + + tree.accept(new ExpressionTree.JDOFilterGenerator(baseStore.getConf(), + table != null ? table.getPartitionKeys() : null, queryBuilder, params)); + if (queryBuilder.hasError()) { + assert !isValidatedFilter; + LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); + return null; + } + String jdoFilter = queryBuilder.getFilter(); + LOG.debug("jdoFilter = {}", jdoFilter); + return jdoFilter; + } + + private String makeQueryFilterString(String catName, String dbName, String tblName, + ExpressionTree tree, Map params, + boolean isValidatedFilter, List partitionKeys) throws MetaException { + assert tree != null; + ExpressionTree.FilterBuilder queryBuilder = new ExpressionTree.FilterBuilder(isValidatedFilter); + queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + params.put("t1", tblName); + params.put("t2", dbName); + params.put("t3", catName); + tree.accept(new ExpressionTree.JDOFilterGenerator(baseStore.getConf(), partitionKeys, queryBuilder, params)); + if (queryBuilder.hasError()) { + assert !isValidatedFilter; + LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); + return null; + } + String jdoFilter = queryBuilder.getFilter(); + 
LOG.debug("jdoFilter = {}", jdoFilter); + return jdoFilter; + } + + + private List getPartitionNamesViaOrm(String catName, String dbName, String tblName, + ExpressionTree tree, String order, Integer maxParts, boolean isValidatedFilter, + List partitionKeys) throws MetaException { + Map params = new HashMap(); + String jdoFilter = makeQueryFilterString(catName, dbName, tblName, tree, + params, isValidatedFilter, partitionKeys); + if (jdoFilter == null) { + assert !isValidatedFilter; + throw new MetaException("Failed to generate filter."); + } + + try (QueryWrapper query = new QueryWrapper(pm.newQuery( + "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition"))) { + query.setFilter(jdoFilter); + List orderSpecs = MetaStoreUtils.makeOrderSpecs(order); + StringBuilder builder = new StringBuilder(); + for (Object[] spec : orderSpecs) { + // TODO: order by casted value if the type of partition key is not string + builder.append("values.get(").append(spec[0]).append(") ").append(spec[1]).append(","); + } + if (builder.length() > 0) { + builder.setLength(builder.length() - 1); + query.setOrdering(builder.toString()); + } else { + query.setOrdering("partitionName ascending"); + } + + if (maxParts > -1) { + query.setRange(0, maxParts); + } + + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + Collection jdoRes = (Collection) query.executeWithMap(params); + List result = new LinkedList(); + for (Object partName : jdoRes) { + result.add((String) partName); + } + return result; + } + } + + /** + * Gets the default partition name. + * @param inputDefaultPartName Incoming default partition name. + * @return Valid default partition name + */ + private String getDefaultPartitionName(String inputDefaultPartName) { + return (((inputDefaultPartName == null) || (inputDefaultPartName.isEmpty())) + ? 
MetastoreConf.getVar(baseStore.getConf(), MetastoreConf.ConfVars.DEFAULTPARTITIONNAME) + : inputDefaultPartName); + } + + /** + * Gets the table object for a given table, throws if anything goes wrong. + * @param tableName Table name. + * @return Table object. + */ + @Override + public MTable ensureGetMTable(TableName tableName) + throws NoSuchObjectException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + MTable mtable = getMTable(catName, dbName, tblName); + if (mtable == null) { + throw new NoSuchObjectException( + "Specified catalog.database.table does not exist : " + tableName); + } + return mtable; + } + + @Override + public List listPartitionNames(TableName tableName, String defaultPartName, byte[] exprBytes, String order, + int maxParts) throws MetaException, NoSuchObjectException { + final String defaultPartitionName = getDefaultPartitionName(defaultPartName); + final boolean isEmptyFilter = exprBytes == null || (exprBytes.length == 1 && exprBytes[0] == -1); + ExpressionTree tmp = null; + if (!isEmptyFilter) { + tmp = PartFilterExprUtil.makeExpressionTree(expressionProxy, exprBytes, + getDefaultPartitionName(defaultPartName), baseStore.getConf()); + } + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + final ExpressionTree exprTree = tmp; + return new GetListHelper(this, tableName) { + private List getPartNamesPrunedByExpr(Table table, boolean isJdoQuery) throws MetaException { + int max = isEmptyFilter ? 
maxParts : -1; + List result; + if (isJdoQuery) { + result = getPartitionNamesViaOrm(catName, dbName, tblName, ExpressionTree.EMPTY_TREE, + order, max, true, table.getPartitionKeys()); + } else { + MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(table, false); + result = getDirectSql().getPartitionNamesViaSql(filter, table.getPartitionKeys(), + defaultPartitionName, order, max); + } + if (!isEmptyFilter) { + prunePartitionNamesByExpr(catName, dbName, tblName, result, + new GetPartitionsArgs.GetPartitionsArgsBuilder() + .expr(exprBytes).defaultPartName(defaultPartName).max(maxParts).build()); + } + return result; + } + @Override + protected List getSqlResult() throws MetaException { + MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(getTable(), false); + List partNames = null; + Table table = getTable(); + if (exprTree != null) { + if (getDirectSql().generateSqlFilterForPushdown(table.getCatName(), table.getDbName(), table.getTableName(), + getTable().getPartitionKeys(), exprTree, defaultPartitionName, filter)) { + partNames = getDirectSql().getPartitionNamesViaSql(filter, table.getPartitionKeys(), + defaultPartitionName, order, (int)maxParts); + } + } + if (partNames == null) { + partNames = getPartNamesPrunedByExpr(table, false); + } + return partNames; + } + @Override + protected List getJdoResult() throws MetaException, NoSuchObjectException { + List result = null; + if (exprTree != null) { + try { + result = getPartitionNamesViaOrm(catName, dbName, tblName, exprTree, order, + maxParts, true, getTable().getPartitionKeys()); + } catch (MetaException e) { + result = null; + } + } + if (result == null) { + result = getPartNamesPrunedByExpr(getTable(), true); + } + return result; + } + }.run(true); + } + + private boolean prunePartitionNamesByExpr(String catName, String dbName, String tblName, + List result, GetPartitionsArgs args) throws MetaException { + MTable mTable = 
getMTable(catName, dbName, tblName); + List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); + boolean hasUnknownPartitions = expressionProxy.filterPartitionsByExpr( + partitionKeys, + args.getExpr(), + getDefaultPartitionName(args.getDefaultPartName()), + result); + if (args.getMax() >= 0 && result.size() > args.getMax()) { + result = result.subList(0, args.getMax()); + } + return hasUnknownPartitions; + } + + @Override + public boolean getPartitionsByExpr(TableName tableName, List result, GetPartitionsArgs args) + throws TException { + assert result != null; + byte[] expr = args.getExpr(); + final ExpressionTree exprTree = expr.length != 0 ? PartFilterExprUtil.makeExpressionTree( + expressionProxy, expr, getDefaultPartitionName(args.getDefaultPartName()), baseStore.getConf()) : ExpressionTree.EMPTY_TREE; + final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); + + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + MTable mTable = ensureGetMTable(tableName); + List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); + boolean isAcidTable = TxnUtils.isAcidTable(mTable.getParameters()); + result.addAll(new GetListHelper(this, tableName) { + @Override + protected List getSqlResult() throws MetaException { + // If we have some sort of expression tree, try SQL filter pushdown. + if (exprTree != null) { + MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(); + if (getDirectSql().generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, + exprTree, args.getDefaultPartName(), filter)) { + String catalogName = (catName != null) ? catName : getDefaultCatalog(baseStore.getConf()); + return getDirectSql().getPartitionsViaSqlFilter(catalogName, dbName, tblName, filter, + isAcidTable, args); + } + } + // We couldn't do SQL filter pushdown. 
Get names via normal means. + List partNames = new LinkedList<>(); + hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( + catName, dbName, tblName, partitionKeys, expr, args.getDefaultPartName(), (short) args.getMax(), partNames)); + GetPartitionsArgs newArgs = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build(); + return getDirectSql().getPartitionsViaPartNames(catName, dbName, tblName, newArgs); + } + + @Override + protected List getJdoResult() throws MetaException, NoSuchObjectException { + // If we have some sort of expression tree, try JDOQL filter pushdown. + List result = null; + if (exprTree != null) { + result = getPartitionsViaOrmFilter(catName, dbName, tblName, exprTree, + false, partitionKeys, isAcidTable, args); + } + if (result == null) { + // We couldn't do JDOQL filter pushdown. Get names via normal means. + List partNames = new ArrayList<>(); + hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( + catName, dbName, tblName, partitionKeys, expr, args.getDefaultPartName(), (short) args.getMax(), partNames)); + GetPartitionsArgs newArgs = new GetPartitionsArgs.GetPartitionsArgsBuilder(args).partNames(partNames).build(); + result = getPartitionsViaOrmFilter(catName, dbName, tblName, isAcidTable, newArgs); + } + return result; + } + }.run(false)); + return hasUnknownPartitions.get(); + } + + /** + * Gets partition names from the table via ORM (JDOQL) filter pushdown. + * @param tblName The table. + * @param tree The expression tree from which JDOQL filter will be made. + * @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client + * (old hive client or non-hive one); if it was and we fail to create a filter, we will throw. + * @param args additional arguments for getting partitions + * @return Resulting partitions. Can be null if isValidatedFilter is false, and + * there was error deriving the JDO filter. 
+ */ + private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree, + boolean isValidatedFilter, List partitionKeys, boolean isAcidTable, + GetPartitionsArgs args) throws MetaException { + Map params = new HashMap<>(); + String jdoFilter = + makeQueryFilterString(catName, dbName, tblName, tree, params, isValidatedFilter, partitionKeys); + if (jdoFilter == null) { + assert !isValidatedFilter; + return null; + } + Query query = pm.newQuery(MPartition.class, jdoFilter); + if (args.getMax() >= 0) { + // User specified a row limit, set it on the Query + query.setRange(0, args.getMax()); + } + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setOrdering("partitionName ascending"); + List mparts = (List) query.executeWithMap(params); + LOG.debug("Done executing query for getPartitionsViaOrmFilter"); + pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names? + LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter"); + List results = + convertToParts(catName, dbName, tblName, mparts, isAcidTable, conf, args); + return results; + } + + /** + * Gets partition names from the table via ORM (JDOQL) name filter. + * @param dbName Database name. + * @param tblName Table name. + * @param isAcidTable True if the table is ACID + * @param args additional arguments for getting partitions + * @return Resulting partitions. 
+ */ + private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, + boolean isAcidTable, GetPartitionsArgs args) throws MetaException { + List partNames = args.getPartNames(); + if (partNames.isEmpty()) { + return Collections.emptyList(); + } + return Batchable.runBatched(batchSize, partNames, new Batchable() { + @Override + public List run(List input) throws MetaException { + Pair> queryWithParams = + getPartQueryWithParams(pm, catName, dbName, tblName, input); + + try (QueryWrapper query = new QueryWrapper(queryWithParams.getLeft())) { + query.setResultClass(MPartition.class); + query.setClass(MPartition.class); + query.setOrdering("partitionName ascending"); + + List mparts = (List) query.executeWithMap(queryWithParams.getRight()); + List partitions = convertToParts(catName, dbName, tblName, mparts, + isAcidTable, conf, args); + + return partitions; + } + } + }); + } + + + /** + * Gets the partition names from a table, pruned using an expression. + * @param catName + * @param dbName + * @param tblName + * @param expr Expression. + * @param defaultPartName Default partition name from job config, if any. + * @param maxParts Maximum number of partition names to return. + * @param result The resulting names. + * @return Whether the result contains any unknown partitions. 
+ */ + private boolean getPartitionNamesPrunedByExprNoTxn(String catName, String dbName, String tblName, List partColumns, byte[] expr, + String defaultPartName, short maxParts, List result) throws MetaException { + result.addAll(getPartitionNamesNoTxn(catName, dbName, tblName, (short) -1)); + return prunePartitionNamesByExpr(catName, dbName, tblName, result, + new GetPartitionsArgs.GetPartitionsArgsBuilder() + .expr(expr).defaultPartName(defaultPartName).max(maxParts).build()); + } + + private List getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) { + List pns = new ArrayList<>(); + if (max == 0) { + return pns; + } + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + try (QueryWrapper query = new QueryWrapper( + pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 " + + "order by partitionName asc"))) { + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + query.setResult("partitionName"); + + if (max > 0) { + query.setRange(0, max); + } + Collection names = (Collection) query.execute(dbName, tableName, catName); + pns.addAll(names); + + return pns; + } + } + + @Override + public List getPartitionsByNames(TableName tableName, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + return new GetListHelper(this, tableName) { + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitionsViaPartNames(catName, dbName, tblName, args); + } + @Override + protected List getJdoResult() throws MetaException, NoSuchObjectException { + return 
getPartitionsViaOrmFilter(catName, dbName, tblName, false, args); + } + }.run(false); + } + + @Override + public Partition alterPartition(TableName tableName, List part_vals, Partition new_part, + String queryValidWriteIds) throws InvalidObjectException, MetaException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbname = normalizeIdentifier(tableName.getDb()); + String name = normalizeIdentifier(tableName.getTable()); + AtomicReference oldCd = new AtomicReference<>(); + Partition result = alterPartitionNoTxn(catName, dbname, name, part_vals, new_part, queryValidWriteIds, oldCd); + removeUnusedColumnDescriptor(oldCd.get()); + return result; + } + + /** + * Alters an existing partition. Initiates copy of SD. Returns the old CD. + * @param part_vals Partition values (of the original partition instance) + * @param newPart Partition object containing new information + */ + private Partition alterPartitionNoTxn(String catName, String dbname, String name, + List part_vals, Partition newPart, String validWriteIds, AtomicReference oldCd) + throws InvalidObjectException, MetaException { + MTable table = this.getMTable(newPart.getCatName(), newPart.getDbName(), newPart.getTableName()); + MPartition oldp = getMPartition(catName, dbname, name, part_vals, table); + return alterPartitionNoTxn(catName, dbname, name, oldp, newPart, + validWriteIds, oldCd, table); + } + + private Partition alterPartitionNoTxn(String catName, String dbname, + String name, MPartition oldp, Partition newPart, + String validWriteIds, + AtomicReference oldCd, MTable table) + throws InvalidObjectException, MetaException { + catName = normalizeIdentifier(catName); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); + MPartition newp = convertToMPart(newPart, table); + MColumnDescriptor oldCD = null; + MStorageDescriptor oldSD = oldp.getSd(); + if (oldSD != null) { + oldCD = oldSD.getCD(); + } + if (newp == null) { + throw new 
InvalidObjectException("partition does not exist."); + } + oldp.setValues(newp.getValues()); + oldp.setPartitionName(newp.getPartitionName()); + boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); + if (isTxn && areTxnStatsSupported) { + // Transactional table is altered without a txn. Make sure there are no changes to the flag. + String errorMsg = verifyStatsChangeCtx(TableName.getDbTable(dbname, name), + oldp.getParameters(), + newPart.getParameters(), newPart.getWriteId(), validWriteIds, false); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + } + oldp.setParameters(newPart.getParameters()); + if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) { + copyMSD(newp.getSd(), oldp.getSd()); + } + if (newp.getCreateTime() != oldp.getCreateTime()) { + oldp.setCreateTime(newp.getCreateTime()); + } + if (newp.getLastAccessTime() != oldp.getLastAccessTime()) { + oldp.setLastAccessTime(newp.getLastAccessTime()); + } + + // If transactional, add/update the MUPdaterTransaction + // for the current updater query. + if (isTxn) { + if (!areTxnStatsSupported) { + StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); + } else if (validWriteIds != null && newPart.getWriteId() > 0) { + // Check concurrent INSERT case and set false to the flag. + if (!isCurrentStatsValidForTheQuery(oldp.getParameters(), oldp.getWriteId(), validWriteIds, true)) { + StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + dbname + "." + name + "." 
+ oldp.getPartitionName() + " will be made persistent."); + } + oldp.setWriteId(newPart.getWriteId()); + } + } + + oldCd.set(oldCD); + return convertToPart(catName, dbname, name, oldp, TxnUtils.isAcidTable(table.getParameters()), conf); + } + + @Override + public List alterPartitions(TableName tableName, List> part_vals, + List newParts, long writeId, String queryWriteIdList) throws InvalidObjectException, MetaException { + List results = new ArrayList<>(newParts.size()); + if (newParts.isEmpty()) { + return results; + } + try { + MTable table = ensureGetMTable(tableName); + if (writeId > 0) { + newParts.forEach(newPart -> newPart.setWriteId(writeId)); + } + List partCols = convertToFieldSchemas(table.getPartitionKeys()); + List partNames = new ArrayList<>(); + for (List partVal : part_vals) { + partNames.add(Warehouse.makePartName(partCols, partVal)); + } + results = alterPartitionsInternal(table, partNames, newParts, queryWriteIdList); + } catch (NoSuchObjectException nse) { + throw new MetaException(nse.getMessage()); + } + // commit the changes + return results; + } + + protected List alterPartitionsInternal(MTable table, + List partNames, List newParts, String queryWriteIdList) + throws InvalidObjectException, MetaException, NoSuchObjectException { + // Validate new parts: StorageDescriptor and SerDeInfo must be set in Partition. 
+ if (!TableType.VIRTUAL_VIEW.name().equals(table.getTableType())) { + for (Partition newPart : newParts) { + if (!newPart.isSetSd() || !newPart.getSd().isSetSerdeInfo()) { + throw new InvalidObjectException("Partition does not set storageDescriptor or serdeInfo."); + } + } + } + + String dbName = table.getDatabase().getName(); + String tblName = table.getTableName(); + for (Partition tmpPart : newParts) { + if (!tmpPart.getDbName().equalsIgnoreCase(dbName)) { + throw new MetaException("Invalid DB name : " + tmpPart.getDbName()); + } + if (!tmpPart.getTableName().equalsIgnoreCase(tblName)) { + throw new MetaException("Invalid table name : " + tmpPart.getDbName()); + } + } + return new GetListHelper(this, null) { + @Override + protected List getSqlResult() + throws MetaException { + return getDirectSql().alterPartitions(table, partNames, newParts, queryWriteIdList); + } + + @Override + protected List getJdoResult() + throws MetaException, InvalidObjectException { + return alterPartitionsViaJdo(table, partNames, newParts, queryWriteIdList); + } + }.run(false); + } + + private List alterPartitionsViaJdo(MTable table, List partNames, + List newParts, String queryWriteIdList) + throws MetaException, InvalidObjectException { + String catName = table.getDatabase().getCatalogName(); + String dbName = table.getDatabase().getName(); + String tblName = table.getTableName(); + List results = new ArrayList<>(newParts.size()); + List mPartitionList; + + try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && t3.contains(partitionName) " + + " && table.database.catalogName == t4"))) { + query.declareParameters("java.lang.String t1, java.lang.String t2, java.util.Collection t3, " + + "java.lang.String t4"); + mPartitionList = (List) query.executeWithArray(tblName, dbName, partNames, catName); + pm.retrieveAll(mPartitionList); + + if (mPartitionList.size() > newParts.size()) { + throw new 
MetaException("Expecting only one partition but more than one partitions are found."); + } + + Map, MPartition> mPartsMap = new HashMap(); + for (MPartition mPartition : mPartitionList) { + mPartsMap.put(mPartition.getValues(), mPartition); + } + + Set oldCds = new HashSet<>(); + AtomicReference oldCdRef = new AtomicReference<>(); + for (Partition tmpPart : newParts) { + oldCdRef.set(null); + Partition result = alterPartitionNoTxn(catName, dbName, tblName, + mPartsMap.get(tmpPart.getValues()), tmpPart, queryWriteIdList, oldCdRef, table); + results.add(result); + if (oldCdRef.get() != null) { + oldCds.add(oldCdRef.get()); + } + } + for (MColumnDescriptor oldCd : oldCds) { + removeUnusedColumnDescriptor(oldCd); + } + } + + return results; + } + + @Override + public List getPartitionsByFilter(TableName tableName, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException { + + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + + MTable mTable = ensureGetMTable(tableName); + List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); + boolean isAcidTable = TxnUtils.isAcidTable(mTable.getParameters()); + String filter = args.getFilter(); + final ExpressionTree tree = (filter != null && !filter.isEmpty()) + ? 
PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; + return new GetListHelper(this, tableName) { + private final MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(); + + @Override + protected boolean canUseDirectSql() throws MetaException { + return getDirectSql().generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, tree, null, filter); + } + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitionsViaSqlFilter(catName, dbName, tblName, filter, isAcidTable, args); + } + + @Override + protected List getJdoResult() throws MetaException, NoSuchObjectException { + return getPartitionsViaOrmFilter(catName, dbName, tblName, tree, true, + partitionKeys, isAcidTable, args); + } + }.run(false); + } + + @Override + public List getPartitionSpecsByFilterAndProjection(Table table, GetProjectionsSpec partitionsProjectSpec, + GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException { + List fieldList = null; + String inputIncludePattern = null; + String inputExcludePattern = null; + if (partitionsProjectSpec != null) { + fieldList = partitionsProjectSpec.getFieldList(); + if (partitionsProjectSpec.isSetIncludeParamKeyPattern()) { + inputIncludePattern = partitionsProjectSpec.getIncludeParamKeyPattern(); + } + if (partitionsProjectSpec.isSetExcludeParamKeyPattern()) { + inputExcludePattern = partitionsProjectSpec.getExcludeParamKeyPattern(); + } + } + TableName tableName = new TableName(table.getCatName(), table.getDbName(), table.getTableName()); + if (fieldList == null || fieldList.isEmpty()) { + // no fields are requested. 
Fallback to regular getPartitions implementation to return all the fields + GetPartitionsArgs.GetPartitionsArgsBuilder argsBuilder = new GetPartitionsArgs.GetPartitionsArgsBuilder() + .excludeParamKeyPattern(inputExcludePattern) + .includeParamKeyPattern(inputIncludePattern); + return getPartitions(tableName, argsBuilder.build()); + } + + // anonymous class below requires final String objects + final String includeParamKeyPattern = inputIncludePattern; + final String excludeParamKeyPattern = inputExcludePattern; + + return new GetListHelper(this, tableName, fieldList) { + private final MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(); + private ExpressionTree tree; + + @Override + protected boolean canUseDirectSql() throws MetaException { + if (filterSpec.isSetFilterMode() && filterSpec.getFilterMode().equals(PartitionFilterMode.BY_EXPR)) { + // if the filter mode is BY_EXPR initialize the filter and generate the expression tree + // if there are more than one filter string we AND them together + initExpressionTree(); + return getDirectSql().generateSqlFilterForPushdown(table.getCatName(), table.getDbName(), table.getTableName(), + table.getPartitionKeys(), tree, null, filter); + } + // BY_VALUES and BY_NAMES are always supported + return true; + } + + private void initExpressionTree() throws MetaException { + StringBuilder filterBuilder = new StringBuilder(); + int len = filterSpec.getFilters().size(); + List filters = filterSpec.getFilters(); + for (int i = 0; i < len; i++) { + filterBuilder.append('('); + filterBuilder.append(filters.get(i)); + filterBuilder.append(')'); + if (i + 1 < len) { + filterBuilder.append(" AND "); + } + } + String filterStr = filterBuilder.toString(); + tree = PartFilterExprUtil.parseFilterTree(filterStr); + } + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitionsUsingProjectionAndFilterSpec(getTable(), getPartitionFields(), + 
includeParamKeyPattern, excludeParamKeyPattern, filterSpec, filter); + } + + @Override + protected List getJdoResult() throws MetaException { + // For single-valued fields we can use setResult() to implement projection of fields but + // JDO doesn't support multi-valued fields in setResult() so currently JDO implementation + // fallbacks to full-partition fetch if the requested fields contain multi-valued fields + List fieldNames = PartitionProjectionEvaluator.getMPartitionFieldNames(getPartitionFields()); + Map params = new HashMap<>(); + String jdoFilter = null; + if (filterSpec.isSetFilterMode()) { + // generate the JDO filter string + switch(filterSpec.getFilterMode()) { + case BY_EXPR: + if (tree == null) { + // tree could be null when directSQL is disabled + initExpressionTree(); + } + jdoFilter = + makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, + true); + if (jdoFilter == null) { + throw new MetaException("Could not generate JDO filter from given expression"); + } + break; + case BY_NAMES: + jdoFilter = getJDOFilterStrForPartitionNames(table.getCatName(), table.getDbName(), + table.getTableName(), filterSpec.getFilters(), params); + break; + case BY_VALUES: + jdoFilter = getJDOFilterStrForPartitionVals(table, filterSpec.getFilters(), params); + break; + default: + throw new MetaException("Unsupported filter mode " + filterSpec.getFilterMode()); + } + } else { + // filter mode is not set create simple JDOFilterStr and params + jdoFilter = "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"; + params.put("t1", normalizeIdentifier(table.getTableName())); + params.put("t2", normalizeIdentifier(table.getDbName())); + params.put("t3", normalizeIdentifier(table.getCatName())); + } + try { + List mparts = listMPartitionsWithProjection(fieldNames, jdoFilter, params); + return convertToParts(table.getCatName(), table.getDbName(), table.getTableName(), + mparts, false, conf, new 
GetPartitionsArgs.GetPartitionsArgsBuilder() + .excludeParamKeyPattern(excludeParamKeyPattern) + .includeParamKeyPattern(includeParamKeyPattern) + .build()); + } catch (MetaException me) { + throw me; + } catch (Exception e) { + throw new MetaException(e.getMessage()); + } + } + }.run(true); + } + + @Override + public List listPartitionsPsWithAuth(TableName tableName, GetPartitionsArgs args) + throws MetaException, InvalidObjectException, NoSuchObjectException { + List partitions; + LOG.debug("executing listPartitionNamesPsWithAuth"); + MTable mtbl = ensureGetMTable(tableName); + String userName = args.getUserName(); + List groupNames = args.getGroupNames(); + List part_vals = args.getPart_vals(); + List partNames = args.getPartNames(); + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + boolean getauth = null != userName && null != groupNames && + "TRUE".equalsIgnoreCase( + mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE")); + if (MetaStoreUtils.arePartValsEmpty(part_vals) && partNames == null) { + partitions = getPartitions(tableName, args); + } else if (partNames != null) { + partitions = getPartitionsByNames(tableName, args); + } else { + partitions = getPartitionsByPs(tableName, args); + } + if (getauth) { + for (Partition part : partitions) { + String partName = Warehouse.makePartName(convertToFieldSchemas(mtbl + .getPartitionKeys()), part.getValues()); + PrincipalPrivilegeSet partAuth = baseStore.getPartitionPrivilegeSet(catName, dbName, + tblName, partName, userName, groupNames); + part.setPrivileges(partAuth); + } + } + return partitions; + } + + private List getPartitionsByPs(TableName tableName, GetPartitionsArgs args) + throws MetaException, NoSuchObjectException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = 
normalizeIdentifier(tableName.getTable()); + + return new GetListHelper(this, tableName) { + + @Override + protected List getSqlResult() throws MetaException { + return getDirectSql().getPartitionsViaSqlPs(getTable(), args); + } + + @Override + protected List getJdoResult() + throws MetaException, NoSuchObjectException { + List result = new ArrayList<>(); + Collection parts = getPartitionPsQueryResults(catName, dbName, tblName, + args.getPart_vals(), args.getMax(), null); + boolean isAcidTable = TxnUtils.isAcidTable(getTable()); + for (MPartition o : parts) { + Partition part = convertToPart(catName, dbName, tblName, o, isAcidTable, conf, args); + result.add(part); + } + return result; + } + }.run(true); + } + + /** + * Retrieves a Collection of partition-related results from the database that match + * the partial specification given for a specific table. + * @param dbName the name of the database + * @param tableName the name of the table + * @param part_vals the partial specification values + * @param max_parts the maximum number of partitions to return + * @param resultsCol the metadata column of the data to return, e.g. partitionName, etc. + * if resultsCol is empty or null, a collection of MPartition objects is returned + * @return A Collection of partition-related items from the db that match the partial spec + * for a table. The type of each item in the collection corresponds to the column + * you want results for. E.g., if resultsCol is partitionName, the Collection + * has types of String, and if resultsCol is null, the types are MPartition. 
+ */ + private Collection getPartitionPsQueryResults(String catName, String dbName, + String tableName, List part_vals, + int max_parts, String resultsCol) + throws MetaException, NoSuchObjectException { + + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + Table table = getTable(new TableName(catName, dbName, tableName), null, -1); + if (table == null) { + throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName) + " table not found"); + } + // size is known since it contains dbName, catName, tblName and partialRegex + // pattern + Map params = new HashMap<>(4); + String filter = getJDOFilterStrForPartitionVals(table, part_vals, params); + try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class))) { + query.setFilter(filter); + query.setOrdering("partitionName ascending"); + query.declareParameters(makeParameterDeclarationString(params)); + if (max_parts >= 0) { + // User specified a row limit, set it on the Query + query.setRange(0, max_parts); + } + if (resultsCol != null && !resultsCol.isEmpty()) { + query.setResult(resultsCol); + } + + Collection result = (Collection) query.executeWithMap(params); + + return Collections.unmodifiableCollection(new ArrayList<>(result)); + } + } + + + private String getJDOFilterStrForPartitionVals(Table table, List vals, + Map params) throws MetaException { + String partNameMatcher = MetaStoreUtils.makePartNameMatcher(table, vals, ".*"); + params.put("dbName", table.getDbName()); + params.put("catName", table.getCatName()); + params.put("tableName", table.getTableName()); + params.put("partialRegex", partNameMatcher); + return "table.database.name == dbName" + " && table.database.catalogName == catName" + + " && table.tableName == tableName" + " && partitionName.matches(partialRegex)"; + } + + // This code is only executed in JDO code path, not from direct SQL code path. 
+ private List listMPartitionsWithProjection(List fieldNames, String jdoFilter, + Map params) throws Exception { + List mparts = null; + LOG.debug("Executing listMPartitionsWithProjection"); + Query query = pm.newQuery(MPartition.class, jdoFilter); + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + query.setOrdering("partitionName ascending"); + if (fieldNames == null || fieldNames.isEmpty()) { + // full fetch of partitions + mparts = (List) query.executeWithMap(params); + pm.retrieveAll(mparts); + pm.makeTransientAll(mparts); + mparts = new ArrayList<>(mparts); + } else { + // fetch partially filled partitions using result clause + query.setResult(Joiner.on(',').join(fieldNames)); + // if more than one fields are in the result class the return type is + // List + if (fieldNames.size() > 1) { + List results = (List) query.executeWithMap(params); + mparts = new ArrayList<>(results.size()); + for (Object[] row : results) { + MPartition mpart = new MPartition(); + int i = 0; + for (Object val : row) { + MetaStoreServerUtils.setNestedProperty(mpart, fieldNames.get(i), val, true); + i++; + } + mparts.add(mpart); + } + } else { + // only one field is requested, return type is List + List results = (List) query.executeWithMap(params); + mparts = new ArrayList<>(results.size()); + for (Object row : results) { + MPartition mpart = new MPartition(); + MetaStoreServerUtils.setNestedProperty(mpart, fieldNames.get(0), row, true); + mparts.add(mpart); + } + } + } + return mparts; + } + + @Override + public int getNumPartitionsByFilter(TableName tableName, String filter) throws MetaException, NoSuchObjectException { + final ExpressionTree exprTree = org.apache.commons.lang3.StringUtils.isNotEmpty(filter) + ? 
PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; + + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + MTable mTable = ensureGetMTable(tableName); + List partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys()); + + return new GetHelper(this, tableName) { + private final MetaStoreDirectSql.SqlFilterForPushdown filter = new MetaStoreDirectSql.SqlFilterForPushdown(); + + @Override + protected String describeResult() { + return "Partition count"; + } + + @Override + protected boolean canUseDirectSql() throws MetaException { + return getDirectSql().generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, exprTree, null, filter); + } + + @Override + protected Integer getSqlResult() throws MetaException { + return getDirectSql().getNumPartitionsViaSqlFilter(filter); + } + @Override + protected Integer getJdoResult() throws MetaException, NoSuchObjectException { + return getNumPartitionsViaOrmFilter(catName ,dbName, tblName, exprTree, true, partitionKeys); + } + }.run(false); + } + + private Integer getNumPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree, boolean isValidatedFilter, List partitionKeys) + throws MetaException { + Map params = new HashMap<>(); + String jdoFilter = makeQueryFilterString(catName, dbName, tblName, tree, + params, isValidatedFilter, partitionKeys); + if (jdoFilter == null) { + assert !isValidatedFilter; + return null; + } + + Query query = pm.newQuery( + "select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"); + query.setFilter(jdoFilter); + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + Long result = (Long) query.executeWithMap(params); + + return result.intValue(); + } + + @Override + public int getNumPartitionsByPs(TableName 
tableName, List partVals) + throws MetaException, NoSuchObjectException { + + return new GetHelper(this, tableName) { + @Override + protected String describeResult() { + return "Partition count by partial values"; + } + + @Override + protected Integer getSqlResult() throws MetaException { + return getDirectSql().getNumPartitionsViaSqlPs(getTable(), partVals); + } + + @Override + protected Integer getJdoResult() + throws MetaException, NoSuchObjectException, InvalidObjectException { + // size is known since it contains dbName, catName, tblName and partialRegex pattern + Map params = new HashMap<>(4); + String filter = getJDOFilterStrForPartitionVals(getTable(), partVals, params); + try (QueryWrapper query = new QueryWrapper(pm.newQuery( + "select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"))) { + query.setFilter(filter); + query.declareParameters(makeParameterDeclarationString(params)); + Long result = (Long) query.executeWithMap(params); + + return result.intValue(); + } + } + }.run(true); + } + + @Override + public PartitionValuesResponse listPartitionValues(TableName table, List cols, boolean applyDistinct, + String filter, boolean ascending, List order, long maxParts) throws MetaException { + String catName = normalizeIdentifier(table.getCat()); + String dbName = normalizeIdentifier(table.getDb()); + String tableName = normalizeIdentifier(table.getTable()); + try { + if (filter == null || filter.isEmpty()) { + PartitionValuesResponse response = getDistinctValuesForPartitionsNoTxn(catName, dbName, + tableName, cols, applyDistinct, maxParts); + LOG.info("Number of records fetched: {}", response.getPartitionValues().size()); + return response; + } else { + PartitionValuesResponse response = + extractPartitionNamesByFilter(catName, dbName, tableName, filter, cols, ascending, maxParts); + if (response.getPartitionValues() != null) { + LOG.info("Number of records fetched with filter: {}", response.getPartitionValues().size()); + } + 
return response; + } + } catch (Exception t) { + LOG.error("Exception in ORM", t); + throw new MetaException("Error retrieving partition values: " + t); + } + } + + private PartitionValuesResponse extractPartitionNamesByFilter( + String catName, String dbName, String tableName, String filter, List cols, + boolean ascending, long maxParts) + throws MetaException, NoSuchObjectException { + + LOG.info("Table: {} filter: \"{}\" cols: {}", + TableName.getQualified(catName, dbName, tableName), filter, cols); + List partitionNames = null; + List partitions = null; + Table tbl = getTable(new TableName(catName, dbName, tableName), null, -1); + try { + // Get partitions by name - ascending or descending + partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending, + maxParts); + } catch (MetaException e) { + LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter); + } + + if (partitionNames == null) { + partitions = getPartitionsByFilter(new TableName(catName, dbName, tableName), + new GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max((short) maxParts).build()); + } + + if (partitions != null) { + partitionNames = new ArrayList<>(partitions.size()); + for (Partition partition : partitions) { + // Check for NULL's just to be safe + if (tbl.getPartitionKeys() != null && partition.getValues() != null) { + partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), partition.getValues())); + } + } + } + + if (partitionNames == null) { + throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter + + "\" for " + TableName.getQualified(catName, dbName, tableName)); + } + + if (!ascending) { + partitionNames.sort(Collections.reverseOrder()); + } + + // Return proper response + PartitionValuesResponse response = new PartitionValuesResponse(); + response.setPartitionValues(new ArrayList<>(partitionNames.size())); + LOG.info("Converting responses to Partition values for 
items: {}", partitionNames.size()); + for (String partName : partitionNames) { + ArrayList vals = new ArrayList<>(Collections.nCopies(tbl.getPartitionKeys().size(), null)); + PartitionValuesRow row = new PartitionValuesRow(); + Warehouse.makeValsFromName(partName, vals); + for (String value : vals) { + row.addToRow(value); + } + response.addToPartitionValues(row); + } + return response; + } + + private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn( + String catName, String dbName, String tableName, List cols, + boolean applyDistinct, long maxParts) + throws MetaException { + Query q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where table.database.name == t1 && table.database.catalogName == t2 && " + + "table.tableName == t3 "); + q.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + + // TODO: Ordering seems to affect the distinctness, needs checking, disabling. +/* + if (ascending) { + q.setOrdering("partitionName ascending"); + } else { + q.setOrdering("partitionName descending"); + } +*/ + if (maxParts > 0) { + q.setRange(0, maxParts); + } + StringBuilder partValuesSelect = new StringBuilder(256); + if (applyDistinct) { + partValuesSelect.append("DISTINCT "); + } + List partitionKeys = + getTable(new TableName(catName, dbName, tableName), null, -1).getPartitionKeys(); + for (FieldSchema key : cols) { + partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", "); + } + partValuesSelect.setLength(partValuesSelect.length() - 2); + LOG.info("Columns to be selected from Partitions: {}", partValuesSelect); + q.setResult(partValuesSelect.toString()); + + PartitionValuesResponse response = new PartitionValuesResponse(); + response.setPartitionValues(new ArrayList<>()); + if (cols.size() > 1) { + List results = (List) q.execute(dbName, catName, tableName); + for (Object[] row : results) { + PartitionValuesRow rowResponse = new PartitionValuesRow(); + 
for (Object columnValue : row) { + rowResponse.addToRow((String) columnValue); + } + response.addToPartitionValues(rowResponse); + } + } else { + List results = (List) q.execute(dbName, catName, tableName); + for (Object row : results) { + PartitionValuesRow rowResponse = new PartitionValuesRow(); + rowResponse.addToRow((String) row); + response.addToPartitionValues(rowResponse); + } + } + return response; + } + + private String extractPartitionKey(FieldSchema key, List pkeys) { + StringBuilder buffer = new StringBuilder(256); + + assert pkeys.size() >= 1; + + String partKey = "/" + key.getName() + "="; + + // Table is partitioned by single key + if (pkeys.size() == 1 && (pkeys.get(0).getName().matches(key.getName()))) { + buffer.append("partitionName.substring(partitionName.indexOf(\"") + .append(key.getName()).append("=\") + ").append(key.getName().length() + 1) + .append(")"); + + // First partition key - anything between key= and first / + } else if ((pkeys.get(0).getName().matches(key.getName()))) { + + buffer.append("partitionName.substring(partitionName.indexOf(\"") + .append(key.getName()).append("=\") + ").append(key.getName().length() + 1).append(", ") + .append("partitionName.indexOf(\"/\")") + .append(")"); + + // Last partition key - anything between /key= and end + } else if ((pkeys.get(pkeys.size() - 1).getName().matches(key.getName()))) { + buffer.append("partitionName.substring(partitionName.indexOf(\"") + .append(partKey).append("\") + ").append(partKey.length()) + .append(")"); + + // Intermediate key - anything between /key= and the following / + } else { + + buffer.append("partitionName.substring(partitionName.indexOf(\"") + .append(partKey).append("\") + ").append(partKey.length()).append(", ") + .append("partitionName.indexOf(\"/\", partitionName.indexOf(\"").append(partKey) + .append("\") + 1))"); + } + LOG.info("Query for Key:" + key.getName() + " is :" + buffer); + return buffer.toString(); + } + + private List 
getPartitionNamesByFilter(String catName, String dbName, String tableName, + String filter, boolean ascending, long maxParts) + throws MetaException { + List partNames = new ArrayList<>(); + Query query = null; + LOG.debug("Executing getPartitionNamesByFilter"); + catName = normalizeIdentifier(catName); + dbName = dbName.toLowerCase(); + tableName = tableName.toLowerCase(); + + MTable mtable = getMTable(catName, dbName, tableName); + if( mtable == null ) { + // To be consistent with the behavior of listPartitionNames, if the + // table or db does not exist, we return an empty list + return partNames; + } + Map params = new HashMap<>(); + String queryFilterString = makeQueryFilterString(catName, dbName, mtable, filter, params); + query = pm.newQuery( + "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + + "where " + queryFilterString); + + if (maxParts >= 0) { + //User specified a row limit, set it on the Query + query.setRange(0, maxParts); + } + + LOG.debug("Filter specified is {}, JDOQL filter is {}", filter, + queryFilterString); + + LOG.debug("Parms is {}", params); + + String parameterDeclaration = makeParameterDeclarationStringObj(params); + query.declareParameters(parameterDeclaration); + if (ascending) { + query.setOrdering("partitionName ascending"); + } else { + query.setOrdering("partitionName descending"); + } + query.setResult("partitionName"); + + Collection names = (Collection) query.executeWithMap(params); + partNames = new ArrayList<>(names); + + LOG.debug("Done executing query for getPartitionNamesByFilter"); + return partNames; + } + + @Override + public List listPartitionNamesPs(TableName tableName, List partVals, short maxParts) + throws MetaException, NoSuchObjectException { + LOG.debug("Executing listPartitionNamesPs"); + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + Collection 
names = getPartitionPsQueryResults(catName, dbName, tblName, + partVals, maxParts, "partitionName"); + return new ArrayList<>(names); + } + + @Override + public Partition getPartitionWithAuth(TableName tableName, List partVals, String user_name, + List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + MPartition mpart = getMPartition(catName, dbName, tblName, partVals, null); + if (mpart == null) { + throw new NoSuchObjectException("partition values=" + + partVals.toString()); + } + MTable mtbl = mpart.getTable(); + + Partition part = convertToPart(catName, dbName, tblName, mpart, TxnUtils.isAcidTable(mtbl.getParameters()), conf); + if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { + String partName = Warehouse.makePartName(convertToFieldSchemas(mtbl + .getPartitionKeys()), partVals); + PrincipalPrivilegeSet partAuth = baseStore.getPartitionPrivilegeSet(catName, dbName, + tblName, partName, user_name, group_names); + part.setPrivileges(partAuth); + } + return part; + } + + /** + * Makes a JDO query filter string. + * Makes a JDO query filter string for tables or partitions. + * @param dbName Database name. + * @param mtable Table. If null, the query returned is over tables in a database. + * If not null, the query returned is over partitions in a table. + * @param filter The filter from which JDOQL filter will be made. + * @param params Parameters for the filter. Some parameters may be added here. + * @return Resulting filter. + */ + private String makeQueryFilterString(String catName, String dbName, MTable mtable, String filter, + Map params) throws MetaException { + ExpressionTree tree = (filter != null && !filter.isEmpty()) + ? 
PartFilterExprUtil.parseFilterTree(filter) : ExpressionTree.EMPTY_TREE; + return makeQueryFilterString(catName, dbName, convertToTable(mtable, baseStore.getConf()), tree, params, true); + } + + @Override + public void updateCreationMetadata(TableName tableName, CreationMetadata cm) throws MetaException { + // Update creation metadata + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String name = normalizeIdentifier(tableName.getTable()); + MCreationMetadata newMcm = convertToMCreationMetadata(cm, baseStore); + MCreationMetadata mcm = getCreationMetadata(catName, dbName, name); + mcm.setTables(newMcm.getTables()); + mcm.setMaterializationTime(newMcm.getMaterializationTime()); + mcm.setTxnList(newMcm.getTxnList()); + // commit the changes + cm.setMaterializationTime(newMcm.getMaterializationTime()); + } + + @Override + public List
getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException { + List
allMaterializedViews = new ArrayList<>(); + Query query = null; + catName = normalizeIdentifier(catName); + query = pm.newQuery(MTable.class); + query.setFilter("database.catalogName == catName && tableType == tt && rewriteEnabled == re"); + query.declareParameters("java.lang.String catName, java.lang.String tt, boolean re"); + Collection mTbls = (Collection) query.executeWithArray( + catName, TableType.MATERIALIZED_VIEW.toString(), true); + for (MTable mTbl : mTbls) { + Table tbl = convertToTable(mTbl, conf); + tbl.setCreationMetadata( + convertToCreationMetadata( + getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName()), baseStore)); + allMaterializedViews.add(tbl); + } + return allMaterializedViews; + } + + @Override + public List isPartOfMaterializedView(TableName tableName) { + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = normalizeIdentifier(tableName.getTable()); + List mViewList = new ArrayList<>(); + Query query = pm.newQuery("select from org.apache.hadoop.hive.metastore.model.MCreationMetadata"); + List creationMetadata = (List) query.execute(); + Iterator iter = creationMetadata.iterator(); + + while (iter.hasNext()) { + MCreationMetadata p = iter.next(); + Set tables = p.getTables(); + for (MMVSource sourceTable : tables) { + MTable table = sourceTable.getTable(); + if (dbName.equals(table.getDatabase().getName()) && tblName.equals(table.getTableName())) { + LOG.info("Cannot drop table {} as it is being used by MView {}", table.getTableName(), p.getTblName()); + mViewList.add(p.getDbName() + "." 
+ p.getTblName()); + } + } + } + return mViewList; + } + + @Override + public Table markPartitionForEvent(TableName tableName, Map partVals, PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + LOG.debug("Begin executing markPartitionForEvent"); + Table tbl = getTable(tableName, null, -1); + if(null == tbl) { + throw new UnknownTableException("Table: "+ tableName + " is not found."); + } + + pm.makePersistent(new MPartitionEvent(normalizeIdentifier(tableName.getCat()), + normalizeIdentifier(tableName.getDb()), normalizeIdentifier(tableName.getTable()), + getPartitionStr(tbl, partVals), evtType.getValue())); + LOG.debug("Done executing markPartitionForEvent"); + return tbl; + } + + private String getPartitionStr(Table tbl, Map partVals) throws InvalidPartitionException{ + if(tbl.getPartitionKeysSize() != partVals.size()){ + throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() + + " doesn't match with number of supplied partition values: "+ partVals.size()); + } + final List storedVals = new ArrayList<>(tbl.getPartitionKeysSize()); + for(FieldSchema partKey : tbl.getPartitionKeys()){ + String partVal = partVals.get(partKey.getName()); + if(null == partVal) { + throw new InvalidPartitionException("No value found for partition column: "+partKey.getName()); + } + storedVals.add(partVal); + } + return join(storedVals,','); + } + + @Override + public boolean isPartitionMarkedForEvent(TableName tableName, Map partName, + PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { + LOG.debug("Begin Executing isPartitionMarkedForEvent"); + Query query = pm.newQuery(MPartitionEvent.class, + "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4 && catalogName == t5"); + query + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int 
t4," + + "java.lang.String t5"); + Table tbl = getTable(tableName, null, -1); // Make sure dbName and tblName are valid. + if (null == tbl) { + throw new UnknownTableException("Table: " + tableName + " is not found."); + } + Collection partEvents = (Collection) query.executeWithArray( + normalizeIdentifier(tableName.getDb()), normalizeIdentifier(tableName.getTable()), + getPartitionStr(tbl, partName), evtType.getValue(), normalizeIdentifier(tableName.getCat())); + pm.retrieveAll(partEvents); + LOG.debug("Done executing isPartitionMarkedForEvent"); + return partEvents != null && !partEvents.isEmpty(); + } + + @Override + public int getObjectCount(String fieldName, String objName) { + String queryStr = "select count(" + fieldName + ") from " + objName; + Query query = pm.newQuery(queryStr); + Long result = (Long) query.execute(); + return result != null ? result.intValue() : 0; + } + + @Override + public long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue) + throws MetaException, NoSuchObjectException { + return new GetHelper(this, new TableName(table.getCatName(), table.getDbName(), table.getCatName())) { + @Override + protected String describeResult() { + return "Affected rows"; + } + @Override + protected Long getSqlResult() throws MetaException { + return getDirectSql().updateTableParam(table, key, expectedValue, newValue); + } + @Override + protected Long getJdoResult() + throws MetaException, NoSuchObjectException, InvalidObjectException { + throw new UnsupportedOperationException( + "Cannot update parameter with JDO, make sure direct SQL is enabled"); + } + @Override + protected boolean canUseJdoQuery() throws MetaException { + return false; + } + }.run(false); + } + + @Override + public MPartition ensureGetMPartition(TableName tableName, List partVals) throws MetaException { + String catName = normalizeIdentifier(tableName.getCat()); + String dbName = normalizeIdentifier(tableName.getDb()); + String tblName = 
normalizeIdentifier(tableName.getTable()); + MPartition result = null; + MTable mtbl = getMTable(catName, dbName, tblName); + if (mtbl == null) { + // throw exception? + return null; + } + // Change the query to use part_vals instead of the name which is + // redundant TODO: callers of this often get part_vals out of name for no reason... + String name = + Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), partVals); + result = getMPartition(catName, dbName, tblName, name); + + return result; + } + + /** + * Getting MPartition object. Use this method if the partition name is available, so we do not + * query the table object again. + * @param catName The catalogue + * @param dbName The database + * @param tableName The table + * @param name The partition name + * @return The MPartition object in the backend database + */ + private MPartition getMPartition(String catName, String dbName, String tableName, + String name) throws MetaException { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + MPartition ret = null; + Query query = + pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && partitionName == t3 " + + " && table.database.catalogName == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + List mparts = (List) query.executeWithArray(tableName, dbName, name, catName); + pm.retrieveAll(mparts); + // We need to compare partition name with requested name since some DBs + // (like MySQL, Derby) considers 'a' = 'a ' whereas others like (Postgres, + // Oracle) doesn't exhibit this problem. 
+ if (CollectionUtils.isNotEmpty(mparts)) { + if (mparts.size() > 1) { + throw new MetaException( + "Expecting only one partition but more than one partitions are found."); + } else { + MPartition mpart = mparts.get(0); + if (name.equals(mpart.getPartitionName())) { + ret = mpart; + } else { + throw new MetaException("Expecting a partition with name " + name + + ", but metastore is returning a partition with name " + mpart.getPartitionName() + + "."); + } + } + } + return ret; + } +} diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/StatisticsTestUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/StatisticsTestUtils.java index a6ece7aafceb..e5e2bb5a5ba3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/StatisticsTestUtils.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/StatisticsTestUtils.java @@ -83,6 +83,7 @@ public static ColumnStatistics createColumnStatistics(ColumnStatisticsData data, ColumnStatistics colStats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, tbl.getDbName(), tbl.getTableName()); statsDesc.setPartName(partName); + statsDesc.setCatName(tbl.getCatName()); colStats.setStatsDesc(statsDesc); colStats.setStatsObj(Collections.singletonList(statObj)); colStats.setEngine(HIVE_ENGINE); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 8ebc1a7ccf9e..b6c579c9d052 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -61,12 +61,10 @@ import 
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.dataconnector.jdbc.AbstractJDBCConnectorProvider; import org.apache.hadoop.hive.metastore.handler.AddPartitionsHandler; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; -import org.apache.orc.impl.OrcAcidUtils; import org.datanucleus.api.jdo.JDOPersistenceManager; import org.datanucleus.api.jdo.JDOPersistenceManagerFactory; import org.junit.Assert; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index afede2f768c0..cbcec3911acb 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -81,6 +82,8 @@ import org.apache.hadoop.hive.metastore.model.MNotificationLog; import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MTable; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; +import org.apache.hadoop.hive.metastore.utils.DirectSqlConfigurator; import 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.RetryingExecutor; import org.junit.Assert; @@ -783,20 +786,24 @@ public void testDirectSQLDropPartitionsCacheInSession() createPartitionedTable(false, false, new HashSet<>()); // query the partitions with JDO List partitions; - try(AutoCloseable c = deadline()) { - partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - false, true, new GetPartitionsArgs.GetPartitionsArgsBuilder().max(10).build()); + TableStore tableStore = objectStore.unwrap(TableStore.class); + try(AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(objectStore.getConf(), false)) { + partitions = tableStore.getPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), + new GetPartitionsArgs.GetPartitionsArgsBuilder().max(10).build()); } Assert.assertEquals(3, partitions.size()); // drop partitions with directSql - try(AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false); + try(AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(objectStore.getConf(), true)) { + tableStore.dropPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), + Arrays.asList("test_part_col=a0", "test_part_col=a1")); } - try (AutoCloseable c = deadline()) { + try (AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(objectStore.getConf(), false)) { // query the partitions with JDO, checking the cache is not causing any problem - partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, false, true, + partitions = tableStore.getPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), new GetPartitionsArgs.GetPartitionsArgsBuilder().max(10).build()); } Assert.assertEquals(1, partitions.size()); @@ -815,27 +822,34 @@ public void 
testDirectSQLDropPartitionsCacheCrossSession() GetPartitionsArgs args = new GetPartitionsArgs.GetPartitionsArgsBuilder().max(10).build(); // query the partitions with JDO in the 1st session List partitions; - try (AutoCloseable c = deadline()) { - partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, false, true, args); + TableStore tableStore2 = objectStore2.unwrap(TableStore.class); + TableStore tableStore1 = objectStore.unwrap(TableStore.class); + TableName targetTable = new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1); + try (AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(conf, false)) { + partitions = tableStore1.getPartitions(targetTable, args); } Assert.assertEquals(3, partitions.size()); // query the partitions with JDO in the 2nd session - try (AutoCloseable c = deadline()) { - partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, false, true, args); + try (AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(conf, false)) { + partitions = tableStore2.getPartitions(targetTable, args); } Assert.assertEquals(3, partitions.size()); // drop partitions with directSql in the 1st session - try (AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false); + try (AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(conf, true)) { + tableStore1.dropPartitions(targetTable, + Arrays.asList("test_part_col=a0", "test_part_col=a1")); } // query the partitions with JDO in the 2nd session, checking the cache is not causing any // problem - try (AutoCloseable c = deadline()) { - partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, false, true, args); + try (AutoCloseable c = deadline(); + AutoCloseable d = new DirectSqlConfigurator(conf, false)) { + partitions = tableStore2.getPartitions(targetTable, 
args); } Assert.assertEquals(1, partitions.size()); } @@ -864,9 +878,9 @@ public void testDirectSQLDropPartitionsCleanup() throws Exception { checkBackendTableSize("SERDES", 4); // Table has a serde // drop the partitions - try (AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false); + try (AutoCloseable c = deadline(); AutoCloseable d = new DirectSqlConfigurator(conf, true)) { + objectStore.unwrap(TableStore.class).dropPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), + Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2")); } // Check, if every data is dropped connected to the partitions @@ -907,9 +921,9 @@ public void testDirectSQLCDsCleanup() throws Exception { checkBackendTableSize("CDS", 2); checkBackendTableSize("COLUMNS_V2", 11); // drop the partitions - try (AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false); + try (AutoCloseable c = deadline(); AutoCloseable d = new DirectSqlConfigurator(conf, true)) { + objectStore.unwrap(TableStore.class).dropPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), + Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2")); } // Checks if the data connected to the partitions is dropped checkBackendTableSize("PARTITIONS", 0); @@ -1050,10 +1064,10 @@ private void assertAggrStats(AggrStats aggrStats, ColumnStatisticsData computedS private void statsAggrResourceCleanup() throws Exception { - try (AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, - Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, true); - objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1); + try (AutoCloseable c = deadline(); AutoCloseable 
d = new DirectSqlConfigurator(conf, true)) { + objectStore.unwrap(TableStore.class).dropPartitions(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1), + Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2")); + objectStore.unwrap(TableStore.class).dropTable(new TableName(DEFAULT_CATALOG_NAME, DB1, TABLE1)); objectStore.dropDatabase(DEFAULT_CATALOG_NAME, DB1); } } @@ -1332,7 +1346,7 @@ public void testQueryCloseOnError() throws Exception { spy.getAllFunctions(DEFAULT_CATALOG_NAME); spy.getAllTables(DEFAULT_CATALOG_NAME, DB1); spy.getPartitionCount(); - Mockito.verify(spy, Mockito.times(3)) + Mockito.verify(spy, Mockito.times(2)) .rollbackAndCleanup(Mockito.anyBoolean(), ArgumentMatchers.any()); } @@ -1880,10 +1894,10 @@ protected String describeResult() { @Override protected Object getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { // drop the partitions with SQL alone - try (AutoCloseable c = deadline()) { - objectStore.dropPartitionsInternal(ctx.catName, ctx.dbName, ctx.tblName, partNames, true, - false); - Assert.assertEquals(0, objectStore.getPartitionCount()); + try (AutoCloseable c = deadline(); AutoCloseable d = new DirectSqlConfigurator(conf, true)) { + objectStore.unwrap(TableStore.class) + .dropPartitions(new TableName(ctx.catName, ctx.dbName, ctx.tblName), partNames); + assertEquals(0, objectStore.getPartitionCount()); } catch (Exception e) { throw new MetaException(e.getMessage()); } @@ -1893,10 +1907,10 @@ protected Object getSqlResult(ObjectStore.GetHelper ctx) throws MetaExce @Override protected Object getJdoResult(ObjectStore.GetHelper ctx) throws MetaException { // drop the partitions with JDO alone - try (AutoCloseable c = deadline()) { - Assert.assertEquals(3, objectStore.getPartitionCount()); - objectStore.dropPartitionsInternal(ctx.catName, ctx.dbName, ctx.tblName, partNames, false, - true); + try (AutoCloseable c = deadline(); AutoCloseable d = new DirectSqlConfigurator(conf, false)) { + assertEquals(3, 
objectStore.getPartitionCount()); + objectStore.unwrap(TableStore.class) + .dropPartitions(new TableName(ctx.catName, ctx.dbName, ctx.tblName), partNames); } catch (Exception e) { throw new MetaException(e.getMessage()); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index 31bc5635a40d..65e7996b6e02 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -35,10 +35,13 @@ import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs; import org.apache.hadoop.hive.metastore.model.MTable; +import org.apache.hadoop.hive.metastore.metastore.iface.TableStore; +import org.apache.hadoop.hive.metastore.utils.DirectSqlConfigurator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -59,52 +62,62 @@ public VerifyingObjectStore() { public List getPartitionsByFilter(String catName, String dbName, String tblName, GetPartitionsArgs args) throws MetaException, NoSuchObjectException { - List sqlResults = getPartitionsByFilterInternal( - catName, dbName, tblName, true, false, args); - List ormResults = getPartitionsByFilterInternal( - catName, dbName, tblName, false, true, args); - verifyLists(sqlResults, ormResults, Partition.class); - return sqlResults; + TableStore tableStore = unwrap(TableStore.class); + try (DirectSqlConfigurator configurator = new 
DirectSqlConfigurator(conf, false)) { + List ormResults = tableStore.getPartitionsByFilter(new TableName(catName, dbName, tblName), args); + configurator.tryDirectSql(true); + List sqlResults = tableStore.getPartitionsByFilter(new TableName(catName, dbName, tblName), args); + verifyLists(sqlResults, ormResults, Partition.class); + return sqlResults; + } } @Override public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { GetPartitionsArgs args = new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build(); - List sqlResults = getPartitionsByNamesInternal( - catName, dbName, tblName, true, false, args); - List ormResults = getPartitionsByNamesInternal( - catName, dbName, tblName, false, true, args); - verifyLists(sqlResults, ormResults, Partition.class); - return sqlResults; + TableStore tableStore = unwrap(TableStore.class); + try (DirectSqlConfigurator configurator = new DirectSqlConfigurator(conf, false)) { + List ormResults = tableStore.getPartitionsByNames(new TableName(catName, dbName, tblName), args); + configurator.tryDirectSql(true); + List sqlResults = tableStore.getPartitionsByNames(new TableName(catName, dbName, tblName), args); + verifyLists(sqlResults, ormResults, Partition.class); + return sqlResults; + } } @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, List result, GetPartitionsArgs args) throws TException { List ormParts = new LinkedList<>(); - boolean sqlResult = getPartitionsByExprInternal( - catName, dbName, tblName, result, true, false, args); - boolean ormResult = getPartitionsByExprInternal( - catName, dbName, tblName, ormParts, false, true, args); - if (sqlResult != ormResult) { - String msg = "The unknown flag is different - SQL " + sqlResult + ", ORM " + ormResult; - LOG.error(msg); - throw new MetaException(msg); + TableStore tableStore = unwrap(TableStore.class); + try (DirectSqlConfigurator 
configurator = new DirectSqlConfigurator(conf, false)) { + boolean ormResult = tableStore.getPartitionsByExpr(new TableName(catName, dbName, tblName), ormParts, args); + configurator.tryDirectSql(true); + boolean sqlResult = tableStore.getPartitionsByExpr(new TableName(catName, dbName, tblName), result, args); + if (sqlResult != ormResult) { + String msg = "The unknown flag is different - SQL " + sqlResult + ", ORM " + ormResult; + LOG.error(msg); + throw new MetaException(msg); + } + verifyLists(result, ormParts, Partition.class); + return sqlResult; } - verifyLists(result, ormParts, Partition.class); - return sqlResult; } @Override public List getPartitions( String catName, String dbName, String tableName, GetPartitionsArgs args) throws MetaException, NoSuchObjectException { openTransaction(); - List sqlResults = getPartitionsInternal(catName, dbName, tableName, true, false, args); - List ormResults = getPartitionsInternal(catName, dbName, tableName, false, true, args); - verifyLists(sqlResults, ormResults, Partition.class); - commitTransaction(); - return sqlResults; + TableStore tableStore = unwrap(TableStore.class); + try (DirectSqlConfigurator configurator = new DirectSqlConfigurator(conf, false)) { + List ormResults = tableStore.getPartitions(new TableName(catName, dbName, tableName), args); + configurator.tryDirectSql(true); + List sqlResults = tableStore.getPartitions(new TableName(catName, dbName, tableName), args); + verifyLists(sqlResults, ormResults, Partition.class); + commitTransaction(); + return sqlResults; + } } @Override @@ -142,15 +155,22 @@ public List alterPartitions(String catName, String dbName, String tbl // could be different from that in the datastore. // We cannot verify the partitions by getPartitionsByNames now. 
GetPartitionsArgs args = new GetPartitionsArgs.GetPartitionsArgsBuilder().partNames(partNames).build(); - List oldParts = getPartitionsByNamesInternal( - catName, dbName, tblName, true, true, args); + List oldParts = unwrap(TableStore.class).getPartitionsByNames(new TableName(catName, dbName, tblName), args); if (oldParts.size() != partNames.size()) { throw new MetaException("Some partitions to be altered are missing"); } List tmpNewParts = new ArrayList<>(newParts); - alterPartitionsInternal(table, partNames, newParts, queryWriteIdList, true, false); - alterPartitionsInternal(table, partNames, oldParts, queryWriteIdList, false, true); - results = alterPartitionsInternal(table, partNames, tmpNewParts, queryWriteIdList, true, false); + TableStore tableStore = unwrap(TableStore.class); + try (DirectSqlConfigurator configurator = new DirectSqlConfigurator(conf, true)) { + tableStore.alterPartitions(new TableName(catName, dbName, tblName), part_vals, newParts, writeId, + queryWriteIdList); + configurator.tryDirectSql(false); + tableStore.alterPartitions(new TableName(catName, dbName, tblName), part_vals, oldParts, writeId, + queryWriteIdList); + configurator.tryDirectSql(true); + tableStore.alterPartitions(new TableName(catName, dbName, tblName), part_vals, tmpNewParts, writeId, + queryWriteIdList); + } // commit the changes success = commitTransaction(); } catch (Exception exception) { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 44e00bc69a6a..dd2c2ce507f2 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -1157,7 +1157,7 @@ public void 
testRenamePartitionNullTblName() throws Exception { } } - @Test(expected = MetaException.class) + @Test(expected = InvalidOperationException.class) public void testRenamePartitionChangeTblName() throws Exception { List> oldValues = createTable4PartColsParts(client); List oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); @@ -1168,7 +1168,7 @@ public void testRenamePartitionChangeTblName() throws Exception { client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename); } - @Test(expected = MetaException.class) + @Test(expected = InvalidOperationException.class) public void testRenamePartitionChangeDbName() throws Exception { List> oldValues = createTable4PartColsParts(client); List oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/DirectSqlConfigurator.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/DirectSqlConfigurator.java new file mode 100644 index 000000000000..ae154d7626d2 --- /dev/null +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/DirectSqlConfigurator.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; + +import static org.apache.hadoop.hive.metastore.metastore.GetHelper.getDirectSqlErrors; + +public class DirectSqlConfigurator implements AutoCloseable { + private final Configuration conf; + private final boolean origAllowSql; + private final long directSqlErrors; + + public DirectSqlConfigurator(Configuration configuration, boolean tryDirectSql) { + this.conf = configuration; + this.origAllowSql = MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, tryDirectSql); + directSqlErrors = getDirectSqlErrors(); + } + + public void tryDirectSql(boolean tryDirectSql) { + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, tryDirectSql); + } + + @Override + public void close() throws MetaException { + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, origAllowSql); + if (directSqlErrors != getDirectSqlErrors()) { + throw new MetaException("An unexpected direct sql error raised behind," + + " please check the log to see the details"); + } + } +}