aws-glue-datacatalog-hive2-client/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java [1272:1540]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      throws NoSuchObjectException, MetaException, TException {
    return listPartitions(dbName, tblName, null, max);
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(
      String databaseName,
      String tableName,
      List<String> values,
      short max
  ) throws NoSuchObjectException, MetaException, TException {
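    // A null values list matches all partitions; otherwise the partial
    // specification is converted into a catalog filter expression.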
    String expression = null;
    if (values != null) {
      org.apache.hadoop.hive.metastore.api.Table table = getTable(databaseName, tableName);
      expression = ExpressionHelper.buildExpressionFromPartialSpecification(table, values);
    }
    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, expression, (long) max);
  }

  @Override
  public boolean listPartitionsByExpr(
      String databaseName,
      String tableName,
      byte[] expr,
      String defaultPartitionName,
      short max,
      List<org.apache.hadoop.hive.metastore.api.Partition> result
  ) throws TException {
    checkNotNull(result, "The result argument cannot be null.");

    String catalogExpression = ExpressionHelper.convertHiveExpressionToCatalogExpression(expr);
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
        glueMetastoreClientDelegate.getPartitions(databaseName, tableName, catalogExpression, (long) max);
    result.addAll(partitions);

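    // Per the IMetaStoreClient contract, returning true would mean the result
    // may contain partitions that do not match the expression. The expression
    // is evaluated by the catalog service here, so the result is exact.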
    return false;
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsByFilter(
      String databaseName,
      String tableName,
      String filter,
      short max
  ) throws MetaException, NoSuchObjectException, TException {
    // Replace double quotes with single quotes in the filter expression,
    // since the server side does not accept double-quoted expressions.
    if (StringUtils.isNotBlank(filter)) {
      filter = ExpressionHelper.replaceDoubleQuoteWithSingleQuotes(filter);
    }
    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, filter, (long) max);
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table, short maxParts,
                                                                                         String user, List<String> groups)
        throws MetaException, TException, NoSuchObjectException {
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, maxParts);

    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
      HiveObjectRef obj = new HiveObjectRef();
      obj.setObjectType(HiveObjectType.PARTITION);
      obj.setDbName(database);
      obj.setObjectName(table);
      obj.setPartValues(p.getValues());
      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set = this.get_privilege_set(obj, user, groups);
      p.setPrivileges(set);
    }

    return partitions;
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table,
                                                                                         List<String> partVals, short maxParts,
                                                                                         String user, List<String> groups) throws MetaException, TException, NoSuchObjectException {
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, partVals, maxParts);

    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
      HiveObjectRef obj = new HiveObjectRef();
      obj.setObjectType(HiveObjectType.PARTITION);
      obj.setDbName(database);
      obj.setObjectName(table);
      obj.setPartValues(p.getValues());
      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set;
      try {
        set = get_privilege_set(obj, user, groups);
      } catch (MetaException e) {
        logger.info(String.format("No privileges found for user: %s, "
              + "groups: [%s]", user, LoggingHelper.concatCollectionToStringForLogging(groups, ",")));
        set = new org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet();
      }
      p.setPrivileges(set);
    }

    return partitions;
  }

  @Override
  public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables) throws MetaException,
        TException, InvalidOperationException, UnknownDBException {
    return glueMetastoreClientDelegate.listTableNamesByFilter(dbName, filter, maxTables);
  }

  @Override
  public List<HiveObjectPrivilege> list_privileges(
      String principal,
      org.apache.hadoop.hive.metastore.api.PrincipalType principalType,
      HiveObjectRef objectRef
  ) throws MetaException, TException {
    return glueMetastoreClientDelegate.listPrivileges(principal, principalType, objectRef);
  }

  @Override
  public LockResponse lock(LockRequest lockRequest) throws NoSuchTxnException, TxnAbortedException, TException {
    return glueMetastoreClientDelegate.lock(lockRequest);
  }

  @Override
  public void markPartitionForEvent(
      String dbName,
      String tblName,
      Map<String, String> partKVs,
      PartitionEventType eventType
  ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
      UnknownPartitionException, InvalidPartitionException {
    glueMetastoreClientDelegate.markPartitionForEvent(dbName, tblName, partKVs, eventType);
  }

  @Override
  public long openTxn(String user) throws TException {
    return glueMetastoreClientDelegate.openTxn(user);
  }

  @Override
  public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
    return glueMetastoreClientDelegate.openTxns(user, numTxns);
  }

  @Override
  public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException {
    // Lifted from HiveMetaStore
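    // e.g. "year=2021/month=05" -> {year=2021, month=05}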
    if (name.length() == 0) {
      return new HashMap<String, String>();
    }
    return Warehouse.makeSpecFromName(name);
  }

  @Override
  public List<String> partitionNameToVals(String name) throws MetaException, TException {
    return glueMetastoreClientDelegate.partitionNameToVals(name);
  }

  @Override
  public void reconnect() throws MetaException {
    // TODO reset active Hive confs for metastore glueClient
    logger.debug("reconnect() was called.");
  }

  @Override
  public void renamePartition(String dbName, String tblName, List<String> partitionValues,
                              org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {

    // Set DDL time to now if not specified
    setDDLTime(newPartition);
    org.apache.hadoop.hive.metastore.api.Table tbl;
    org.apache.hadoop.hive.metastore.api.Partition oldPart;

    try {
      tbl = getTable(dbName, tblName);
      oldPart = getPartition(dbName, tblName, partitionValues);
    } catch (NoSuchObjectException e) {
      throw new InvalidOperationException(e.getMessage());
    }

    if (newPartition.getSd() == null || oldPart.getSd() == null) {
      throw new InvalidOperationException("Storage descriptor cannot be null");
    }

    // if an external partition is renamed, the location should not change
    if (!Strings.isNullOrEmpty(tbl.getTableType()) && tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
      newPartition.getSd().setLocation(oldPart.getSd().getLocation());
      renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
    } else {

      Path destPath = getDestinationPathForRename(dbName, tbl, newPartition);
      Path srcPath = new Path(oldPart.getSd().getLocation());
      FileSystem srcFs = wh.getFs(srcPath);
      FileSystem destFs = wh.getFs(destPath);

      verifyDestinationLocation(srcFs, destFs, srcPath, destPath, tbl, newPartition);
      newPartition.getSd().setLocation(destPath.toString());

      renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
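      // The catalog rename is committed before the directory move; if the move
      // fails, the finally block below reverts the catalog to the old partition.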
      boolean success = true;
      try {
        if (srcFs.exists(srcPath)) {
          // if destPath's parent directory doesn't exist, create it
          Path destParentPath = destPath.getParent();
          if (!wh.mkdirs(destParentPath, true)) {
            throw new IOException("Unable to create path " + destParentPath);
          }
          wh.renameDir(srcPath, destPath, true);
        }
      } catch (IOException e) {
        success = false;
        throw new InvalidOperationException("Unable to access old location "
              + srcPath + " for partition " + tbl.getDbName() + "."
              + tbl.getTableName() + " " + partitionValues);
      } finally {
        if (!success) {
          // revert metastore operation
          renamePartitionInCatalog(dbName, tblName, newPartition.getValues(), oldPart);
        }
      }
    }
  }

  private void verifyDestinationLocation(FileSystem srcFs, FileSystem destFs, Path srcPath, Path destPath, org.apache.hadoop.hive.metastore.api.Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException {
    String oldPartLoc = srcPath.toString();
    String newPartLoc = destPath.toString();

    // check that src and dest are on the same file system
    if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
      throw new InvalidOperationException("table new location " + destPath
            + " is on a different file system than the old location "
            + srcPath + ". This operation is not supported");
    }
    try {
      srcFs.exists(srcPath); // check that src exists and also surfaces permission problems
      if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
        throw new InvalidOperationException("New location for this partition "
              + tbl.getDbName() + "." + tbl.getTableName() + "." + newPartition.getValues()
              + " already exists : " + destPath);
      }
    } catch (IOException e) {
      throw new InvalidOperationException("Unable to access new location "
            + destPath + " for partition " + tbl.getDbName() + "."
            + tbl.getTableName() + " " + newPartition.getValues());
    }
  }

  private Path getDestinationPathForRename(String dbName, org.apache.hadoop.hive.metastore.api.Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {
    try {
      Path destPath = new Path(hiveShims.getDefaultTablePath(getDatabase(dbName), tbl.getTableName(), wh),
            Warehouse.makePartName(tbl.getPartitionKeys(), newPartition.getValues()));
      return constructRenamedPath(destPath, new Path(newPartition.getSd().getLocation()));
    } catch (NoSuchObjectException e) {
      throw new InvalidOperationException(
            "Unable to change partition or table. Database " + dbName + " does not exist."
                  + " Check metastore logs for a detailed stack trace. " + e.getMessage());
    }
  }

  private void setDDLTime(org.apache.hadoop.hive.metastore.api.Partition partition) {
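    // DDL_TIME is stored as seconds since the epoch.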
    if (partition.getParameters() == null ||
          partition.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
          Integer.parseInt(partition.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
      partition.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
            .currentTimeMillis() / 1000));
    }
  }

  private void renamePartitionInCatalog(String databaseName, String tableName,
                                        List<String> partitionValues, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



aws-glue-datacatalog-spark-client/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java [1121:1389]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        throws NoSuchObjectException, MetaException, TException {
      return listPartitions(dbName, tblName, null, max);
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitions(
      String databaseName,
      String tableName,
      List<String> values,
      short max
  ) throws NoSuchObjectException, MetaException, TException {
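    // A null values list matches all partitions; otherwise the partial
    // specification is converted into a catalog filter expression.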
    String expression = null;
    if (values != null) {
        org.apache.hadoop.hive.metastore.api.Table table = getTable(databaseName, tableName);
        expression = ExpressionHelper.buildExpressionFromPartialSpecification(table, values);
    }
    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, expression, (long) max);
  }

  @Override
  public boolean listPartitionsByExpr(
      String databaseName,
      String tableName,
      byte[] expr,
      String defaultPartitionName,
      short max,
      List<org.apache.hadoop.hive.metastore.api.Partition> result
  ) throws TException {
    checkNotNull(result, "The result argument cannot be null.");

    String catalogExpression = ExpressionHelper.convertHiveExpressionToCatalogExpression(expr);
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
        glueMetastoreClientDelegate.getPartitions(databaseName, tableName, catalogExpression, (long) max);
    result.addAll(partitions);

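    // Per the IMetaStoreClient contract, returning true would mean the result
    // may contain partitions that do not match the expression. The expression
    // is evaluated by the catalog service here, so the result is exact.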
    return false;
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsByFilter(
      String databaseName,
      String tableName,
      String filter,
      short max
  ) throws MetaException, NoSuchObjectException, TException {
    // Replace double quotes with single quotes in the filter expression,
    // since the server side does not accept double-quoted expressions.
    if (StringUtils.isNotBlank(filter)) {
        filter = ExpressionHelper.replaceDoubleQuoteWithSingleQuotes(filter);
    }
    return glueMetastoreClientDelegate.getPartitions(databaseName, tableName, filter, (long) max);
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table, short maxParts,
                                                                                         String user, List<String> groups)
        throws MetaException, TException, NoSuchObjectException {
      List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, maxParts);

      for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
          HiveObjectRef obj = new HiveObjectRef();
          obj.setObjectType(HiveObjectType.PARTITION);
          obj.setDbName(database);
          obj.setObjectName(table);
          obj.setPartValues(p.getValues());
          org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set = this.get_privilege_set(obj, user, groups);
          p.setPrivileges(set);
      }

      return partitions;
  }

  @Override
  public List<org.apache.hadoop.hive.metastore.api.Partition> listPartitionsWithAuthInfo(String database, String table,
                                                                                         List<String> partVals, short maxParts,
                                                                                         String user, List<String> groups) throws MetaException, TException, NoSuchObjectException {
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = listPartitions(database, table, partVals, maxParts);

    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
      HiveObjectRef obj = new HiveObjectRef();
      obj.setObjectType(HiveObjectType.PARTITION);
      obj.setDbName(database);
      obj.setObjectName(table);
      obj.setPartValues(p.getValues());
      org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet set;
      try {
        set = get_privilege_set(obj, user, groups);
      } catch (MetaException e) {
        logger.info(String.format("No privileges found for user: %s, "
            + "groups: [%s]", user, LoggingHelper.concatCollectionToStringForLogging(groups, ",")));
        set = new org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet();
      }
      p.setPrivileges(set);
    }

    return partitions;
  }

  @Override
  public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables) throws MetaException,
        TException, InvalidOperationException, UnknownDBException {
    return glueMetastoreClientDelegate.listTableNamesByFilter(dbName, filter, maxTables);
  }

  @Override
  public List<HiveObjectPrivilege> list_privileges(
      String principal,
      org.apache.hadoop.hive.metastore.api.PrincipalType principalType,
      HiveObjectRef objectRef
  ) throws MetaException, TException {
    return glueMetastoreClientDelegate.listPrivileges(principal, principalType, objectRef);
  }

  @Override
  public LockResponse lock(LockRequest lockRequest) throws NoSuchTxnException, TxnAbortedException, TException {
    return glueMetastoreClientDelegate.lock(lockRequest);
  }

  @Override
  public void markPartitionForEvent(
      String dbName,
      String tblName,
      Map<String, String> partKVs,
      PartitionEventType eventType
  ) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException,
      UnknownPartitionException, InvalidPartitionException {
    glueMetastoreClientDelegate.markPartitionForEvent(dbName, tblName, partKVs, eventType);
  }

  @Override
  public long openTxn(String user) throws TException {
    return glueMetastoreClientDelegate.openTxn(user);
  }

  @Override
  public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
    return glueMetastoreClientDelegate.openTxns(user, numTxns);
  }

  @Override
  public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException {
      // Lifted from HiveMetaStore
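      // e.g. "year=2021/month=05" -> {year=2021, month=05}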
      if (name.length() == 0) {
          return new HashMap<String, String>();
      }
      return Warehouse.makeSpecFromName(name);
  }

  @Override
  public List<String> partitionNameToVals(String name) throws MetaException, TException {
    return glueMetastoreClientDelegate.partitionNameToVals(name);
  }

  @Override
  public void reconnect() throws MetaException {
      // TODO reset active Hive confs for metastore glueClient
      logger.debug("reconnect() was called.");
  }

  @Override
  public void renamePartition(String dbName, String tblName, List<String> partitionValues,
                              org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {

      // Set DDL time to now if not specified
      setDDLTime(newPartition);
      org.apache.hadoop.hive.metastore.api.Table tbl;
      org.apache.hadoop.hive.metastore.api.Partition oldPart;

      try {
          tbl = getTable(dbName, tblName);
          oldPart = getPartition(dbName, tblName, partitionValues);
      } catch (NoSuchObjectException e) {
          throw new InvalidOperationException(e.getMessage());
      }

      if (newPartition.getSd() == null || oldPart.getSd() == null) {
          throw new InvalidOperationException("Storage descriptor cannot be null");
      }

      // if an external partition is renamed, the location should not change
      if (!Strings.isNullOrEmpty(tbl.getTableType()) && tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
          newPartition.getSd().setLocation(oldPart.getSd().getLocation());
          renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
      } else {

          Path destPath = getDestinationPathForRename(dbName, tbl, newPartition);
          Path srcPath = new Path(oldPart.getSd().getLocation());
          FileSystem srcFs = wh.getFs(srcPath);
          FileSystem destFs = wh.getFs(destPath);

          verifyDestinationLocation(srcFs, destFs, srcPath, destPath, tbl, newPartition);
          newPartition.getSd().setLocation(destPath.toString());

          renamePartitionInCatalog(dbName, tblName, partitionValues, newPartition);
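          // The catalog rename is committed before the directory move; if the move
          // fails, the finally block below reverts the catalog to the old partition.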
          boolean success = true;
          try {
              if (srcFs.exists(srcPath)) {
                  // if destPath's parent directory doesn't exist, create it
                  Path destParentPath = destPath.getParent();
                  if (!wh.mkdirs(destParentPath, true)) {
                      throw new IOException("Unable to create path " + destParentPath);
                  }
                  wh.renameDir(srcPath, destPath, true);
              }
          } catch (IOException e) {
              success = false;
              throw new InvalidOperationException("Unable to access old location "
                    + srcPath + " for partition " + tbl.getDbName() + "."
                    + tbl.getTableName() + " " + partitionValues);
          } finally {
              if (!success) {
                  // revert metastore operation
                  renamePartitionInCatalog(dbName, tblName, newPartition.getValues(), oldPart);
              }
          }
      }
  }

  private void verifyDestinationLocation(FileSystem srcFs, FileSystem destFs, Path srcPath, Path destPath, org.apache.hadoop.hive.metastore.api.Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException {
      String oldPartLoc = srcPath.toString();
      String newPartLoc = destPath.toString();

      // check that src and dest are on the same file system
      if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
          throw new InvalidOperationException("table new location " + destPath
                + " is on a different file system than the old location "
                + srcPath + ". This operation is not supported");
      }
      try {
          srcFs.exists(srcPath); // check that src exists and also surfaces permission problems
          if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
              throw new InvalidOperationException("New location for this partition "
                    + tbl.getDbName() + "." + tbl.getTableName() + "." + newPartition.getValues()
                    + " already exists : " + destPath);
          }
      } catch (IOException e) {
          throw new InvalidOperationException("Unable to access new location "
                + destPath + " for partition " + tbl.getDbName() + "."
                + tbl.getTableName() + " " + newPartition.getValues());
      }
  }

  private Path getDestinationPathForRename(String dbName, org.apache.hadoop.hive.metastore.api.Table tbl, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {
      try {
          Path destPath = new Path(hiveShims.getDefaultTablePath(getDatabase(dbName), tbl.getTableName(), wh),
                Warehouse.makePartName(tbl.getPartitionKeys(), newPartition.getValues()));
          return constructRenamedPath(destPath, new Path(newPartition.getSd().getLocation()));
      } catch (NoSuchObjectException e) {
          throw new InvalidOperationException(
                "Unable to change partition or table. Database " + dbName + " does not exist."
                      + " Check metastore logs for a detailed stack trace. " + e.getMessage());
      }
  }

  private void setDDLTime(org.apache.hadoop.hive.metastore.api.Partition partition) {
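      // DDL_TIME is stored as seconds since the epoch.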
      if (partition.getParameters() == null ||
            partition.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
            Integer.parseInt(partition.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
          partition.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
                .currentTimeMillis() / 1000));
      }
  }

  private void renamePartitionInCatalog(String databaseName, String tableName,
                                        List<String> partitionValues, org.apache.hadoop.hive.metastore.api.Partition newPartition)
        throws InvalidOperationException, MetaException, TException {
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



