aws-glue-datacatalog-hive2-client/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java [765:915]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    return dropPartitions_core(dbName, tblName, partExprs, options.deleteData, options.purgeData);
  }

  private List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions_core(
      String databaseName,
      String tableName,
      List<ObjectPair<Integer, byte[]>> partExprs,
      boolean deleteData,
      boolean purgeData
  ) throws TException {
    List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
    for (ObjectPair<Integer, byte[]> expr : partExprs) {
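      // Convert the serialized Hive filter expression into a Glue catalog expression, then fetch the matching partitions.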
      byte[] tmp = expr.getSecond();
      String exprString = ExpressionHelper.convertHiveExpressionToCatalogExpression(tmp);
      List<Partition> catalogPartitionsToDelete = glueMetastoreClientDelegate.getCatalogPartitions(databaseName, tableName, exprString, -1);
      deleted.addAll(batchDeletePartitions(databaseName, tableName, catalogPartitionsToDelete, deleteData, purgeData));
    }
    return deleted;
  }

  /**
   * Deletes every partition in the given list using BatchDeletePartitions requests. The operation is not
   * transactional, so the call may result in partial failure.
   * @param dbName the database containing the partitions
   * @param tableName the table containing the partitions
   * @param partitionsToDelete the catalog partitions to delete
   * @param deleteData whether to delete the underlying partition data
   * @param purgeData whether to skip the trash when deleting partition data
   * @return the partitions successfully deleted
   * @throws TException if deleting any batch or post-processing any deleted partition fails
   */
  private List<org.apache.hadoop.hive.metastore.api.Partition> batchDeletePartitions(
        final String dbName, final String tableName, final List<Partition> partitionsToDelete,
        final boolean deleteData, final boolean purgeData) throws TException {

    List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
    if (partitionsToDelete == null) {
      return deleted;
    }

    validateBatchDeletePartitionsArguments(dbName, tableName, partitionsToDelete);

    List<Future<BatchDeletePartitionsHelper>> batchDeletePartitionsFutures = Lists.newArrayList();

    int numOfPartitionsToDelete = partitionsToDelete.size();
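    // Slice the partition list into pages of BATCH_DELETE_PARTITIONS_PAGE_SIZE and delete each page concurrently.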
    for (int i = 0; i < numOfPartitionsToDelete; i += BATCH_DELETE_PARTITIONS_PAGE_SIZE) {
      int j = Math.min(i + BATCH_DELETE_PARTITIONS_PAGE_SIZE, numOfPartitionsToDelete);
      final List<Partition> partitionsOnePage = partitionsToDelete.subList(i, j);

      batchDeletePartitionsFutures.add(BATCH_DELETE_PARTITIONS_THREAD_POOL.submit(new Callable<BatchDeletePartitionsHelper>() {
        @Override
        public BatchDeletePartitionsHelper call() throws Exception {
          return new BatchDeletePartitionsHelper(glueClient, dbName, tableName, catalogId, partitionsOnePage).deletePartitions();
        }
      }));
    }

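    // Gather results from all pages; remember only the first TException so the earliest failure is rethrown at the end.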
    TException tException = null;
    for (Future<BatchDeletePartitionsHelper> future : batchDeletePartitionsFutures) {
      try {
        BatchDeletePartitionsHelper batchDeletePartitionsHelper = future.get();
        for (Partition partition : batchDeletePartitionsHelper.getPartitionsDeleted()) {
          org.apache.hadoop.hive.metastore.api.Partition hivePartition =
                CatalogToHiveConverter.convertPartition(partition);
          try {
            performDropPartitionPostProcessing(dbName, tableName, hivePartition, deleteData, purgeData);
          } catch (TException e) {
            logger.error("Drop partition directory failed.", e);
            tException = tException == null ? e : tException;
          }
          deleted.add(hivePartition);
        }
        tException = tException == null ? batchDeletePartitionsHelper.getFirstTException() : tException;
      } catch (Exception e) {
        logger.error("Exception thrown by BatchDeletePartitions thread pool. ", e);
      }
    }

    if (tException != null) {
      throw tException;
    }
    return deleted;
  }

  private void validateBatchDeletePartitionsArguments(final String dbName, final String tableName,
                                                      final List<Partition> partitionsToDelete) {

    Preconditions.checkArgument(dbName != null, "Database name cannot be null");
    Preconditions.checkArgument(tableName != null, "Table name cannot be null");
    for (Partition partition : partitionsToDelete) {
      Preconditions.checkArgument(dbName.equals(partition.getDatabaseName()), "Partition database name must match %s", dbName);
      Preconditions.checkArgument(tableName.equals(partition.getTableName()), "Partition table name must match %s", tableName);
      Preconditions.checkArgument(partition.getValues() != null, "Partition values cannot be null");
    }
  }

  // Preserve the post-drop data cleanup logic from the Hive metastore
  private void performDropPartitionPostProcessing(String dbName, String tblName,
                                                  org.apache.hadoop.hive.metastore.api.Partition partition, boolean deleteData, boolean ifPurge)
        throws MetaException, NoSuchObjectException, TException {
    if (deleteData && partition.getSd() != null && partition.getSd().getLocation() != null) {
      Path partPath = new Path(partition.getSd().getLocation());
      org.apache.hadoop.hive.metastore.api.Table table = getTable(dbName, tblName);
      if (isExternalTable(table)) {
        // Don't delete external table data
        return;
      }
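      // Skip the trash (purge) when the caller asks for it or when the table is configured for auto-purge.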
      boolean mustPurge = isMustPurge(table, ifPurge);
      wh.deleteDir(partPath, true, mustPurge);
      try {
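        // Clean up parent directories that the dropped partition may have left empty, one level per partition value.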
        List<String> values = partition.getValues();
        deleteParentRecursive(partPath.getParent(), values.size() - 1, mustPurge);
      } catch (IOException e) {
        throw new MetaException(e.getMessage());
      }
    }
  }

  @Deprecated
  public void dropTable(String tableName, boolean deleteData) throws MetaException, UnknownTableException, TException,
        NoSuchObjectException {
    dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false);
  }

  @Override
  public void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException {
    dropTable(dbname, tableName, true, true, false);
  }

  @Override
  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab)
        throws MetaException, TException, NoSuchObjectException {
    dropTable(dbname, tableName, deleteData, ignoreUnknownTab, false);
  }

  @Override
  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge)
        throws MetaException, TException, NoSuchObjectException {
    glueMetastoreClientDelegate.dropTable(dbname, tableName, deleteData, ignoreUnknownTab, ifPurge);
  }

  @Override
  public org.apache.hadoop.hive.metastore.api.Partition exchange_partition(
      Map<String, String> partitionSpecs,
      String srcDb,
      String srcTbl,
      String dstDb,
      String dstTbl
  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
    return glueMetastoreClientDelegate.exchangePartition(partitionSpecs, srcDb, srcTbl, dstDb, dstTbl);
  }

  @Override
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



aws-glue-datacatalog-spark-client/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java [660:810]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      return dropPartitions_core(dbName, tblName, partExprs, options.deleteData, options.purgeData);
  }

  private List<org.apache.hadoop.hive.metastore.api.Partition> dropPartitions_core(
      String databaseName,
      String tableName,
      List<ObjectPair<Integer, byte[]>> partExprs,
      boolean deleteData,
      boolean purgeData
  ) throws TException {
    List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
    for (ObjectPair<Integer, byte[]> expr : partExprs) {
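      // Convert the serialized Hive filter expression into a Glue catalog expression, then fetch the matching partitions.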
      byte[] tmp = expr.getSecond();
      String exprString = ExpressionHelper.convertHiveExpressionToCatalogExpression(tmp);
      List<Partition> catalogPartitionsToDelete = glueMetastoreClientDelegate.getCatalogPartitions(databaseName, tableName, exprString, -1);
      deleted.addAll(batchDeletePartitions(databaseName, tableName, catalogPartitionsToDelete, deleteData, purgeData));
    }
    return deleted;
  }

  /**
   * Deletes every partition in the given list using BatchDeletePartitions requests. The operation is not
   * transactional, so the call may result in partial failure.
   * @param dbName the database containing the partitions
   * @param tableName the table containing the partitions
   * @param partitionsToDelete the catalog partitions to delete
   * @param deleteData whether to delete the underlying partition data
   * @param purgeData whether to skip the trash when deleting partition data
   * @return the partitions successfully deleted
   * @throws TException if deleting any batch or post-processing any deleted partition fails
   */
  private List<org.apache.hadoop.hive.metastore.api.Partition> batchDeletePartitions(
        final String dbName, final String tableName, final List<Partition> partitionsToDelete,
        final boolean deleteData, final boolean purgeData) throws TException {

      List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
      if (partitionsToDelete == null) {
          return deleted;
      }

      validateBatchDeletePartitionsArguments(dbName, tableName, partitionsToDelete);

      List<Future<BatchDeletePartitionsHelper>> batchDeletePartitionsFutures = Lists.newArrayList();

      int numOfPartitionsToDelete = partitionsToDelete.size();
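      // Slice the partition list into pages of BATCH_DELETE_PARTITIONS_PAGE_SIZE and delete each page concurrently.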
      for (int i = 0; i < numOfPartitionsToDelete; i += BATCH_DELETE_PARTITIONS_PAGE_SIZE) {
          int j = Math.min(i + BATCH_DELETE_PARTITIONS_PAGE_SIZE, numOfPartitionsToDelete);
          final List<Partition> partitionsOnePage = partitionsToDelete.subList(i, j);

          batchDeletePartitionsFutures.add(BATCH_DELETE_PARTITIONS_THREAD_POOL.submit(new Callable<BatchDeletePartitionsHelper>() {
              @Override
              public BatchDeletePartitionsHelper call() throws Exception {
                  return new BatchDeletePartitionsHelper(glueClient, dbName, tableName, catalogId, partitionsOnePage).deletePartitions();
              }
          }));
      }

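      // Gather results from all pages; remember only the first TException so the earliest failure is rethrown at the end.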
      TException tException = null;
      for (Future<BatchDeletePartitionsHelper> future : batchDeletePartitionsFutures) {
          try {
              BatchDeletePartitionsHelper batchDeletePartitionsHelper = future.get();
              for (Partition partition : batchDeletePartitionsHelper.getPartitionsDeleted()) {
                  org.apache.hadoop.hive.metastore.api.Partition hivePartition =
                        CatalogToHiveConverter.convertPartition(partition);
                  try {
                      performDropPartitionPostProcessing(dbName, tableName, hivePartition, deleteData, purgeData);
                  } catch (TException e) {
                      logger.error("Drop partition directory failed.", e);
                      tException = tException == null ? e : tException;
                  }
                  deleted.add(hivePartition);
              }
              tException = tException == null ? batchDeletePartitionsHelper.getFirstTException() : tException;
          } catch (Exception e) {
              logger.error("Exception thrown by BatchDeletePartitions thread pool. ", e);
          }
      }

      if (tException != null) {
          throw tException;
      }
      return deleted;
  }

  private void validateBatchDeletePartitionsArguments(final String dbName, final String tableName,
                                                      final List<Partition> partitionsToDelete) {

      Preconditions.checkArgument(dbName != null, "Database name cannot be null");
      Preconditions.checkArgument(tableName != null, "Table name cannot be null");
      for (Partition partition : partitionsToDelete) {
          Preconditions.checkArgument(dbName.equals(partition.getDatabaseName()), "Partition database name must match %s", dbName);
          Preconditions.checkArgument(tableName.equals(partition.getTableName()), "Partition table name must match %s", tableName);
          Preconditions.checkArgument(partition.getValues() != null, "Partition values cannot be null");
      }
  }

  // Preserve the post-drop data cleanup logic from the Hive metastore
  private void performDropPartitionPostProcessing(String dbName, String tblName,
                                                  org.apache.hadoop.hive.metastore.api.Partition partition, boolean deleteData, boolean ifPurge)
        throws MetaException, NoSuchObjectException, TException {
      if (deleteData && partition.getSd() != null && partition.getSd().getLocation() != null) {
          Path partPath = new Path(partition.getSd().getLocation());
          org.apache.hadoop.hive.metastore.api.Table table = getTable(dbName, tblName);
          if (isExternalTable(table)) {
              // Don't delete external table data
              return;
          }
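          // Skip the trash (purge) when the caller asks for it or when the table is configured for auto-purge.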
          boolean mustPurge = isMustPurge(table, ifPurge);
          wh.deleteDir(partPath, true, mustPurge);
          try {
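              // Clean up parent directories that the dropped partition may have left empty, one level per partition value.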
              List<String> values = partition.getValues();
              deleteParentRecursive(partPath.getParent(), values.size() - 1, mustPurge);
          } catch (IOException e) {
              throw new MetaException(e.getMessage());
          }
      }
  }

  @Deprecated
  public void dropTable(String tableName, boolean deleteData) throws MetaException, UnknownTableException, TException,
        NoSuchObjectException {
      dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false);
  }

  @Override
  public void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException {
      dropTable(dbname, tableName, true, true, false);
  }

  @Override
  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab)
        throws MetaException, TException, NoSuchObjectException {
      dropTable(dbname, tableName, deleteData, ignoreUnknownTab, false);
  }

  @Override
  public void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge)
        throws MetaException, TException, NoSuchObjectException {
    glueMetastoreClientDelegate.dropTable(dbname, tableName, deleteData, ignoreUnknownTab, ifPurge);
  }

  @Override
  public org.apache.hadoop.hive.metastore.api.Partition exchange_partition(
      Map<String, String> partitionSpecs,
      String srcDb,
      String srcTbl,
      String dstDb,
      String dstTbl
  ) throws MetaException, NoSuchObjectException, InvalidObjectException, TException {
    return glueMetastoreClientDelegate.exchangePartition(partitionSpecs, srcDb, srcTbl, dstDb, dstTbl);
  }

  @Override
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



