private List<org.apache.hadoop.hive.metastore.api.Partition> batchDeletePartitions()

in aws-glue-datacatalog-spark-client/src/main/java/com/amazonaws/glue/catalog/metastore/AWSCatalogMetastoreClient.java [689:740]


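  /**
   * Deletes the given catalog partitions in pages on a shared thread pool,
   * converting each successfully deleted partition back to its Hive
   * representation and running drop-partition post-processing on it. The
   * first TException encountered is rethrown once all pages have been
   * processed; a null input simply returns an empty list.
   */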
  private List<org.apache.hadoop.hive.metastore.api.Partition> batchDeletePartitions(
        final String dbName, final String tableName, final List<Partition> partitionsToDelete,
        final boolean deleteData, final boolean purgeData) throws TException {

      List<org.apache.hadoop.hive.metastore.api.Partition> deleted = Lists.newArrayList();
      if (partitionsToDelete == null) {
          return deleted;
      }

      validateBatchDeletePartitionsArguments(dbName, tableName, partitionsToDelete);

      List<Future<BatchDeletePartitionsHelper>> batchDeletePartitionsFutures = Lists.newArrayList();

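      // Split the partitions into pages of BATCH_DELETE_PARTITIONS_PAGE_SIZE and
      // submit one BatchDeletePartitionsHelper per page so pages are deleted concurrently.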
      int numOfPartitionsToDelete = partitionsToDelete.size();
      for (int i = 0; i < numOfPartitionsToDelete; i += BATCH_DELETE_PARTITIONS_PAGE_SIZE) {
          int j = Math.min(i + BATCH_DELETE_PARTITIONS_PAGE_SIZE, numOfPartitionsToDelete);
          final List<Partition> partitionsOnePage = partitionsToDelete.subList(i, j);

          batchDeletePartitionsFutures.add(BATCH_DELETE_PARTITIONS_THREAD_POOL.submit(new Callable<BatchDeletePartitionsHelper>() {
              @Override
              public BatchDeletePartitionsHelper call() throws Exception {
                  return new BatchDeletePartitionsHelper(glueClient, dbName, tableName, catalogId, partitionsOnePage).deletePartitions();
              }
          }));
      }

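      // Collect the results: convert each deleted catalog partition to the Hive type,
      // run drop-partition post-processing on it, and remember the first TException
      // (from deletion or post-processing) so it can be rethrown later.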
      TException tException = null;
      for (Future<BatchDeletePartitionsHelper> future : batchDeletePartitionsFutures) {
          try {
              BatchDeletePartitionsHelper batchDeletePartitionsHelper = future.get();
              for (Partition partition : batchDeletePartitionsHelper.getPartitionsDeleted()) {
                  org.apache.hadoop.hive.metastore.api.Partition hivePartition =
                        CatalogToHiveConverter.convertPartition(partition);
                  try {
                      performDropPartitionPostProcessing(dbName, tableName, hivePartition, deleteData, purgeData);
                  } catch (TException e) {
                      logger.error("Drop partition directory failed.", e);
                      tException = tException == null ? e : tException;
                  }
                  deleted.add(hivePartition);
              }
              tException = tException == null ? batchDeletePartitionsHelper.getFirstTException() : tException;
          } catch (Exception e) {
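              // Failures from the thread pool itself (e.g. an interrupted or failed future)
              // are logged but do not abort processing of the remaining pages.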
              logger.error("Exception thrown by BatchDeletePartitions thread pool. ", e);
          }
      }

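      // Surface the first TException only after every page has been attempted.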
      if (tException != null) {
          throw tException;
      }
      return deleted;
  }