public OMClientResponse validateAndUpdateCache()

in hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java [139:383]


  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) {
    final long trxnLogIndex = context.getIndex();
    MultipartUploadCompleteRequest multipartUploadCompleteRequest =
        getOmRequest().getCompleteMultiPartUploadRequest();

    KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();

    List<OzoneManagerProtocolProtos.Part> partsList =
        multipartUploadCompleteRequest.getPartsListList();
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    auditMap.put(OzoneConsts.UPLOAD_ID, keyArgs.getMultipartUploadID());

    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    final String requestedVolume = volumeName;
    final String requestedBucket = bucketName;
    String keyName = keyArgs.getKeyName();
    String uploadID = keyArgs.getMultipartUploadID();
    String multipartKey = null;

    ozoneManager.getMetrics().incNumCompleteMultipartUploads();

    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();

    boolean acquiredLock = false;
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
        getOmRequest());
    OMClientResponse omClientResponse = null;
    Exception exception = null;
    Result result = null;
    try {
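      // DB key that tracks this in-flight multipart upload
      // (volume/bucket/key/uploadID) in the multipart info table.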
      multipartKey = omMetadataManager.getMultipartKey(volumeName,
          bucketName, keyName, uploadID);

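      // Take the bucket write lock; all cache updates below happen under it,
      // and it is released in the finally block.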
      mergeOmLockDetails(omMetadataManager.getLock()
          .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName));
      acquiredLock = getOmLockDetails().isLockAcquired();

      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
      OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager,
          volumeName, bucketName);

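      // Resolve the key path and collect any parent directories that are
      // missing from the directory table.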
      List<OmDirectoryInfo> missingParentInfos;
      OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest
          .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName,
              keyName, Paths.get(keyName));
      missingParentInfos = getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo,
              pathInfoFSO, trxnLogIndex);

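      // If parents are missing, add them to the directory table cache and,
      // when the multipart open-key entry is also absent, recreate it in the
      // open key table cache.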
      if (missingParentInfos != null) {
        final long volumeId = omMetadataManager.getVolumeId(volumeName);
        final long bucketId = omMetadataManager.getBucketId(volumeName,
            bucketName);

        // add all missing parents to directory table
        addMissingParentsToCache(omBucketInfo, missingParentInfos,
            omMetadataManager, volumeId, bucketId, trxnLogIndex);

        String multipartOpenKey = omMetadataManager
            .getMultipartKey(volumeId, bucketId,
                pathInfoFSO.getLastKnownParentId(),
                pathInfoFSO.getLeafNodeName(),
                keyArgs.getMultipartUploadID());

        if (getOmKeyInfoFromOpenKeyTable(multipartOpenKey,
            keyName, omMetadataManager) == null) {

          final ReplicationConfig replicationConfig = OzoneConfigUtil
              .resolveReplicationConfigPreference(keyArgs.getType(),
                  keyArgs.getFactor(), keyArgs.getEcReplicationConfig(),
                  omBucketInfo != null ?
                      omBucketInfo.getDefaultReplicationConfig() :
                      null, ozoneManager);

          OmMultipartKeyInfo multipartKeyInfoFromArgs =
              new OmMultipartKeyInfo.Builder()
                  .setUploadID(keyArgs.getMultipartUploadID())
                  .setCreationTime(keyArgs.getModificationTime())
                  .setReplicationConfig(replicationConfig)
                  .setObjectID(pathInfoFSO.getLeafNodeObjectId())
                  .setUpdateID(trxnLogIndex)
                  .setParentID(pathInfoFSO.getLastKnownParentId())
                  .build();

          OmKeyInfo keyInfoFromArgs = new OmKeyInfo.Builder()
              .setVolumeName(volumeName)
              .setBucketName(bucketName)
              .setKeyName(keyName)
              .setCreationTime(keyArgs.getModificationTime())
              .setModificationTime(keyArgs.getModificationTime())
              .setReplicationConfig(replicationConfig)
              .setOmKeyLocationInfos(Collections.singletonList(
                  new OmKeyLocationInfoGroup(0, new ArrayList<>(), true)))
              .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO,
                  ozoneManager.getPrefixManager(), ozoneManager.getConfiguration()))
              .setObjectID(pathInfoFSO.getLeafNodeObjectId())
              .setUpdateID(trxnLogIndex)
              .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
                  OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
              .setParentObjectID(pathInfoFSO.getLastKnownParentId())
              .build();

          // Add the missing multipart info to the open key table
          addMultiPartToCache(omMetadataManager, multipartOpenKey,
              pathInfoFSO, keyInfoFromArgs, keyName, trxnLogIndex);
        }
      }

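      // Load the stored multipart upload metadata and resolve the DB keys for
      // the open multipart entry and the final committed key.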
      String dbMultipartOpenKey =
          getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID,
              omMetadataManager);

      OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
          .getMultipartInfoTable().get(multipartKey);

      String ozoneKey = omMetadataManager.getOzoneKey(
          volumeName, bucketName, keyName);

      String dbOzoneKey =
          getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);

      // Check whether a directory with the same name already exists
      // (LEGACY_FS); if it does, throw an error.
      checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName,
          omMetadataManager);

      if (multipartKeyInfo == null) {
        throw new OMException(
            failureMessage(requestedVolume, requestedBucket, keyName),
            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
      }

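      // The complete request must list at least one part; an empty part list
      // is rejected below.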
      if (!partsList.isEmpty()) {
        final OmMultipartKeyInfo.PartKeyInfoMap partKeyInfoMap
            = multipartKeyInfo.getPartKeyInfoMap();
        if (partKeyInfoMap.size() == 0) {
          LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" +
                  " no parts in OM, parts given to upload are {}", ozoneKey,
              partsList);
          throw new OMException(
              failureMessage(requestedVolume, requestedBucket, keyName),
              OMException.ResultCodes.INVALID_PART);
        }

        // First, check for invalid part order.
        List<Integer> partNumbers = new ArrayList<>();
        int partsListSize = getPartsListSize(requestedVolume,
                requestedBucket, keyName, ozoneKey, partNumbers, partsList);

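        // Validate the supplied parts against the stored part info and
        // compute the total data size of the completed key.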
        List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
                keyName, ozoneKey, partKeyInfoMap, partsListSize,
                partLocationInfos, partsList, ozoneManager);

        // All parts share the same replication information; take it from the
        // last part.
        OmKeyInfo omKeyInfo =
            getOmKeyInfo(trxnLogIndex, keyArgs, volumeName,
                bucketName, keyName, dbMultipartOpenKey, omMetadataManager,
                dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize);

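        // usedBytesDiff tracks the net change in replicated bytes charged to
        // the bucket (unused parts and overwritten versions are subtracted).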
        long usedBytesDiff = 0;
        // Find all unused parts.
        List<OmKeyInfo> allKeyInfoToRemove = new ArrayList<>();
        for (PartKeyInfo partKeyInfo : partKeyInfoMap) {
          if (!partNumbers.contains(partKeyInfo.getPartNumber())) {
            OmKeyInfo delPartKeyInfo =
                OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
            allKeyInfoToRemove.add(delPartKeyInfo);
            usedBytesDiff -= delPartKeyInfo.getReplicatedSize();
          }
        }

        // If bucket versioning is turned on between key creation and key
        // commit, old versions are simply overwritten and not kept. Bucket
        // versioning takes effect from the first key created after the knob
        // is turned on.
        OmKeyInfo keyToDelete =
            omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
        boolean isNamespaceUpdate = false;
        if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
          RepeatedOmKeyInfo oldKeyVersionsToDelete = getOldVersionsToCleanUp(
              keyToDelete, trxnLogIndex);
          allKeyInfoToRemove.addAll(oldKeyVersionsToDelete.getOmKeyInfoList());
          usedBytesDiff -= keyToDelete.getReplicatedSize();
        } else {
          checkBucketQuotaInNamespace(omBucketInfo, 1L);
          omBucketInfo.incrUsedNamespace(1L);
          isNamespaceUpdate = true;
        }

        String dbBucketKey = omMetadataManager.getBucketKey(
            omBucketInfo.getVolumeName(), omBucketInfo.getBucketName());
        if (usedBytesDiff != 0) {
          omBucketInfo.incrUsedBytes(usedBytesDiff);
        } else if (!isNamespaceUpdate) {
          // If neither the bucket size nor the namespace count changed, skip
          // updating the bucket object.
          omBucketInfo = null;
        }

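        // Update the table caches: commit the completed key and clear the
        // open multipart entries.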
        updateCache(omMetadataManager, dbBucketKey, omBucketInfo, dbOzoneKey,
            dbMultipartOpenKey, multipartKey, omKeyInfo, trxnLogIndex);

        omResponse.setCompleteMultiPartUploadResponse(
            MultipartUploadCompleteResponse.newBuilder()
                .setVolume(requestedVolume)
                .setBucket(requestedBucket)
                .setKey(keyName)
                .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG)));

        long volumeId = omMetadataManager.getVolumeId(volumeName);
        long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
        omClientResponse =
            getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey,
                omKeyInfo, allKeyInfoToRemove, omBucketInfo,
                volumeId, bucketId, missingParentInfos, multipartKeyInfo);

        result = Result.SUCCESS;
      } else {
        throw new OMException(
            failureMessage(requestedVolume, requestedBucket, keyName) +
            " because of empty part list",
            OMException.ResultCodes.INVALID_REQUEST);
      }

    } catch (IOException | InvalidPathException ex) {
      result = Result.FAILURE;
      exception = ex;
      omClientResponse = getOmClientResponse(omResponse, exception);
    } finally {
      if (acquiredLock) {
        mergeOmLockDetails(omMetadataManager.getLock()
            .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName));
      }
      if (omClientResponse != null) {
        omClientResponse.setOmLockDetails(getOmLockDetails());
      }
    }

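    // Audit and log the outcome of the request.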
    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
            auditMap, volumeName, bucketName, keyName, exception, result);

    return omClientResponse;
  }