def uploadMetadataToS3()

in online_archive/src/main/scala/VidispineFunctions.scala [306:342]
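
Streams the item's XML metadata document from Vidispine, uploads it to the proxy S3 bucket alongside the archived media, registers it with ArchiveHunter as a metadata proxy, and persists the updated ArchivedRecord. On success the updated record is returned as JSON in a Right; on any failure a FailureRecord is written and the error message is returned in a Left.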


  def uploadMetadataToS3(itemId: String, essenceVersion: Option[Int], archivedRecord: ArchivedRecord): Future[Either[String, Json]] = {
    vidispineCommunicator.akkaStreamXMLMetadataDocument(itemId).flatMap({
      case None=>
        logger.error(s"No metadata present on $itemId")
        Future.failed(new RuntimeException(s"No metadata present on $itemId"))
      case Some(entity)=>
        logger.info(s"Got metadata source from Vidispine: ${entity} uploading to S3 bucket")
        val uploadedPathXtn = FilenameSplitter(archivedRecord.uploadedPath)
        val metadataPath = uploadedPathXtn._1 + "_metadata.xml"

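        // Upload the XML via a temp file, register it in ArchiveHunter as a METADATA proxy,
        // then update and persist the ArchivedRecord with the new proxy location and version.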
        for {
          uploadResult <- proxyFileUploader.uploadAkkaStreamViaTempfile(entity.dataBytes, metadataPath, entity.contentType)
          _ <- archiveHunterCommunicator.importProxy(archivedRecord.archiveHunterID, metadataPath, proxyFileUploader.bucketName, ArchiveHunter.ProxyType.METADATA)
          updatedRecord <- Future(archivedRecord.copy(
            proxyBucket = Some(proxyFileUploader.bucketName),
            metadataXML = Some(metadataPath),
            metadataVersion = essenceVersion
          ))
          _ <- archivedRecordDAO.writeRecord(updatedRecord)
        } yield Right(updatedRecord.asJson)
    }).recoverWith(err=>{
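      // Any upstream failure lands here: note the retry attempt (defaulting to 1 if the
      // diagnostic context has no count), write a FailureRecord and surface the message as a Left.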
      val attemptCount = attemptCountFromMDC() match {
        case Some(count)=>count
        case None=>
          logger.warn(s"Could not get attempt count from diagnostic context for $itemId")
          1
      }

      val rec = FailureRecord(id = None,
        originalFilePath = archivedRecord.originalFilePath,
        attempt = attemptCount,
        errorMessage = err.getMessage,
        errorComponent = ErrorComponents.AWS,
        retryState = RetryStates.WillRetry)
      failureRecordDAO.writeRecord(rec).map(_=>Left(err.getMessage))
    })
  }
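
For orientation, a minimal usage sketch of how a caller inside the same class might consume the Future[Either[String, Json]] this method returns. The name recordMetadata and its signature are assumptions for illustration only; it assumes the class's implicit ExecutionContext is in scope, and only uploadMetadataToS3 and logger come from the code above.

  // Hypothetical caller: given an ArchivedRecord already in hand, push the item's
  // metadata to S3 and log the outcome carried in the Either.
  def recordMetadata(itemId: String, essenceVersion: Option[Int], rec: ArchivedRecord): Future[Unit] =
    uploadMetadataToS3(itemId, essenceVersion, rec).map {
      case Right(updatedJson) =>
        logger.info(s"Metadata for $itemId uploaded and recorded: $updatedJson")
      case Left(errMsg) =>
        // A FailureRecord has already been written inside uploadMetadataToS3; the caller
        // only decides whether to alert or retry.
        logger.warn(s"Metadata upload for $itemId failed: $errMsg")
    }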