def uploadShapeIfRequired()

in online_archive/src/main/scala/VidispineFunctions.scala [256:287]


  /**
    * Uploads the media file of the given Vidispine shape to S3 and registers it as a proxy in ArchiveHunter,
    * if (and only if) the shape tag is one that ArchiveHunter needs.
    *
    * Outcomes:
    *  - shape tag not in [[ArchiveHunter.shapeTagToProxyTypeMap]] => failed Future with [[SilentDropMessage]]
    *    (message is silently dropped, not retried)
    *  - shape id not found on the item => failed Future with RuntimeException (permanent failure)
    *  - shape exists but has no file yet => Left(...) (retryable)
    *  - upload / import / DB-write throws a non-fatal error => Left(...) (retryable)
    *  - success => Right(updated record as JSON)
    *
    * @param itemId         Vidispine item id the shape belongs to
    * @param shapeId        Vidispine shape id to upload
    * @param shapeTag       shape tag, used to look up the ArchiveHunter proxy type
    * @param archivedRecord existing archive record; updated with proxy bucket/path on success
    * @return Future of Either — Left with a retryable-error description, Right with the updated record JSON
    */
  def uploadShapeIfRequired(itemId: String, shapeId: String, shapeTag:String, archivedRecord: ArchivedRecord):Future[Either[String,MessageProcessorReturnValue]] = {
    import scala.util.control.NonFatal

    ArchiveHunter.shapeTagToProxyTypeMap.get(shapeTag) match {
      case None=>
        logger.info(s"Shape $shapeTag for item $itemId is not required for ArchiveHunter, dropping the message")
        Future.failed(SilentDropMessage())
      case Some(destinationProxyType)=>
        vidispineCommunicator.findItemShape(itemId, shapeId).flatMap({
          case None=>
            logger.error(s"Shape $shapeId does not exist on item $itemId despite a notification informing us that it does.")
            Future.failed(new RuntimeException(s"Shape $shapeId does not exist"))
          case Some(shapeDoc)=>
            shapeDoc.getLikelyFile match {
              case None =>
                //no file attached to the shape yet — signal a retryable condition via Left.
                //Future.successful: the value is already computed, no need to schedule on the EC.
                Future.successful(Left(s"No file exists on shape $shapeId for item $itemId yet"))
              case Some(fileInfo) =>
                val uploadedFut = for {
                  uploadResult <- doUploadShape(fileInfo, archivedRecord, shapeDoc)
                  _ <- archiveHunterCommunicator.importProxy(archivedRecord.archiveHunterID, uploadResult._1, proxyFileUploader.bucketName, destinationProxyType)
                  //pure binding — no need to wrap a simple copy in Future()
                  updatedRecord = archivedRecord.copy(proxyBucket = Some(proxyFileUploader.bucketName), proxyPath = Some(uploadResult._1))
                  _ <- archivedRecordDAO.writeRecord(updatedRecord)
                } yield Right(updatedRecord.asJson)

                //the future will fail if we can't upload to S3, but treat this as a retryable failure.
                //NonFatal (not Throwable) so that fatal errors like OutOfMemoryError still propagate.
                //NOTE(review): failures from importProxy/writeRecord also land here and are reported
                //with the "upload to S3" message — acceptable since all are retried the same way.
                uploadedFut.recover({
                  case NonFatal(err)=>
                    logger.error(s"Could not upload ${fileInfo.uri} to S3: ${err.getMessage}", err)
                    Left(s"Could not upload ${fileInfo.uri} to S3")
                })
            }
        })
    }
  }