// Excerpt: def build() — from backend/app/utils/AwsDiscovery.scala (lines 22–97)
  /**
   * Resolves runtime configuration from AWS (EC2 metadata, SSM parameters,
   * Secrets Manager) and returns the updated config plus the key/value
   * properties to attach to JSON log lines.
   */
  def build(config: Config, discoveryConfig: AWSDiscoveryConfig): DiscoveryResult = {
    // No instance ID exists when running locally but against databases in S3,
    // so the metadata lookup is wrapped in Option (null -> None).
    val instanceIdOpt = Option(EC2MetadataUtils.getInstanceId)

    val AWSDiscoveryConfig(region, stack, app, stage, _, _) = discoveryConfig
    val runningLocally = discoveryConfig.runningLocally.getOrElse(false)

    // Local runs authenticate via a named profile; on EC2 instance credentials are used.
    val profile = if (runningLocally) Some("investigations") else None
    val credentials = AwsCredentials(profile = profile)

    val ec2Client = AmazonEC2ClientBuilder.standard().withCredentials(credentials).withRegion(region).build()
    val ssmClient = AWSSimpleSystemsManagementClientBuilder.standard().withCredentials(credentials).withRegion(region).build()
    val secretsManagerClient = AWSSecretsManagerClientBuilder.standard().withCredentials(credentials).withRegion(region).build()

    logger.info(s"AWS discovery stack: $stack app: $app stage: $stage region: $region runningLocally: $runningLocally")

    // Each section is computed in the same order the original copy(...) call
    // evaluated its arguments, preserving the sequence of remote AWS reads.
    val appConfig = config.app.copy(
      hideDownloadButton = false,
      label = getLabel(stack)
    )

    // 2FA is mandatory outside local development when using database auth.
    val authProvider = config.auth.provider match {
      case db: DatabaseAuthConfig => db.copy(require2FA = true)
      case other => other
    }

    val s3Config = config.s3.copy(
      region = region,
      buckets = buildBuckets(config.s3.buckets, stack, stage),
      sseAlgorithm = Some("aws:kms"),
      // these are determined using instance credentials
      endpoint = None, accessKey = None, secretKey = None
    )

    val elasticsearchHosts =
      if (runningLocally) List("http://localhost:19200")
      else buildElasticsearchHosts(stack, stage, ec2Client)

    val elasticsearchConfig = config.elasticsearch.copy(
      hosts = elasticsearchHosts,
      disableSniffing = Some(runningLocally)
    )

    val postgresConfig = getDbSecrets(stack, secretsManagerClient)

    val neo4jUrl =
      if (runningLocally) "bolt://localhost:17687"
      else buildNeo4jUrl(stack, stage, ec2Client)

    val neo4jConfig = config.neo4j.copy(
      url = neo4jUrl,
      password = readSSMParameter("neo4j/password", stack, stage, ssmClient)
    )

    // Using the instanceId as the worker name will allow us to break locks on terminated instances in the future
    val workerConfig = instanceIdOpt.fold(config.worker) { instanceId =>
      config.worker.copy(name = Some(instanceId))
    }

    val transcribeConfig = config.transcribe.copy(
      whisperModelFilename = readSSMParameter("transcribe/modelFilename", stack, stage, ssmClient),
      transcriptionOutputQueueUrl = readSSMParameter("transcribe/transcriptionOutputQueueUrl", stack, stage, ssmClient),
      transcriptionServiceQueueUrl = readSSMParameter("transcribe/transcriptionServiceQueueUrl", stack, stage, ssmClient),
      transcriptionOutputDeadLetterQueueUrl = readSSMParameter("transcribe/transcriptionOutputDeadLetterQueueUrl", stack, stage, ssmClient)
    )

    val updatedConfig = config.copy(
      app = appConfig,
      auth = config.auth.copy(provider = authProvider),
      s3 = s3Config,
      elasticsearch = elasticsearchConfig,
      postgres = postgresConfig,
      neo4j = neo4jConfig,
      worker = workerConfig,
      transcribe = transcribeConfig,
      sqs = config.sqs.copy(endpoint = None),
      underlying = config.underlying
        .withValue("play.http.secret.key", fromAnyRef(readSSMParameter("pfi/playSecret", stack, stage, ssmClient)))
        .withValue("pekko.actor.provider", fromAnyRef("local")) // disable Pekko clustering, we query EC2 directly
    )

    // Base logging context, extended with the instance ID when one is available.
    val baseLoggingProperties = Map(
      "stack" -> discoveryConfig.stack,
      "app" -> discoveryConfig.app,
      "stage" -> discoveryConfig.stage
    )
    val jsonLoggingProperties = instanceIdOpt.fold(baseLoggingProperties) { instanceId =>
      baseLoggingProperties + ("instanceId" -> instanceId)
    }

    DiscoveryResult(updatedConfig, jsonLoggingProperties)
  }