# frozen_string_literal: true

module ReleaseTools
  module PipelineTracer
    class MetricsService
      include ::SemanticLogger::Loggable

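      # Upper bound for the depth argument; #execute rejects anything outside
      # 0..MAX_DEPTH with an InvalidDepthError.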
      MAX_DEPTH = 3

      InvalidDepthError = Class.new(ArgumentError)

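      # Convenience constructor that resolves the pipeline from its web URL and
      # forwards the remaining keyword arguments to .new.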
      def self.from_pipeline_url(pipeline_url, version:, depth: 2, upstream_pipeline_name: nil)
        pipeline = Pipeline.from_url(pipeline_url)

        new(
          pipeline:,
          version:,
          depth:,
          upstream_pipeline_name:
        )
      end

      # @param [PipelineTracer::Pipeline] pipeline
      # @param [String] version the DEPLOY_VERSION.
      # @param [Integer] depth the depth to which metrics should be generated for this pipeline, between 0
      #         and MAX_DEPTH. If depth is 1, metrics will not be generated for jobs in downstream pipelines.
      # @param [String, nil] upstream_pipeline_name name of the pipeline that triggered this one, recorded as a label.
      # @return [PipelineTracer::MetricsService]
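      #
      # @example Illustrative usage; the URL and version below are placeholders
      #   MetricsService
      #     .from_pipeline_url('https://gitlab.example.com/group/project/-/pipelines/123', version: '1.2.3')
      #     .execute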
      def initialize(pipeline:, version:, depth: 2, upstream_pipeline_name: nil)
        @pipeline = pipeline
        @version = version
        @depth = depth
        @upstream_pipeline_name = upstream_pipeline_name
      end

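      # Pushes a duration metric for the pipeline itself and, when depth allows,
      # for its jobs and downstream pipelines.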
      def execute
        logger.info('Gathering duration metrics for pipeline', pipeline_url: pipeline.url)

        raise InvalidDepthError, "Depth must be between 0 and #{MAX_DEPTH}" unless depth.between?(0, MAX_DEPTH)

        return if SharedStatus.dry_run?

        unless pipeline.end_time
          logger.info('Not generating duration metrics for pipeline since end_time is unknown', pipeline_url: pipeline.url)
          return
        end

        metrics_client.set('deployment_pipeline_duration_seconds', pipeline.real_time_duration, labels: pipeline_duration_labels)

        observe_duration_as_histogram

        return if depth < 1

        process_pipeline_jobs
        process_downstream_pipelines
      end

      private

      attr_reader :pipeline, :version, :depth, :upstream_pipeline_name

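      # Additionally records the pipeline duration in a histogram, but only for
      # the coordinator and packager pipelines matched below.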
      def observe_duration_as_histogram
        pipeline_name = pipeline.details.name
        return if pipeline_name.nil?

        metric_name = if pipeline.project == Project::ReleaseTools.ops_path && pipeline_name.start_with?('Deployment pipeline')
                        'deployment_coordinator_pipeline_duration_seconds'
                      elsif pipeline.project == Project::OmnibusGitlab.dev_path && pipeline_name == 'AUTO_DEPLOY_BUILD_PIPELINE'
                        'deployment_packager_omnibus_pipeline_duration_seconds'
                      elsif pipeline.project == Project::CNGImage.dev_path && %w[AUTO_DEPLOY_BUILD_PIPELINE AUTO_DEPLOY_TAG_BUILD_PIPELINE].include?(pipeline_name)
                        'deployment_packager_cng_pipeline_duration_seconds'
                      end

        return if metric_name.nil?

        metrics_client.observe(metric_name, pipeline.real_time_duration, labels: pipeline_duration_histogram_labels)
      end

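      # Emits a duration metric for every completed job in the pipeline and
      # follows any downstream pipeline a job triggered manually.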
      def process_pipeline_jobs
        pipeline.jobs.each_page do |page|
          page.each do |job_attributes|
            job = Job.new(job_attributes, pipeline.client)
            next unless job.completed?

            metrics_client.set('deployment_job_duration_seconds', job.real_time_duration, labels: job_duration_labels(job))

            process_triggered_pipeline(job)
          end
        end
      end

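      # Recurses into pipelines triggered through bridge jobs, spending one unit
      # of the depth budget per level.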
      def process_downstream_pipelines
        pipeline.bridge_jobs.each_page do |page|
          page.each do |bridge|
            next unless bridge.downstream_pipeline

            self.class
              .from_pipeline_url(
                bridge.downstream_pipeline.web_url,
                version: version,
                depth: depth - 1,
                upstream_pipeline_name: pipeline.details.name
              )
              .execute
          end
        end
      end

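      # Recurses into a downstream pipeline that was triggered manually from the
      # given job, again reducing the depth by one.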
      def process_triggered_pipeline(job)
        return unless job.triggered_downstream_pipeline?

        logger.info('Gathering metrics for manually triggered downstream pipeline', downstream_pipeline_url: job.triggered_pipeline_url, job_url: job.web_url)

        self.class
          .from_pipeline_url(
            job.triggered_pipeline_url,
            version: version,
            depth: depth - 1,
            upstream_pipeline_name: pipeline.details.name
          )
          .execute
      end

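      # Comma-separated label values for deployment_job_duration_seconds.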
      def job_duration_labels(job)
        env = job.environment_from_name
        "#{job.name},#{job.stage},#{job.status},#{pipeline.project},#{version}," \
          "#{target_env(env)},#{target_stage(env)},#{short_job_name(job)}," \
          "#{job.web_url},#{job.id},#{pipeline.details.id},#{pipeline.details.name}"
      end

      def target_env(environment)
        parse_environment(environment)[0]
      end

      def target_stage(environment)
        parse_environment(environment)[1]
      end

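      # Splits an environment name into [environment, stage]. For example,
      # 'gprd-cny' becomes ['gprd', 'cny'], while 'gprd' becomes ['gprd', 'main'].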
      def parse_environment(environment)
        return [] unless environment

        return [environment, 'main'] unless environment.end_with?('-cny')

        [environment.delete_suffix('-cny'), 'cny']
      end

      def short_job_name(job)
        job.name_without_environment
      end

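      # Comma-separated label values for deployment_pipeline_duration_seconds.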
      def pipeline_duration_labels
        env = pipeline.deploy_environment
        "#{pipeline.project},#{version},#{pipeline.details.status},#{pipeline.details.name}," \
          "#{pipeline.details.id},#{pipeline.details.web_url},#{target_env(env)}," \
          "#{target_stage(env)},#{upstream_pipeline_name}"
      end

      def pipeline_duration_histogram_labels
        pipeline.details.status.to_s
      end

      def metrics_client
        @metrics_client ||= Metrics::Client.new
      end
    end
  end
end
