# imports
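"""Regenerate the tutorials' GitHub workflow files, notebook kernelspec metadata, and README.md.

Generated files reference this script as: python3 readme.py
"""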
import contextlib
import os
import json
import glob
import argparse

from configparser import ConfigParser

# define constants
ENABLE_MANUAL_CALLING = True  # whether workflows can be triggered manually via workflow_dispatch
NOT_TESTED_NOTEBOOKS = []  # notebooks that cannot be automated, so exclude them from testing
NOT_SCHEDULED_NOTEBOOKS = []  # notebooks too expensive to run every day, so exclude them from the schedule
READONLY_HEADER = (
    "# This code is autogenerated.\n"
    "# Code is generated by running custom script: python3 readme.py\n"
    "# Any manual changes to this file may cause incorrect behavior.\n"
    "# Any manual changes will be overwritten if the code is regenerated.\n"
)
BRANCH = "main"  # default - do not change
# BRANCH = "sdk-preview"  # this should be deleted when this branch is merged to main
GITHUB_CONCURRENCY_GROUP = (
    "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
)
USE_FORECAST_REQUIREMENTS = "USE_FORECAST_REQUIREMENTS"
COMPUTE_NAMES = "COMPUTE_NAMES"
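
# notebooks_config.ini is expected to hold one section per notebook name with
# per-notebook options; a minimal sketch (section name and values below are
# illustrative, not from a real config):
#
#   [my-notebook]
#   USE_FORECAST_REQUIREMENTS = 1
#   COMPUTE_NAMES = my-cpu-cluster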


def main(args):
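    """Regenerate workflow files, notebook kernelspecs, and the README for all notebooks on disk."""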

    # get list of notebooks
    notebooks = sorted(glob.glob("**/*.ipynb", recursive=True))

    for n in notebooks:
        print(n)

    # write workflows
    write_workflows(notebooks)

    # modify notebooks
    modify_notebooks(notebooks)

    # write readme
    write_readme(notebooks)

    # # write pipeline readme
    # pipeline_dir = "jobs" + os.sep + "pipelines" + os.sep
    # with change_working_dir(pipeline_dir):
    #     pipeline_notebooks = sorted(glob.glob("**/*.ipynb", recursive=True))
    # pipeline_notebooks = [
    #     f"{pipeline_dir}{notebook}" for notebook in pipeline_notebooks
    # ]
    # write_readme(pipeline_notebooks, pipeline_folder=pipeline_dir)


def write_workflows(notebooks):
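    """Write a GitHub workflow file for every notebook that is not excluded from testing."""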
    print("writing .github/workflows...")
    cfg = ConfigParser()
    cfg.read("notebooks_config.ini")
    for notebook in notebooks:
        if not any(excluded in notebook for excluded in NOT_TESTED_NOTEBOOKS):
            # get notebook name
            name = os.path.basename(notebook).replace(".ipynb", "")
            folder = os.path.dirname(notebook)
            classification = folder.replace(os.sep, "-")

            enable_scheduled_runs = True
            if any(excluded in notebook for excluded in NOT_SCHEDULED_NOTEBOOKS):
                enable_scheduled_runs = False

            # write workflow file
            write_notebook_workflow(
                notebook, name, classification, folder, enable_scheduled_runs, cfg
            )
    print("finished writing .github/workflows")


def get_additional_requirements(req_name, req_path):
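    """Return a workflow step that pip-installs the given requirements file."""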
    return f"""
    - name: pip install {req_name} reqs
      run: pip install -r {req_path}"""


def get_mlflow_import(notebook):
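    """Return an mlflow requirements step if the notebook imports mlflow, else an empty string."""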
    with open(notebook, "r", encoding="utf-8") as f:
        if "import mlflow" in f.read():
            return get_additional_requirements(
                "mlflow", "sdk/python/mlflow-requirements.txt"
            )
        else:
            return ""


def get_forecast_reqs(notebook_name, nb_config):
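    """Return a forecasting requirements step if the notebook's config enables USE_FORECAST_REQUIREMENTS."""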
    is_required = int(
        nb_config.get(
            section=notebook_name, option=USE_FORECAST_REQUIREMENTS, fallback=0
        )
    )
    if is_required:
        return get_additional_requirements(
            "forecasting", "sdk/python/forecasting-requirements.txt"
        )
    else:
        return ""


def write_notebook_workflow(
    notebook, name, classification, folder, enable_scheduled_runs, nb_config
):
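    """Build the workflow YAML for one notebook and write it to .github/workflows if it changed."""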
    is_pipeline_notebook = ("jobs-pipelines" in classification) or (
        "assets-component" in classification
    )
    # use ${{ github.workspace }} to avoid a duplicate name in the working
    # directory during checkout
    # https://github.com/actions/checkout/issues/739
    github_workspace = "${{ github.workspace }}"
    mlflow_import = get_mlflow_import(notebook)
    forecast_import = get_forecast_reqs(name, nb_config)
    posix_folder = folder.replace(os.sep, "/")
    posix_notebook = notebook.replace(os.sep, "/")
    runs_on = "ubuntu-latest"
    workflow_sched = "0 */8 * * *"
    if "explore-data" in name:
        runs_on = "ubuntu-20.04"
    if "deploy-model" in name:
        workflow_sched = "0 */12 * * *"

    workflow_yaml = f"""{READONLY_HEADER}
name: tutorials-{classification}-{name}
# This file is created by tutorials/readme.py.
# Please do not edit directly.
on:\n"""
    if ENABLE_MANUAL_CALLING:
        workflow_yaml += f"""  workflow_dispatch:\n"""
    if enable_scheduled_runs:
        workflow_yaml += f"""  schedule:
    - cron: "{workflow_sched}"\n"""
    workflow_yaml += f"""  pull_request:
    branches:
      - main\n"""
    if BRANCH != "main":
        workflow_yaml += f"""      - {BRANCH}\n"""
        if is_pipeline_notebook:
            workflow_yaml += "      - pipeline/*\n"
    workflow_yaml += f"""    paths:
      - tutorials/{posix_folder}/**
      - .github/workflows/tutorials-{classification}-{name}.yml
      - sdk/python/dev-requirements.txt
      - infra/bootstrapping/**
      - sdk/python/setup.sh
permissions:
  id-token: write
concurrency:
  group: {GITHUB_CONCURRENCY_GROUP}
  cancel-in-progress: true
jobs:
  build:
    runs-on: {runs_on}
    steps:
    - name: check out repo
      uses: actions/checkout@v2
    - name: setup python
      uses: actions/setup-python@v2
      with:
        python-version: "3.8"
    - name: pip install notebook reqs
      run: pip install -r sdk/python/dev-requirements.txt{mlflow_import}{forecast_import}
    - name: azure login
      uses: azure/login@v1
      with:
        client-id: ${{{{ secrets.OIDC_AZURE_CLIENT_ID }}}}
        tenant-id: ${{{{ secrets.OIDC_AZURE_TENANT_ID }}}}
        subscription-id: ${{{{ secrets.OIDC_AZURE_SUBSCRIPTION_ID }}}}
    - name: bootstrap resources
      run: |
          echo '{GITHUB_CONCURRENCY_GROUP}';
          bash bootstrap.sh
      working-directory: infra/bootstrapping
      continue-on-error: false
    - name: setup SDK
      run: |
          source "{github_workspace}/infra/bootstrapping/sdk_helpers.sh";
          source "{github_workspace}/infra/bootstrapping/init_environment.sh";
          bash setup.sh
      working-directory: sdk/python
      continue-on-error: true
    - name: validate readme
      run: |
          python check-readme.py "{github_workspace}" "{github_workspace}/tutorials/{posix_folder}"
      working-directory: infra/bootstrapping
      continue-on-error: false
    - name: setup-cli
      run: |
          source "{github_workspace}/infra/bootstrapping/sdk_helpers.sh";
          source "{github_workspace}/infra/bootstrapping/init_environment.sh";
          bash setup.sh
      working-directory: cli
      continue-on-error: true
    - name: Eagerly cache access tokens for required scopes
      run: |
          # Workaround for azure-cli's lack of support for ID token refresh
          # Taken from: https://github.com/Azure/login/issues/372#issuecomment-2056289617

          # Management
          az account get-access-token --scope https://management.azure.com/.default --output none
          # ML
          az account get-access-token --scope https://ml.azure.com/.default --output none
    - name: run {posix_notebook}
      run: |
          source "{github_workspace}/infra/bootstrapping/sdk_helpers.sh";
          source "{github_workspace}/infra/bootstrapping/init_environment.sh";
          bash "{github_workspace}/infra/bootstrapping/sdk_helpers.sh" generate_workspace_config "../../.azureml/config.json";
          bash "{github_workspace}/infra/bootstrapping/sdk_helpers.sh" replace_template_values "{name}.ipynb";
          [ -f "../../.azureml/config" ] && cat "../../.azureml/config";"""

    if name == "debug-online-endpoints-locally-in-visual-studio-code":
        workflow_yaml += f"""
          sed -i -e "s/<ENDPOINT_NAME>/localendpoint/g" {name}.ipynb

          # Create a dummy executable for VSCode
          mkdir -p /tmp/code
          touch /tmp/code/code
          chmod +x /tmp/code/code
          export PATH="/tmp/code:$PATH"\n"""
    if "explore-data" in name:
        workflow_yaml += f"""

          # load data into 'data' subdirectory
          mkdir data
          cd data
          wget https://azuremlexamples.blob.core.windows.net/datasets/credit_card/default_of_credit_card_clients.csv
          cd .."""

    if not ("automl" in folder):
        workflow_yaml += f"""
          papermill -k python {name}.ipynb {name}.output.ipynb
      working-directory: tutorials/{posix_folder}"""
    elif "nlp" in folder or "image" in folder:
        # these notebooks need a GPU cluster, so override compute_name with the dedicated GPU cluster
        workflow_yaml += f"""          
          papermill -k python -p compute_name automl-gpu-cluster {name}.ipynb {name}.output.ipynb
      working-directory: tutorials/{posix_folder}"""
    else:
        # these notebooks need a CPU cluster, so override compute_name with the dedicated CPU cluster
        workflow_yaml += f"""
          papermill -k python -p compute_name automl-cpu-cluster {name}.ipynb {name}.output.ipynb
      working-directory: tutorials/{posix_folder}"""

    workflow_yaml += f"""
    - name: upload notebook's working folder as an artifact
      if: ${{{{ always() }}}}
      uses: ./.github/actions/upload-artifact
      with:
        name: {name}
        path: tutorials/{posix_folder}\n"""

    compute_names = nb_config.get(section=name, option=COMPUTE_NAMES, fallback=None)
    if compute_names:
        workflow_yaml += f"""
    - name: Remove the compute if the notebook did not clean it up properly
      run: bash "{github_workspace}/infra/bootstrapping/remove_computes.sh" {compute_names}\n"""

    workflow_file = os.path.join(
        "..", ".github", "workflows", f"tutorials-{classification}-{name}.yml"
    )

    workflow_before = ""
    if os.path.exists(workflow_file):
        with open(workflow_file, "r") as f:
            workflow_before = f.read()

    if workflow_yaml != workflow_before:
        # write workflow
        with open(workflow_file, "w") as f:
            f.write(workflow_yaml)


def write_readme(notebooks, pipeline_folder=None):
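    """Assemble README.md from prefix.md, a generated notebook status table, and suffix.md."""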
    prefix = "prefix.md"
    suffix = "suffix.md"
    readme_file = "README.md"
    if pipeline_folder:
        prefix = os.path.join(pipeline_folder, prefix)
        suffix = os.path.join(pipeline_folder, suffix)
        readme_file = os.path.join(pipeline_folder, readme_file)

    if BRANCH == "":
        branch = "main"
    else:
        branch = BRANCH
        # read in prefix.md and suffix.md
        with open(prefix, "r") as f:
            prefix = f.read()
        with open(suffix, "r") as f:
            suffix = f.read()

        # define markdown tables
        notebook_table = f"Test Status is for branch - **_{branch}_**\n|Title|Notebook|Description|Status|\n|--|--|--|--|\n"
        for notebook in notebooks:
            # get notebook name
            name = notebook.split(os.sep)[-1].replace(".ipynb", "")
            area = notebook.split(os.sep)[0]
            sub_area = notebook.split(os.sep)[1]
            folder = os.path.dirname(notebook)
            classification = folder.replace(os.sep, "-")

            try:
                # read in notebook
                with open(notebook, "r") as f:
                    data = json.load(f)

                description = "*no description*"
                try:
                    if data["metadata"]["description"] is not None:
                        description = data["metadata"]["description"]["description"]
                except BaseException:
                    pass
            except BaseException:
                print("Could not load", notebook)
                pass

            if any(excluded in notebook for excluded in NOT_TESTED_NOTEBOOKS):
                description += " - _This sample is excluded from automated tests_"
            if any(excluded in notebook for excluded in NOT_SCHEDULED_NOTEBOOKS):
                description += " - _This sample is only tested on demand_"

            if pipeline_folder:
                notebook = os.path.relpath(notebook, pipeline_folder)

            # write workflow file
            notebook_table += (
                write_readme_row(
                    branch,
                    notebook.replace(os.sep, "/"),
                    name,
                    classification,
                    area,
                    sub_area,
                    description,
                )
                + "\n"
            )

        print("writing README.md...")
        with open(readme_file, "w") as f:
            f.write(prefix + notebook_table + suffix)
        print("finished writing README.md")


def write_readme_row(
    branch, notebook, name, classification, area, sub_area, description
):
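    """Return one markdown table row linking the notebook and its workflow status badge."""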
    gh_link = "https://github.com/Azure/azureml-examples/actions/workflows"

    nb_name = f"[{name}]({notebook})"
    status = f"[![{name}]({gh_link}/tutorials-{classification}-{name}.yml/badge.svg?branch={branch})]({gh_link}/tutorials-{classification}-{name}.yml)"

    row = f"|{area}|{nb_name}|{description}|{status}|"
    return row


def modify_notebooks(notebooks):
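    """Set each notebook's kernelspec metadata to the Python 3.10 SDK v2 kernel."""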
    print("modifying notebooks...")
    # setup variables
    kernelspec = {
        "display_name": "Python 3.10 - SDK v2",
        "language": "python",
        "name": "python310-sdkv2",
    }

    # for each notebook
    for notebook in notebooks:

        # read in notebook
        with open(notebook, "r", encoding="utf-8") as f:
            data = json.load(f)

        # update metadata
        data["metadata"]["kernelspec"] = kernelspec

        # write notebook
        with open(notebook, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=1, ensure_ascii=False)
            f.write("\n")

    print("finished modifying notebooks...")


@contextlib.contextmanager
def change_working_dir(path):
    """Context manager for changing the current working directory"""

    saved_path = os.getcwd()
    os.chdir(str(path))
    try:
        yield
    finally:
        os.chdir(saved_path)


# run functions
if __name__ == "__main__":

    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--check-readme", action="store_true")
    args = parser.parse_args()
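    # example invocation: python3 readme.py [--check-readme]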

    # call main
    main(args)
