def execute_dump_instance()

in mysqloperator/backup_main.py

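Module-level context, reconstructed from the names used in the body: the standard imports below are certain, while `DumpInstance`, `utils`, `create_oci_config_file_from_envs`, and `get_dir_size` come from the operator's own modules and are only referenced here:

import logging
import multiprocessing
import os
from typing import Optional

import mysqlsh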

def execute_dump_instance(backup_source: dict, profile: DumpInstance, backupdir: Optional[str], backup_name: str, logger: logging.Logger):
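    """Run MySQL Shell's util.dump_instance() against backup_source,
    writing to the storage target selected by the profile, and return
    a dict summarizing the backup."""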
    shell = mysqlsh.globals.shell
    util = mysqlsh.globals.util

    start = utils.isotime()

    options = profile.dumpOptions.copy()
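    # Default the dump parallelism to one thread per CPU unless the profile overrides it.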
    if "threads" not in options:
        options["threads"] = multiprocessing.cpu_count()

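    # Resolve the dump destination from whichever storage backend the profile configures:
    # OCI Object Storage, S3, Azure Blob Storage, or a local/PVC directory.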
    if profile.storage.ociObjectStorage:
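        # Build an OCI config file from the credentials passed in via environment variables.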
        oci_config = create_oci_config_file_from_envs(os.environ, logger)
        options["osBucketName"] = profile.storage.ociObjectStorage.bucketName
        options["ociConfigFile"] = oci_config["config"]
        options["ociProfile"] = oci_config["profile"]
        logger.info(f"options={options}")
        if profile.storage.ociObjectStorage.prefix:
            output = os.path.join(
                profile.storage.ociObjectStorage.prefix, backup_name)
        else:
            output = backup_name
    elif profile.storage.s3:
        options["s3BucketName"] = profile.storage.s3.bucketName
        options["s3Profile"] = profile.storage.s3.profile
        if profile.storage.s3.endpoint:
            options["s3EndpointOverride"] = profile.storage.s3.endpoint
        if profile.storage.s3.prefix:
            output = os.path.join(
                profile.storage.s3.prefix, backup_name)
        else:
            output = backup_name
    elif profile.storage.azure:
        options["azureContainerName"] = profile.storage.azure.containerName
        if profile.storage.azure.prefix:
            output = os.path.join(
                profile.storage.azure.prefix, backup_name)
        else:
            output = backup_name
    else:
        # No object storage configured: dump to the local/PVC backup directory.
        output = os.path.join(backupdir, backup_name)

    logger.info(
        f"dump_instance starting: output={output}  options={options}  source={backup_source['user']}@{backup_source['host']}:{backup_source['port']}")

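    # util.dump_instance() runs against the shell's active global session, so connect first.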
    try:
        shell.connect(backup_source)
    except mysqlsh.Error as e:
        logger.error(
            f"Could not connect to {backup_source['host']}:{backup_source['port']}: {e}")
        raise

    try:
        util.dump_instance(output, options)
    except mysqlsh.Error as e:
        logger.error(f"dump_instance failed: {e}")
        raise

    # TODO get backup size and other stats from the dump cmd itself

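    # Assemble the summary info returned to the caller for this backup.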
    if profile.storage.ociObjectStorage:
        # Read the tenancy OCID back out of the generated OCI config file.
        with open(options["ociConfigFile"], "r") as f:
            tenancy = [line.split("=", 1)[1].strip()
                       for line in f if line.startswith("tenancy")][0]

        info = {
            "method": "dump-instance/oci-bucket",
            "source": f"{backup_source['user']}@{backup_source['host']}:{backup_source['port']}",
            "bucket": profile.storage.ociObjectStorage.bucketName,
            "ociTenancy": tenancy
        }
    elif profile.storage.s3:
        info = {
            "method": "dump-instance/s3",
            "source": f"{backup_source['user']}@{backup_source['host']}:{backup_source['port']}",
            "bucket": profile.storage.s3.bucketName,
        }
    elif profile.storage.azure:
        info = {
            "method": "dump-instance/azure-blob-storage",
            "source": f"{backup_source['user']}@{backup_source['host']}:{backup_source['port']}",
            "container": profile.storage.azure.containerName,
        }
    elif profile.storage.persistentVolumeClaim:
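        # Report the space left on the backup volume and the size of the new dump, in GiB.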
        fsinfo = os.statvfs(backupdir)
        gb_avail = (fsinfo.f_frsize * fsinfo.f_bavail) / (1024*1024*1024)
        backup_size = get_dir_size(output) / (1024*1024*1024)
        info = {
            "method": "dump-instance/volume",
            "source": f"{backup_source['user']}@{backup_source['host']}:{backup_source['port']}",
            "spaceAvailable": f"{gb_avail:.4}G",
            "size": f"{backup_size:.4}G"
        }
    else:
        # The profile must configure exactly one of the storage backends above.
        assert False, "unknown backup storage type"

    logger.info(f"dump_instance finished successfully")

    return info
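
A minimal invocation sketch for a profile backed by a persistent volume claim; the `backup_source` keys mirror what the function reads above, while the profile object, paths, and names are illustrative assumptions rather than the operator's real call site:

# Hypothetical call site -- profile, paths, and names are assumptions.
logger = logging.getLogger("backup")
source = {"user": "root", "password": "...",
          "host": "mycluster-0.mycluster-instances", "port": 3306}
info = execute_dump_instance(source, profile, "/mnt/storage/backups",
                             "mybackup-20240101-000000", logger)
logger.info(f"backup info: {info}")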