# create_managed_disk()
#
# From src/azure-cli/azure/cli/command_modules/vm/custom.py


def create_managed_disk(cmd, resource_group_name, disk_name, location=None,  # pylint: disable=too-many-locals, too-many-branches, too-many-statements, line-too-long
                        size_gb=None, sku='Premium_LRS', os_type=None,
                        source=None, for_upload=None, upload_size_bytes=None,  # pylint: disable=unused-argument
                        # below are generated internally from 'source'
                        source_blob_uri=None, source_disk=None, source_snapshot=None, source_restore_point=None,
                        source_storage_account_id=None, no_wait=False, tags=None, zone=None,
                        disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None,
                        encryption_type=None, disk_encryption_set=None, max_shares=None,
                        disk_iops_read_only=None, disk_mbps_read_only=None,
                        image_reference=None, image_reference_lun=None,
                        gallery_image_reference=None, gallery_image_reference_lun=None,
                        network_access_policy=None, disk_access=None, logical_sector_size=None,
                        tier=None, enable_bursting=None, edge_zone=None, security_type=None, support_hibernation=None,
                        public_network_access=None, accelerated_network=None, architecture=None,
                        data_access_auth_mode=None, gallery_image_reference_type=None, security_data_uri=None,
                        upload_type=None, secure_vm_disk_encryption_set=None, performance_plus=None,
                        optimized_for_frequent_attach=None):
    """Create a managed disk (backs ``az disk create``).

    The disk's ``creationData.createOption`` is derived from which source
    argument was supplied — security data URI, source blob, source disk or
    snapshot, restore point, upload type, or an (gallery) image reference —
    falling back to an empty disk when none is given.

    :param cmd: CLI command context (provides ``cli_ctx``).
    :param resource_group_name: Resource group to create the disk in; also
        used to resolve partial resource IDs passed for related resources.
    :param disk_name: Name of the managed disk to create.
    :param source: Consumed by upstream argument processing only; here it has
        already been expanded into the ``source_*`` parameters.
    :param for_upload: Deprecated/unused here (superseded by ``upload_type``).
    :raises RequiredArgumentMissingError: when ``--upload-size-bytes`` and
        ``--upload-type`` are not used together, or when ``--size-gb`` is
        missing for an empty disk.
    :raises CLIError: when ``--image-reference`` is neither a resource ID nor
        a ``publisher:offer:sku:version`` URN.
    :return: Result of the AAZ ``disk create`` operation (poller if
        ``no_wait`` is falsy semantics apply downstream).
    """
    from azure.mgmt.core.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id

    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)

    # Derive the create option from the most specific source argument present.
    # Order matters: e.g. a security data URI takes precedence over a plain
    # blob import, and explicit upload types beat image references.
    if security_data_uri:
        option = 'ImportSecure'
    elif source_blob_uri:
        option = 'Import'
    elif source_disk or source_snapshot:
        option = 'Copy'
    elif source_restore_point:
        option = 'Restore'
    elif upload_type == 'Upload':
        option = 'Upload'
    elif upload_type == 'UploadWithSecurityData':
        option = 'UploadPreparedSecure'
    elif image_reference or gallery_image_reference:
        option = 'FromImage'
    else:
        option = 'Empty'

    # Importing from a blob requires a storage account ID; if the caller did
    # not supply one, reconstruct it from the blob URI's account name,
    # assuming the account lives in the same subscription and resource group.
    if source_storage_account_id is None and source_blob_uri is not None:
        subscription_id = get_subscription_id(cmd.cli_ctx)
        storage_account_name = source_blob_uri.split('.')[0].split('/')[-1]
        source_storage_account_id = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name)

    if upload_size_bytes is not None and not upload_type:
        raise RequiredArgumentMissingError(
            'usage error: --upload-size-bytes should be used together with --upload-type')

    from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT
    if image_reference is not None:
        if not is_valid_resource_id(image_reference):
            # URN or name
            terms = image_reference.split(':')
            if len(terms) == 4:  # URN
                disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3]
                if disk_version.lower() == 'latest':
                    disk_version = _get_latest_image_version(cmd.cli_ctx, location, disk_publisher, disk_offer,
                                                             disk_sku)
            else:  # error
                raise CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).')
        else:
            # A full resource ID: pull the URN components out of the ID's
            # child-resource segments (publisher/offer/sku/version).
            from azure.mgmt.core.tools import parse_resource_id
            terms = parse_resource_id(image_reference)
            disk_publisher, disk_offer, disk_sku, disk_version = \
                terms['child_name_1'], terms['child_name_3'], terms['child_name_4'], terms['child_name_5']

        # Resolve the image to confirm it exists and to read its
        # hyper-v generation, which drives security defaults below.
        client = _compute_client_factory(cmd.cli_ctx)
        response = client.virtual_machine_images.get(location=location, publisher_name=disk_publisher,
                                                     offer=disk_offer, skus=disk_sku, version=disk_version)

        if hasattr(response, 'hyper_v_generation'):
            if response.hyper_v_generation == 'V1':
                logger.warning(UPGRADE_SECURITY_HINT)
            elif response.hyper_v_generation == 'V2':
                # Align the disk's generation with the V2 image even if the
                # caller asked for V1.
                if hyper_v_generation == 'V1':
                    hyper_v_generation = 'V2'
                # Default V2 images to Trusted Launch; warn if the caller
                # explicitly chose a weaker security type.
                if not security_type:
                    security_type = 'TrustedLaunch'
                if security_type != 'TrustedLaunch':
                    logger.warning(UPGRADE_SECURITY_HINT)

        # image_reference is an ID now
        image_reference = {'id': response.id}
        if image_reference_lun is not None:
            image_reference['lun'] = image_reference_lun

    if gallery_image_reference is not None:
        if not security_type:
            security_type = 'Standard'
        if security_type != 'TrustedLaunch':
            logger.warning(UPGRADE_SECURITY_HINT)

        # The reference may be keyed by 'id', 'sharedGalleryImageId', etc.,
        # depending on the caller-selected reference type.
        key = gallery_image_reference_type if gallery_image_reference_type else 'id'
        gallery_image_reference = {key: gallery_image_reference}
        if gallery_image_reference_lun is not None:
            gallery_image_reference['lun'] = gallery_image_reference_lun

    creation_data = {
        "create_option": option,
        "source_uri": source_blob_uri,
        "image_reference": image_reference,
        "gallery_image_reference": gallery_image_reference,
        "source_resource_id": source_disk or source_snapshot or source_restore_point,
        "storage_account_id": source_storage_account_id,
        "upload_size_bytes": upload_size_bytes,
        "logical_sector_size": logical_sector_size,
        "security_data_uri": security_data_uri,
        "performance_plus": performance_plus
    }

    if size_gb is None and option == "Empty":
        raise RequiredArgumentMissingError(
            'usage error: --size-gb is required to create an empty disk')
    if upload_size_bytes is None and upload_type:
        raise RequiredArgumentMissingError(
            'usage error: --upload-size-bytes is required to create a disk for upload')

    # Expand bare names for related resources into full resource IDs,
    # assuming the current subscription and resource group.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)

    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)

    if secure_vm_disk_encryption_set is not None and not is_valid_resource_id(secure_vm_disk_encryption_set):
        secure_vm_disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=secure_vm_disk_encryption_set)

    encryption = None
    if disk_encryption_set or encryption_type:
        encryption = {
            "type": encryption_type,
            "disk_encryption_set_id": disk_encryption_set
        }

    sku = {"name": sku}

    args = {
        "location": location,
        "creation_data": creation_data,
        "tags": tags or {},
        "sku": sku,
        "disk_size_gb": size_gb,
        "os_type": os_type,
        "encryption": encryption
    }

    # Optional properties are only sent when explicitly set, so the service
    # applies its own defaults otherwise.
    if hyper_v_generation:
        args["hyper_v_generation"] = hyper_v_generation

    if zone:
        args["zones"] = zone
    if disk_iops_read_write is not None:
        args["disk_iops_read_write"] = disk_iops_read_write
    if disk_mbps_read_write is not None:
        args["disk_m_bps_read_write"] = disk_mbps_read_write
    if max_shares is not None:
        args["max_shares"] = max_shares
    if disk_iops_read_only is not None:
        args["disk_iops_read_only"] = disk_iops_read_only
    if disk_mbps_read_only is not None:
        args["disk_m_bps_read_only"] = disk_mbps_read_only
    if network_access_policy is not None:
        args["network_access_policy"] = network_access_policy
    if disk_access is not None:
        args["disk_access_id"] = disk_access
    if tier is not None:
        args["tier"] = tier
    if enable_bursting is not None:
        args["bursting_enabled"] = enable_bursting
    if edge_zone is not None:
        args["extended_location"] = edge_zone
    # The `Standard` is used for backward compatibility to allow customers to keep their current behavior
    # after changing the default values to Trusted Launch VMs in the future.
    if security_type and security_type != COMPATIBLE_SECURITY_TYPE_VALUE:
        args["security_profile"] = {'securityType': security_type}
        if secure_vm_disk_encryption_set:
            args["security_profile"]["secure_vm_disk_encryption_set_id"] = secure_vm_disk_encryption_set
    if support_hibernation is not None:
        args["supports_hibernation"] = support_hibernation
    if public_network_access is not None:
        args["public_network_access"] = public_network_access
    if accelerated_network is not None or architecture is not None:
        # `args` is built fresh above and never contains "supported_capabilities"
        # at this point, so it is always created here (the previous code had an
        # unreachable merge branch for a pre-existing dict).
        args["supported_capabilities"] = {
            "accelerated_network": accelerated_network,
            "architecture": architecture
        }
    if data_access_auth_mode is not None:
        args["data_access_auth_mode"] = data_access_auth_mode
    if optimized_for_frequent_attach is not None:
        args["optimized_for_frequent_attach"] = optimized_for_frequent_attach

    args["no_wait"] = no_wait
    args["disk_name"] = disk_name
    args["resource_group"] = resource_group_name

    from .aaz.latest.disk import Create
    return Create(cli_ctx=cmd.cli_ctx)(command_args=args)