/**
 * pai-dlc-20201203/main.tea
 */
import Util;
import OpenApi;
import OpenApiUtil;
import EndpointUtil;
extends OpenApi;
init(config: OpenApi.Config){
super(config);
@endpointRule = 'regional';
@endpointMap = {
'ap-northeast-2-pop' = 'pai-dlc.aliyuncs.com',
'ap-south-1' = 'pai-dlc.aliyuncs.com',
'ap-southeast-2' = 'pai-dlc.aliyuncs.com',
'cn-beijing-finance-1' = 'pai-dlc.aliyuncs.com',
'cn-beijing-finance-pop' = 'pai-dlc.aliyuncs.com',
'cn-beijing-gov-1' = 'pai-dlc.aliyuncs.com',
'cn-beijing-nu16-b01' = 'pai-dlc.aliyuncs.com',
'cn-chengdu' = 'pai-dlc.aliyuncs.com',
'cn-edge-1' = 'pai-dlc.aliyuncs.com',
'cn-fujian' = 'pai-dlc.aliyuncs.com',
'cn-haidian-cm12-c01' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-bj-b01' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-finance' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-internal-prod-1' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-internal-test-1' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-internal-test-2' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-internal-test-3' = 'pai-dlc.aliyuncs.com',
'cn-hangzhou-test-306' = 'pai-dlc.aliyuncs.com',
'cn-hongkong-finance-pop' = 'pai-dlc.aliyuncs.com',
'cn-huhehaote' = 'pai-dlc.aliyuncs.com',
'cn-huhehaote-nebula-1' = 'pai-dlc.aliyuncs.com',
'cn-north-2-gov-1' = 'pai-dlc.aliyuncs.com',
'cn-qingdao' = 'pai-dlc.aliyuncs.com',
'cn-qingdao-nebula' = 'pai-dlc.aliyuncs.com',
'cn-shanghai-et15-b01' = 'pai-dlc.aliyuncs.com',
'cn-shanghai-et2-b01' = 'pai-dlc.aliyuncs.com',
'cn-shanghai-inner' = 'pai-dlc.aliyuncs.com',
'cn-shanghai-internal-test-1' = 'pai-dlc.aliyuncs.com',
'cn-shenzhen-finance-1' = 'pai-dlc.aliyuncs.com',
'cn-shenzhen-inner' = 'pai-dlc.aliyuncs.com',
'cn-shenzhen-st4-d01' = 'pai-dlc.aliyuncs.com',
'cn-shenzhen-su18-b01' = 'pai-dlc.aliyuncs.com',
'cn-wuhan' = 'pai-dlc.aliyuncs.com',
'cn-yushanfang' = 'pai-dlc.aliyuncs.com',
'cn-zhangbei' = 'pai-dlc.aliyuncs.com',
'cn-zhangbei-na61-b01' = 'pai-dlc.aliyuncs.com',
'cn-zhangjiakou' = 'pai-dlc.aliyuncs.com',
'cn-zhangjiakou-na62-a01' = 'pai-dlc.aliyuncs.com',
'cn-zhengzhou-nebula-1' = 'pai-dlc.aliyuncs.com',
'eu-west-1' = 'pai-dlc.aliyuncs.com',
'eu-west-1-oxs' = 'pai-dlc.aliyuncs.com',
'me-east-1' = 'pai-dlc.aliyuncs.com',
'rus-west-1-pop' = 'pai-dlc.aliyuncs.com',
};
checkConfig(config);
@endpoint = getEndpoint('pai-dlc', @regionId, @endpointRule, @network, @suffix, @endpointMap, @endpoint);
}
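/**
 * Resolves the endpoint for a request. An explicitly configured endpoint takes
 * precedence, followed by a region-specific entry in the endpoint map; otherwise
 * the endpoint is derived from the product-level endpoint rules.
 */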
function getEndpoint(productId: string, regionId: string, endpointRule: string, network: string, suffix: string, endpointMap: map[string]string, endpoint: string) throws: string{
if (!Util.empty(endpoint)) {
return endpoint;
}
if (!Util.isUnset(endpointMap) && !Util.empty(endpointMap[regionId])) {
return endpointMap[regionId];
}
return EndpointUtil.getEndpointRules(productId, regionId, endpointRule, network, suffix);
}
model AIMasterMessage {
extended?: string(name='Extended'),
jobRestartCount?: int32(name='JobRestartCount'),
messageContent?: string(name='MessageContent'),
messageEvent?: string(name='MessageEvent'),
messageVersion?: int32(name='MessageVersion'),
restartType?: string(name='RestartType'),
}
model AliyunAccounts {
aliyunUid?: string(name='AliyunUid'),
employeeId?: string(name='EmployeeId'),
gmtCreateTime?: string(name='GmtCreateTime'),
gmtModifyTime?: string(name='GmtModifyTime'),
}
model AssignNodeSpec {
antiAffinityNodeNames?: string(name='AntiAffinityNodeNames', example='lingjxxxxxxxx'),
enableAssignNode?: boolean(name='EnableAssignNode', example='true'),
nodeNames?: string(name='NodeNames', example='lingjxxxxxxxx'),
}
model AssumeUserInfo {
accessKeyId?: string(name='AccessKeyId'),
id?: string(name='Id'),
securityToken?: string(name='SecurityToken'),
type?: string(name='Type'),
}
model CodeSourceItem {
codeBranch?: string(name='CodeBranch', example='master'),
codeCommit?: string(name='CodeCommit', example='44da1*******'),
codeRepo?: string(name='CodeRepo', example='https://code.aliyun.com/pai-dlc/examples.git'),
codeRepoAccessToken?: string(name='CodeRepoAccessToken'),
codeRepoUserName?: string(name='CodeRepoUserName', example='user'),
codeSourceId?: string(name='CodeSourceId', example='code-20210111103721-85qz*****'),
description?: string(name='Description', example='code source of dlc examples'),
displayName?: string(name='DisplayName', example='MyCodeSourceName1'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-18T12:52:15Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-18T12:52:15Z'),
userId?: string(name='UserId', example='115**********'),
}
model ContainerSpec {
args?: [ string ](name='Args'),
command?: [ string ](name='Command'),
env?: [
EnvVar
](name='Env'),
image?: string(name='Image', example='registry.cn-hangzhou.aliyuncs.com/pai-dlc/curl:v1.0.0'),
name?: string(name='Name', example='data-init'),
resources?: ResourceRequirements(name='Resources'),
workingDir?: string(name='WorkingDir', example='/root'),
}
model CredentialConfig {
aliyunEnvRoleKey?: string(name='AliyunEnvRoleKey'),
credentialConfigItems?: [
CredentialConfigItem
](name='CredentialConfigItems'),
enableCredentialInject?: boolean(name='EnableCredentialInject'),
}
model CredentialConfigItem {
key?: string(name='Key'),
roles?: [
CredentialRole
](name='Roles', nullable=true),
type?: string(name='Type'),
}
model CredentialRole {
assumeRoleFor?: string(name='AssumeRoleFor'),
assumeUserInfo?: AssumeUserInfo(name='AssumeUserInfo'),
policy?: string(name='Policy'),
roleArn?: string(name='RoleArn'),
roleType?: string(name='RoleType'),
}
model DataSourceItem {
dataSourceId?: string(name='DataSourceId', example='data-20210114104214-vf9lowjt3pso'),
dataSourceType?: string(name='DataSourceType', example='nas'),
description?: string(name='Description', example='data source of dlc examples'),
displayName?: string(name='DisplayName', example='nas-data'),
endpoint?: string(name='Endpoint', example='oss-cn-beijing-internal.aliyuncs.com'),
fileSystemId?: string(name='FileSystemId', example='1ca404****'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:00Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-12T14:36:00Z'),
mountPath?: string(name='MountPath', example='/root/data/'),
options?: string(name='Options', example='{"key": "value"}'),
path?: string(name='Path', example='oss://mybucket/path/to/dir'),
userId?: string(name='UserId', example='123456789'),
}
model DebuggerConfig {
content?: string(name='Content', example='{\\"description\\":\\"This is a new pytorchjob template\\"}'),
debuggerConfigId?: string(name='DebuggerConfigId', example='dc-vf9lowjt3pso'),
description?: string(name='Description', example='This is a basic PyTorch configuration template'),
displayName?: string(name='DisplayName', example='Pytorch Experiment Config'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:00Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-12T14:36:00Z'),
}
model DebuggerJob {
debuggerJobId?: string(name='DebuggerJobId', example='dlc20210126170216-mtl37ge7gkvdz'),
displayName?: string(name='DisplayName', example='dlc debugger test'),
duration?: string(name='Duration', example='2932'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:00Z'),
gmtFailedTime?: string(name='GmtFailedTime'),
gmtFinishTime?: string(name='GmtFinishTime'),
gmtRunningTime?: string(name='GmtRunningTime'),
gmtStoppedTime?: string(name='GmtStoppedTime'),
gmtSubmittedTime?: string(name='GmtSubmittedTime'),
gmtSucceedTime?: string(name='GmtSucceedTime'),
status?: string(name='Status', example='Running'),
userId?: string(name='UserId', example='12344556'),
workspaceId?: string(name='WorkspaceId', example='workspace01'),
workspaceName?: string(name='WorkspaceName', example='public'),
}
model DebuggerJobIssue {
debuggerJobIssue?: string(name='DebuggerJobIssue', example='{"Name": "CPUBottleneck", "Triggered": 10, "Violations": 2, "Details": "{}"}'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:00Z'),
jobDebuggerIssueId?: string(name='JobDebuggerIssueId', example='de-826ca1bcfba30'),
jobId?: string(name='JobId', example='dlc-20210126170216-mtl37ge7gkvdz'),
reasonCode?: string(name='ReasonCode', example='1002300'),
reasonMessage?: string(name='ReasonMessage', example='Low GPU utilization'),
ruleName?: string(name='RuleName', example='ProfileReport'),
}
model DebuggerResult {
debuggerConfigContent?: string(name='DebuggerConfigContent', example='{\\"description\\":\\"This is a new pytorchjob template\\"}'),
debuggerJobIssues?: string(name='DebuggerJobIssues', example='{ "ProfileReport": { "Name": "CPUBottleneck","Triggered": 10,"Violations": 2,"Details": "{}"}, "LowCPU": { "Name": "CPUBottleneck","Triggered": 10,"Violations": 2,"Details": "{}"}}'),
debuggerJobStatus?: string(name='DebuggerJobStatus', example='{"Running": 1, "Failed": 1, "Succeeded": 2}'),
debuggerReportURL?: string(name='DebuggerReportURL', example='http://xxx.com/debug/report/download/new_xxxx.html'),
jobDisplayName?: string(name='JobDisplayName', example='dlc debugger test'),
jobId?: string(name='JobId', example='dlc-20210126170216-mtl37ge7gkvdz'),
jobUserId?: string(name='JobUserId', example='12344556'),
}
model EcsSpec {
acceleratorType?: string(name='AcceleratorType', example='GPU'),
cpu?: int32(name='Cpu', example='12'),
defaultGPUDriver?: string(name='DefaultGPUDriver', example='470.199.02'),
gpu?: int32(name='Gpu', example='1'),
gpuMemory?: int32(name='GpuMemory', example='80'),
gpuType?: string(name='GpuType', example='NVIDIA v100'),
instanceType?: string(name='InstanceType', example='ecs.gn6e-c12g1.3xlarge'),
isAvailable?: boolean(name='IsAvailable', example='true'),
memory?: int32(name='Memory', example='92'),
nonProtectSpotDiscount?: float(name='NonProtectSpotDiscount', example='0.1'),
paymentTypes?: [ string ](name='PaymentTypes'),
resourceType?: string(name='ResourceType', example='ECS'),
spotStockStatus?: string(name='SpotStockStatus', example='WithStock'),
supportedGPUDrivers?: [ string ](name='SupportedGPUDrivers'),
}
model EnvVar {
name?: string(name='Name', example='ENABLE_DEBUG'),
value?: string(name='Value', example='true'),
}
model EventInfo {
content?: string(name='Content'),
id?: string(name='Id'),
podId?: string(name='PodId'),
podUid?: string(name='PodUid'),
time?: string(name='Time'),
}
model ExtraPodSpec {
initContainers?: [
ContainerSpec
](name='InitContainers'),
lifecycle?: Lifecycle(name='Lifecycle'),
podAnnotations?: map[string]string(name='PodAnnotations', deprecated=true),
podLabels?: map[string]string(name='PodLabels', deprecated=true),
sharedVolumeMountPaths?: [ string ](name='SharedVolumeMountPaths'),
sideCarContainers?: [
ContainerSpec
](name='SideCarContainers'),
}
model FreeResourceClusterControlItem {
clusterID?: string(name='ClusterID'),
clusterName?: string(name='ClusterName'),
crossClusters?: boolean(name='CrossClusters'),
enableFreeResource?: boolean(name='EnableFreeResource'),
freeResourceClusterControlId?: string(name='FreeResourceClusterControlId', example='frcc-whateversth'),
gmtCreateTime?: string(name='GmtCreateTime'),
gmtModifyTime?: string(name='GmtModifyTime'),
regionID?: string(name='RegionID'),
}
model FreeResourceDetail {
amount?: int32(name='Amount', example='2'),
resourceType?: string(name='ResourceType', example='CPU'),
}
model FreeResourceItem {
availableNumber?: long(name='AvailableNumber', example='2'),
clusterID?: string(name='ClusterID'),
clusterName?: string(name='ClusterName'),
freeResourceId?: string(name='FreeResourceId', example='freeres-whateversth'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-18T12:52:15Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-18T12:52:15Z'),
regionID?: string(name='RegionID', example='inner'),
resourceType?: string(name='ResourceType', example='cpu'),
}
model GPUDetail {
GPU?: string(name='GPU'),
GPUType?: string(name='GPUType', example='Tesla-V100-32G'),
GPUTypeFullName?: string(name='GPUTypeFullName', example='nvidia.com/gpu-tesla-v100-sxm2-16gb'),
}
model ImageConfig {
auth?: string(name='Auth'),
dockerRegistry?: string(name='DockerRegistry', example='registry.cn-hangzhou.aliyuncs.com'),
password?: string(name='Password'),
username?: string(name='Username'),
}
model ImageItem {
acceleratorType?: string(name='AcceleratorType', example='gpu'),
authorId?: string(name='AuthorId', example='ken'),
framework?: string(name='Framework', example='PyTorchJob'),
imageProviderType?: string(name='ImageProviderType', example='Community'),
imageTag?: string(name='ImageTag', example='tensorflow-training:2.3-cpu-py36-ubuntu18.04'),
imageUrl?: string(name='ImageUrl', example='registry.cn-beijing.aliyuncs.com/pai-dlc/tensorflow-training:2.3-cpu-py36-ubuntu18.04'),
imageUrlVpc?: string(name='ImageUrlVpc', example='registry-vpc.cn-beijing.aliyuncs.com/pai-dlc/tensorflow-training:2.3-cpu-py36-ubuntu18.04'),
}
model JobDebuggerConfig {
debuggerConfigContent?: string(name='DebuggerConfigContent'),
debuggerConfigId?: string(name='DebuggerConfigId'),
gmtCreateTime?: string(name='GmtCreateTime'),
jobId?: string(name='JobId'),
}
model JobElasticSpec {
AIMasterDockerImage?: string(name='AIMasterDockerImage'),
AIMasterType?: string(name='AIMasterType'),
EDPMaxParallelism?: int32(name='EDPMaxParallelism', example='16'),
EDPMinParallelism?: int32(name='EDPMinParallelism', example='8'),
elasticStrategy?: string(name='ElasticStrategy'),
enableAIMaster?: boolean(name='EnableAIMaster'),
enableEDP?: boolean(name='EnableEDP', example='true'),
enableElasticTraining?: boolean(name='EnableElasticTraining', example='true'),
enablePsJobElasticPS?: boolean(name='EnablePsJobElasticPS', example='true'),
enablePsJobElasticWorker?: boolean(name='EnablePsJobElasticWorker'),
enablePsResourceEstimate?: boolean(name='EnablePsResourceEstimate', example='true'),
maxParallelism?: int32(name='MaxParallelism', example='8'),
minParallelism?: int32(name='MinParallelism', example='1'),
PSMaxParallelism?: int32(name='PSMaxParallelism', example='10'),
PSMinParallelism?: int32(name='PSMinParallelism', example='4'),
}
model JobItem {
accessibility?: string(name='Accessibility', example='PUBLIC'),
clusterId?: string(name='ClusterId'),
codeSource?: {
branch?: string(name='Branch', example='master'),
codeSourceId?: string(name='CodeSourceId', example='code-20210111103721-85qz78ia96lu'),
commit?: string(name='Commit', example='44da109b59f8596152987eaa8f3b2487bb72ea63'),
mountPath?: string(name='MountPath', example='/mnt/data'),
}(name='CodeSource'),
credentialConfig?: CredentialConfig(name='CredentialConfig'),
dataSources?: [
{
dataSourceId?: string(name='DataSourceId', example='data-20210114104214-vf9lowjt3pso'),
mountPath?: string(name='MountPath', example='/mnt/data'),
}
](name='DataSources'),
displayName?: string(name='DisplayName', example='tf-mnist-test'),
duration?: long(name='Duration', example='3602'),
elasticSpec?: JobElasticSpec(name='ElasticSpec'),
enablePreemptibleJob?: boolean(name='EnablePreemptibleJob', example='false'),
enabledDebugger?: boolean(name='EnabledDebugger', example='false'),
envs?: map[string]string(name='Envs'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:01Z'),
gmtFailedTime?: string(name='GmtFailedTime', example='2021-01-12T14:35:01Z'),
gmtFinishTime?: string(name='GmtFinishTime', example='2021-01-12T15:36:08Z'),
gmtModifiedTime?: string(name='GmtModifiedTime', example='2021-01-12T15:36:08Z'),
gmtRunningTime?: string(name='GmtRunningTime', example='2021-01-12T14:35:01Z'),
gmtStoppedTime?: string(name='GmtStoppedTime', example='2021-01-12T14:35:01Z'),
gmtSubmittedTime?: string(name='GmtSubmittedTime', example='2021-01-12T14:35:01Z'),
gmtSuccessedTime?: string(name='GmtSuccessedTime', example='2021-01-12T14:35:01Z'),
isDeleted?: boolean(name='IsDeleted', example='false'),
jobId?: string(name='JobId', example='dlc-20210126170216-mtl37ge7gkvdz'),
jobMaxRunningTimeMinutes?: long(name='JobMaxRunningTimeMinutes', example='1'),
jobSpecs?: [
JobSpec
](name='JobSpecs'),
jobType?: string(name='JobType', example='TFJob'),
nodeCount?: string(name='NodeCount', example='1'),
nodeNames?: [ string ](name='NodeNames'),
pods?: [
PodItem
](name='Pods'),
priority?: int32(name='Priority', example='1'),
reasonCode?: string(name='ReasonCode', example='JobStoppedByUser'),
reasonMessage?: string(name='ReasonMessage', example='Job is stopped by user.'),
requestCPU?: long(name='RequestCPU', example='1'),
requestGPU?: string(name='RequestGPU', example='1'),
requestMemory?: string(name='RequestMemory', example='1Gi'),
resourceId?: string(name='ResourceId', example='dlc-quota'),
resourceLevel?: string(name='ResourceLevel', example='L0'),
resourceName?: string(name='ResourceName', example='my_resource_group'),
resourceQuotaName?: string(name='ResourceQuotaName', example='test'),
resourceType?: string(name='ResourceType', example='ECS'),
restartTimes?: string(name='RestartTimes', example='1'),
settings?: JobSettings(name='Settings'),
status?: string(name='Status', example='Stopped'),
statusHistory?: [
StatusTransitionItem
](name='StatusHistory'),
subStatus?: string(name='SubStatus', example='Restarting'),
systemEnvs?: map[string]string(name='SystemEnvs'),
tenantId?: string(name='TenantId'),
thirdpartyLibDir?: string(name='ThirdpartyLibDir', example='/root/code/'),
thirdpartyLibs?: [ string ](name='ThirdpartyLibs'),
useOversoldResource?: boolean(name='UseOversoldResource', example='false'),
userCommand?: string(name='UserCommand', example='python /root/code/mnist.py'),
userId?: string(name='UserId', example='123456789'),
userScript?: string(name='UserScript', example='ls'),
userVpc?: {
defaultRoute?: string(name='DefaultRoute'),
extendedCidrs?: [ string ](name='ExtendedCidrs'),
securityGroupId?: string(name='SecurityGroupId'),
switchId?: string(name='SwitchId'),
vpcId?: string(name='VpcId'),
}(name='UserVpc', example='vpc-1'),
username?: string(name='Username', example='pai-dlc-role'),
workingDir?: string(name='WorkingDir', example='/mnt/data'),
workspaceId?: string(name='WorkspaceId', example='268'),
workspaceName?: string(name='WorkspaceName', example='dlc-workspace'),
}
model JobSettings {
advancedSettings?: map[string]any(name='AdvancedSettings'),
businessUserId?: string(name='BusinessUserId', example='166924'),
caller?: string(name='Caller', example='SilkFlow'),
disableEcsStockCheck?: boolean(name='DisableEcsStockCheck', example='false'),
driver?: string(name='Driver', example='535.54.03'),
enableCPUAffinity?: boolean(name='EnableCPUAffinity', example='true'),
enableErrorMonitoringInAIMaster?: boolean(name='EnableErrorMonitoringInAIMaster', example='false'),
enableOssAppend?: boolean(name='EnableOssAppend', example='true'),
enableRDMA?: boolean(name='EnableRDMA', example='true'),
enableSanityCheck?: boolean(name='EnableSanityCheck', example='true'),
enableTideResource?: boolean(name='EnableTideResource', example='true'),
errorMonitoringArgs?: string(name='ErrorMonitoringArgs', example='--enable-log-hang-detection true'),
jobReservedMinutes?: int32(name='JobReservedMinutes', example='30'),
jobReservedPolicy?: string(name='JobReservedPolicy', example='Always'),
oversoldType?: string(name='OversoldType', example='AcceptQuotaOverSold'),
pipelineId?: string(name='PipelineId', example='pid-123456'),
sanityCheckArgs?: string(name='SanityCheckArgs', example='--sanity-check-timing=AfterJobFaultTolerant --sanity-check-timeout-ops=MarkJobFai'),
tags?: map[string]string(name='Tags'),
}
model JobSpec {
assignNodeSpec?: AssignNodeSpec(name='AssignNodeSpec'),
ecsSpec?: string(name='EcsSpec', example='ecs.c6.large'),
extraPodSpec?: ExtraPodSpec(name='ExtraPodSpec'),
image?: string(name='Image', example='registry.cn-hangzhou.aliyuncs.com/pai-dlc/tensorflow-training:1.12.2PAI-cpu-py27-ubuntu16.04'),
imageConfig?: ImageConfig(name='ImageConfig'),
podCount?: long(name='PodCount', example='1'),
resourceConfig?: ResourceConfig(name='ResourceConfig'),
spotSpec?: SpotSpec(name='SpotSpec'),
type?: string(name='Type', example='Worker'),
useSpotInstance?: boolean(name='UseSpotInstance', example='false', deprecated=true),
}
model Lifecycle {
postStart?: {
exec?: {
command?: [ string ](name='Command'),
}(name='Exec'),
}(name='PostStart'),
preStop?: {
exec?: {
command?: [ string ](name='Command'),
}(name='Exec'),
}(name='PreStop'),
}
model LogInfo {
content?: string(name='Content'),
id?: string(name='Id'),
isTruncated?: boolean(name='IsTruncated'),
podId?: string(name='PodId'),
podUid?: string(name='PodUid'),
source?: string(name='Source', example='stderr, stdout'),
time?: string(name='Time'),
}
model Member {
memberId?: string(name='MemberId', example='ken_12345'),
memberType?: string(name='MemberType', example='WorkspaceAdmin'),
}
model Metric {
time?: long(name='Time', example='1616987726587'),
value?: float(name='Value', example='23.45'),
}
model NodeMetric {
metrics?: [
Metric
](name='Metrics'),
nodeName?: string(name='NodeName', example='asi_xxx'),
}
model PodItem {
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:36:01Z'),
gmtFinishTime?: string(name='GmtFinishTime', example='2021-01-12T15:36:05Z'),
gmtStartTime?: string(name='GmtStartTime', example='2021-01-12T14:36:05Z'),
historyPods?: [
PodItem
](name='HistoryPods'),
ip?: string(name='Ip', example='10.0.1.2'),
nodeName?: string(name='NodeName'),
podId?: string(name='PodId', example='dlc-20210126170216-mtl37ge7gkvdz-worker-0'),
podUid?: string(name='PodUid', example='fe846462-af2c-4521-bd6f-96787a57591d'),
status?: string(name='Status', example='Stopped'),
subStatus?: string(name='SubStatus'),
type?: string(name='Type', example='Worker'),
}
model PodMetric {
metrics?: [
Metric
](name='Metrics'),
podId?: string(name='PodId', example='dlc-20210329110128-746bf7cl47pr8-worker-0'),
}
model Quota {
clusterId?: string(name='ClusterId'),
clusterName?: string(name='ClusterName'),
quotaConfig?: QuotaConfig(name='QuotaConfig'),
quotaId?: string(name='QuotaId', example='quotamtl37ge7gkvdz'),
quotaName?: string(name='QuotaName', example='dlc-quota'),
quotaType?: string(name='QuotaType', example='asiquota'),
totalQuota?: QuotaDetail(name='TotalQuota'),
totalTideQuota?: QuotaDetail(name='TotalTideQuota'),
usedQuota?: QuotaDetail(name='UsedQuota'),
usedTideQuota?: QuotaDetail(name='UsedTideQuota'),
}
model QuotaConfig {
allowedMaxPriority?: int32(name='AllowedMaxPriority'),
enableDLC?: boolean(name='EnableDLC'),
enableDSW?: boolean(name='EnableDSW'),
enableTideResource?: boolean(name='EnableTideResource'),
resourceLevel?: string(name='ResourceLevel'),
}
model QuotaDetail {
CPU?: string(name='CPU', example='2'),
GPU?: string(name='GPU', example='5'),
GPUDetails?: [
GPUDetail
](name='GPUDetails'),
GPUType?: string(name='GPUType', example='Tesla-V100'),
GPUTypeFullName?: string(name='GPUTypeFullName', example='nvidia.com/gpu'),
memory?: string(name='Memory', example='10Gi'),
}
model ResourceConfig {
CPU?: string(name='CPU', example='10'),
GPU?: string(name='GPU', example='3'),
GPUType?: string(name='GPUType', example='Tesla-V100-16G'),
memory?: string(name='Memory', example='10Gi'),
sharedMemory?: string(name='SharedMemory', example='5Gi'),
}
model ResourceRequirements {
limits?: map[string]string(name='Limits'),
requests?: map[string]string(name='Requests'),
}
model Resources {
CPU?: string(name='CPU', example='10'),
GPU?: string(name='GPU', example='8'),
memory?: string(name='Memory', example='1024 (unit: GB)'),
}
model SanityCheckResultItem {
checkNumber?: int32(name='CheckNumber', example='1'),
finishedAt?: string(name='FinishedAt', example='2023-11-30T16:47:30.378817+08:00'),
message?: string(name='Message'),
phase?: string(name='Phase', example='CheckInit'),
startedAt?: string(name='StartedAt', example='2023-11-30T16:47:30.343005+08:00'),
status?: string(name='Status', example='Succeeded'),
}
model SeccompProfile {
localhostProfile?: string(name='LocalhostProfile', example='my-profiles/profile-allow.json'),
type?: string(name='Type', example='Unconfined'),
}
model SecurityContext {
runAsGroup?: long(name='RunAsGroup', example='1000'),
runAsUser?: long(name='RunAsUser', example='1000'),
seccompProfile?: SeccompProfile(name='SeccompProfile'),
}
model SmartCache {
cacheWorkerNum?: long(name='CacheWorkerNum', example='10'),
cacheWorkerSize?: long(name='CacheWorkerSize', example='100'),
description?: string(name='Description', example='test'),
displayName?: string(name='DisplayName', example='test'),
duration?: string(name='Duration', example='123456'),
endpoint?: string(name='Endpoint', example='oss-cn-beijing-internal.aliyuncs.com'),
fileSystemId?: string(name='FileSystemId', example='1ca404****'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:36:01Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-12T23:36:01Z'),
mountPath?: string(name='MountPath', example='/root/data/'),
options?: string(name='Options', example='{"num_threads": 32}'),
path?: string(name='Path', example='oss://buc/path/to/dir'),
smartCacheId?: string(name='SmartCacheId', example='smartcache-20210114104214-vf9lowjt3pso'),
status?: string(name='Status', example='Running'),
type?: string(name='Type', example='oss'),
userId?: string(name='UserId', example='189xxx'),
}
model SpotSpec {
spotDiscountLimit?: float(name='SpotDiscountLimit'),
spotPriceLimit?: float(name='SpotPriceLimit'),
spotStrategy?: string(name='SpotStrategy'),
}
model StatusTransitionItem {
endTime?: string(name='EndTime'),
reasonCode?: string(name='ReasonCode'),
reasonMessage?: string(name='ReasonMessage'),
startTime?: string(name='StartTime'),
status?: string(name='Status'),
}
model Tensorboard {
accessibility?: string(name='Accessibility'),
cpu?: long(name='Cpu'),
dataSourceId?: string(name='DataSourceId', example='datasource-test'),
dataSourceType?: string(name='DataSourceType'),
displayName?: string(name='DisplayName', example='test'),
duration?: string(name='Duration', example='1234567'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:35:00Z'),
gmtFinishTime?: string(name='GmtFinishTime', example='2021-01-12T14:36:00Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-12T14:36:00Z'),
jobId?: string(name='JobId', example='dlc-20210114104214-vf9lowjt3pso'),
maxRunningTimeMinutes?: long(name='MaxRunningTimeMinutes'),
memory?: long(name='Memory'),
options?: string(name='Options'),
priority?: string(name='Priority'),
quotaId?: string(name='QuotaId'),
quotaName?: string(name='QuotaName'),
reasonCode?: string(name='ReasonCode', example='Delete by user'),
reasonMessage?: string(name='ReasonMessage', example='Tensorboard is deleted'),
requestId?: string(name='RequestId', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
status?: string(name='Status', example='running'),
summaryPath?: string(name='SummaryPath', example='/root/data'),
summaryRelativePath?: string(name='SummaryRelativePath'),
tensorboardDataSources?: [
TensorboardDataSourceSpec
](name='TensorboardDataSources'),
tensorboardId?: string(name='TensorboardId', example='tensorboard-xxx'),
tensorboardSpec?: TensorboardSpec(name='TensorboardSpec'),
tensorboardUrl?: string(name='TensorboardUrl', example='http://xxxxxx'),
token?: string(name='Token'),
userId?: string(name='UserId', example='lycxxxxx'),
username?: string(name='Username', example='tensorboard.pai'),
workspaceId?: string(name='WorkspaceId'),
}
model TensorboardDataSourceSpec {
dataSourceType?: string(name='DataSourceType', example='OSS'),
directoryName?: string(name='DirectoryName', example='dlcJobName'),
fullSummaryPath?: string(name='FullSummaryPath', example='oss://xxxxx/tensorboard/run1'),
id?: string(name='Id', example='d-vf2fdhxxxxxx'),
name?: string(name='Name', example='dlcJobName'),
sourceType?: string(name='SourceType', example='datasource'),
summaryPath?: string(name='SummaryPath', example='/tensorboard/run1'),
uri?: string(name='Uri', example='oss://.oss-cn-shanghai-finance-1.aliyuncs.com/'),
}
model TensorboardSpec {
ecsType?: string(name='EcsType', example='ecs.g6.large'),
securityGroupId?: string(name='SecurityGroupId', example='sg-xxxxx'),
switchId?: string(name='SwitchId', example='vsw-xxxx'),
vpcId?: string(name='VpcId', example='vpc-xxxx'),
}
model Workspace {
creator?: string(name='Creator', example='ken'),
gmtCreateTime?: string(name='GmtCreateTime', example='2021-01-12T14:36:01Z'),
gmtModifyTime?: string(name='GmtModifyTime', example='2021-01-12T14:36:01Z'),
members?: [
Member
](name='Members'),
quotas?: [
Quota
](name='Quotas'),
totalResources?: Resources(name='TotalResources'),
workspaceAdmins?: [
Member
](name='WorkspaceAdmins'),
workspaceId?: string(name='WorkspaceId', example='ws-20210126170216-mtl37ge7gkvdz'),
workspaceName?: string(name='WorkspaceName', example='dlc-workspace'),
}
model CreateJobRequest {
accessibility?: string(name='Accessibility', description='The job visibility. Valid values:
* PUBLIC: The job is visible to all members in the workspace.
* PRIVATE: The job is visible only to you and the administrator of the workspace.', example='PRIVATE'),
codeSource?: {
branch?: string(name='Branch', description='The branch of the referenced code repository. By default, the branch configured in the code source is used. This parameter is optional.', example='master'),
codeSourceId?: string(name='CodeSourceId', description='The ID of the code source.', example='code-20210111103721-xxxxxxx'),
commit?: string(name='Commit', description='The commit ID of the code to be downloaded. By default, the commit ID configured in the code source is used. This parameter is optional.', example='44da109b5******'),
mountPath?: string(name='MountPath', description='The path to which the job is mounted. By default, the mount path configured in the data source is used. This parameter is optional.', example='/root/data'),
}(name='CodeSource', description='The code source of the job. Before the node of the job runs, DLC automatically downloads the configured code from the code source and mounts the code to the local path of the container.'),
credentialConfig?: CredentialConfig(name='CredentialConfig', description='The access credential configuration.'),
dataSources?: [
{
dataSourceId?: string(name='DataSourceId', description='The data source ID.', example='d-cn9dl*******'),
dataSourceVersion?: string(name='DataSourceVersion'),
mountAccess?: string(name='MountAccess'),
mountPath?: string(name='MountPath', description='The path to which the job is mounted. By default, the mount path in the data source configuration is used. This parameter is optional.', example='/root/data'),
options?: string(name='Options', description='The mount attribute of the custom dataset. Set the value to OSS.', example='{
"fs.oss.download.thread.concurrency": "10",
"fs.oss.upload.thread.concurrency": "10",
"fs.jindo.args": "-oattr_timeout=3 -oentry_timeout=0 -onegative_timeout=0 -oauto_cache -ono_symlink"
}'),
uri?: string(name='Uri', description='The data source path.', example='oss://bucket.oss-cn-hangzhou-internal.aliyuncs.com/path/'),
}
](name='DataSources', description='The data sources for job running.'),
debuggerConfigContent?: string(name='DebuggerConfigContent', description='This parameter is not supported.', example='""'),
displayName?: string(name='DisplayName', description='The job name. The name must meet the following requirements:
* It must be 1 to 256 characters in length.
* It can contain digits, letters, underscores (_), periods (.), and hyphens (-).
This parameter is required.', example='tf-mnist-test'),
elasticSpec?: JobElasticSpec(name='ElasticSpec', description='This parameter is not supported.'),
envs?: map[string]string(name='Envs', description='The environment variables.'),
jobMaxRunningTimeMinutes?: long(name='JobMaxRunningTimeMinutes', description='The maximum running duration of the job. Unit: minutes.', example='1024'),
jobSpecs?: [
JobSpec
](name='JobSpecs', description='The configurations for job running, such as the image address, startup command, node resource declaration, and number of replicas.
A DLC job consists of different types of nodes. Nodes of the same type that share exactly the same configuration form a JobSpec. **JobSpecs** specifies the configurations of all node types and is of the array type.
This parameter is required.'),
jobType?: string(name='JobType', description='The job type. The value is case-sensitive. The following job types are supported, each corresponding to a training framework:
* TFJob: TensorFlow.
* PyTorchJob: PyTorch.
* MPIJob: MPI.
* XGBoostJob: XGBoost.
* OneFlowJob: OneFlow.
* ElasticBatchJob: ElasticBatch.
* SlurmJob: Slurm.
* RayJob: Ray.
This parameter is required.', example='TFJob'),
options?: string(name='Options', description='The additional configuration of the job. You can use this parameter to adjust the behavior of the attached data source. For example, if the attached data source of the job is of the OSS type, you can use this parameter to add the following configurations to override the default parameters of JindoFS: `fs.oss.download.thread.concurrency=4,fs.oss.download.queue.size=16`.', example='key1=value1,key2=value2'),
priority?: int32(name='Priority', description='The priority of the job. Default value: 1. Valid values: 1 to 9.
* 1: the lowest priority.
* 9: the highest priority.', example='8'),
resourceId?: string(name='ResourceId', description='The ID of the resource group. This parameter is optional.
* If you leave this parameter empty, the job is submitted to a public resource group.
* If a resource quota is bound to the current workspace, you can specify the resource quota ID. For more information about how to query the resource quota ID, see [Manage resource quotas](https://help.aliyun.com/document_detail/2651299.html).', example='rs-xxx'),
settings?: JobSettings(name='Settings', description='The additional parameter configurations of the job.'),
successPolicy?: string(name='SuccessPolicy', description='The policy that is used to check whether a distributed multi-node job is successful. Only TensorFlow distributed multi-node jobs are supported.
* ChiefWorker: If you use this policy, the job is considered successful when the pod on the chief node completes operations.
* AllWorkers (default): If you use this policy, the job is considered successful when all worker nodes complete operations.', example='AllWorkers'),
thirdpartyLibDir?: string(name='ThirdpartyLibDir', description='The folder in which the third-party Python library file requirements.txt is stored. Before the startup command specified by the UserCommand parameter is run on each node, DLC fetches the requirements.txt file from the folder and runs `pip install -r` to install the required package and library.', example='/root/code/'),
thirdpartyLibs?: [ string ](name='ThirdpartyLibs', description='The third-party Python libraries to be installed.'),
userCommand?: string(name='UserCommand', description='The startup command for all nodes of the job.
This parameter is required.', example='python /root/code/mnist.py'),
userVpc?: {
defaultRoute?: string(name='DefaultRoute', description='The default route. Default value: eth0. Valid values:
* eth0: The default network interface is used to access the Internet through the public gateway.
* eth1: The Elastic Network Interface of the user is used to access the Internet through the private gateway. For more information about the configuration method, see [Enable Internet access for a DSW instance by using a private Internet NAT gateway](https://help.aliyun.com/document_detail/2525343.html).', example='eth0'),
extendedCIDRs?: [ string ](name='ExtendedCIDRs', description='The extended CIDR block.
* If you leave the SwitchId and ExtendedCIDRs parameters empty, the system automatically obtains all CIDR blocks in a VPC.
* If you configure the SwitchId and ExtendedCIDRs parameters, we recommend that you specify all CIDR blocks in a VPC.'),
securityGroupId?: string(name='SecurityGroupId', description='The ID of the security group.', example='sg-abcdef****'),
switchId?: string(name='SwitchId', description='The vSwitch ID. This parameter is optional.
* If you leave this parameter empty, the system automatically selects a vSwitch based on the inventory status.
* You can also specify a vSwitch ID.', example='vs-abcdef****'),
vpcId?: string(name='VpcId', description='The VPC ID.', example='vpc-abcdef****'),
}(name='UserVpc', description='The VPC settings.'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.', example='ws-20210126170216-xxxxxxx'),
}
model CreateJobResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc7*******'),
requestId?: string(name='RequestId', description='The request ID used to troubleshoot issues.', example='473469C7-AA6F-4DC5-B3DB-xxxxxxx'),
}
model CreateJobResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: CreateJobResponseBody(name='body'),
}
/**
* @summary Creates a job that runs in a cluster. You can configure the data source, code source, startup command, and computing resources of each node on which a job runs.
*
* @description Before you call this operation, make sure that you understand the billing methods and [pricing](https://help.aliyun.com/document_detail/171758.html) of Deep Learning Containers (DLC) of Platform for AI (PAI).
*
* @param request CreateJobRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return CreateJobResponse
*/
async function createJobWithOptions(request: CreateJobRequest, headers: map[string]string, runtime: Util.RuntimeOptions): CreateJobResponse {
Util.validateModel(request);
var body : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
body['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.codeSource)) {
body['CodeSource'] = request.codeSource;
}
if (!Util.isUnset(request.credentialConfig)) {
body['CredentialConfig'] = request.credentialConfig;
}
if (!Util.isUnset(request.dataSources)) {
body['DataSources'] = request.dataSources;
}
if (!Util.isUnset(request.debuggerConfigContent)) {
body['DebuggerConfigContent'] = request.debuggerConfigContent;
}
if (!Util.isUnset(request.displayName)) {
body['DisplayName'] = request.displayName;
}
if (!Util.isUnset(request.elasticSpec)) {
body['ElasticSpec'] = request.elasticSpec;
}
if (!Util.isUnset(request.envs)) {
body['Envs'] = request.envs;
}
if (!Util.isUnset(request.jobMaxRunningTimeMinutes)) {
body['JobMaxRunningTimeMinutes'] = request.jobMaxRunningTimeMinutes;
}
if (!Util.isUnset(request.jobSpecs)) {
body['JobSpecs'] = request.jobSpecs;
}
if (!Util.isUnset(request.jobType)) {
body['JobType'] = request.jobType;
}
if (!Util.isUnset(request.options)) {
body['Options'] = request.options;
}
if (!Util.isUnset(request.priority)) {
body['Priority'] = request.priority;
}
if (!Util.isUnset(request.resourceId)) {
body['ResourceId'] = request.resourceId;
}
if (!Util.isUnset(request.settings)) {
body['Settings'] = request.settings;
}
if (!Util.isUnset(request.successPolicy)) {
body['SuccessPolicy'] = request.successPolicy;
}
if (!Util.isUnset(request.thirdpartyLibDir)) {
body['ThirdpartyLibDir'] = request.thirdpartyLibDir;
}
if (!Util.isUnset(request.thirdpartyLibs)) {
body['ThirdpartyLibs'] = request.thirdpartyLibs;
}
if (!Util.isUnset(request.userCommand)) {
body['UserCommand'] = request.userCommand;
}
if (!Util.isUnset(request.userVpc)) {
body['UserVpc'] = request.userVpc;
}
if (!Util.isUnset(request.workspaceId)) {
body['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
body = OpenApiUtil.parseToMap(body),
};
var params = new OpenApi.Params{
action = 'CreateJob',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs`,
method = 'POST',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Creates a job that runs in a cluster. You can configure the data source, code source, startup command, and computing resources of each node on which a job runs.
*
* @description Before you call this operation, make sure that you understand the billing methods and [pricing](https://help.aliyun.com/document_detail/171758.html) of Deep Learning Containers (DLC) of Platform for AI (PAI).
*
* @param request CreateJobRequest
* @return CreateJobResponse
*/
async function createJob(request: CreateJobRequest): CreateJobResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return createJobWithOptions(request, headers, runtime);
}
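// Usage sketch (illustrative only): a minimal CreateJob call, assuming the generated
// SDK exposes this module as `Client` and reusing example values from the models above.
//
//   var client = new Client(config);
//   var createJobRequest = new CreateJobRequest{
//     displayName = 'tf-mnist-test',
//     jobType = 'TFJob',
//     userCommand = 'python /root/code/mnist.py',
//     jobSpecs = [
//       new JobSpec{
//         type = 'Worker',
//         podCount = 1,
//         ecsSpec = 'ecs.c6.large',
//         image = 'registry.cn-beijing.aliyuncs.com/pai-dlc/tensorflow-training:2.3-cpu-py36-ubuntu18.04',
//       }
//     ],
//   };
//   var createJobResponse = client.createJob(createJobRequest);
//   // createJobResponse.body.jobId identifies the newly created job.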
model CreateTensorboardRequest {
accessibility?: string(name='Accessibility', description='The visibility of the job. Valid values:
* PUBLIC: The configuration is public in the workspace.
* PRIVATE: The configuration is visible only to you and the administrator of the workspace.', example='PRIVATE'),
cpu?: long(name='Cpu', description='The number of vCPU cores.', example='1'),
dataSourceId?: string(name='DataSourceId', description='The dataset ID.
<props="china">Call [ListDatasets](https://help.aliyun.com/document_detail/457222.html) to get the dataset ID.', example='d-xxxxxxxx'),
dataSourceType?: string(name='DataSourceType', description='The dataset type. Valid values:
* OSS
* NAS', example='OSS'),
dataSources?: [
DataSourceItem
](name='DataSources', description='The configurations of the data source.'),
displayName?: string(name='DisplayName', description='The TensorBoard name.', example='tensorboard'),
jobId?: string(name='JobId', description='The job ID. Call [ListJobs](https://help.aliyun.com/document_detail/459676.html) to get the job ID.', example='dlc-20210126170216-mtl37ge7gkvdz'),
maxRunningTimeMinutes?: long(name='MaxRunningTimeMinutes', description='The maximum running duration. Unit: minutes.', example='240'),
memory?: long(name='Memory', description='The memory size. Unit: GB.', example='1000'),
options?: string(name='Options', description='The extended fields of the dataset are in the JSON format. MountPath: the path to mount the dataset.', example='{"mountpath":"/root/data/"}'),
priority?: string(name='Priority', description='The priority of the job. Default value: 1. Valid values: 1 to 9.
* 1 is the lowest priority.
* 9 is the highest priority.', example='1'),
quotaId?: string(name='QuotaId', description='The resource quota ID. This parameter is required when you create a TensorBoard job by using a resource quota. <props="china">Call [ListQuotas](https://help.aliyun.com/document_detail/2628071.html) to get the quota ID.
This feature is currently limited to whitelisted users. If you need to use this feature, contact us.', example='quota12345'),
sourceId?: string(name='SourceId', description='The source ID.', example='dlc-xxxxxx'),
sourceType?: string(name='SourceType', description='The source type.', example='job'),
summaryPath?: string(name='SummaryPath', description='The summary directory.', example='/root/data/'),
summaryRelativePath?: string(name='SummaryRelativePath', description='The relative path of the summary.', example='/summary/'),
tensorboardDataSources?: [
TensorboardDataSourceSpec
](name='TensorboardDataSources', description='The configurations of datasets mounted with the TensorBoard job.'),
tensorboardSpec?: TensorboardSpec(name='TensorboardSpec', description='The pay-as-you-go configuration of TensorBoard, which is used to create TensorBoard jobs that use pay-as-you-go resources.'),
uri?: string(name='Uri', description='The dataset URI.
* Value format when DataSourceType is set to OSS: `oss://[oss-bucket].[endpoint]/[path]`.
* Value format when DataSourceType is set to NAS:`nas://[nas-filesystem-id].[region]/[path]`.', example='oss://.oss-cn-shanghai-finance-1.aliyuncs.com/'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.
<props="china">Call [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html) to obtain the workspace ID.', example='123***'),
}
model CreateTensorboardResponseBody = {
dataSourceId?: string(name='DataSourceId', description='The dataset ID.', example='ds-20210126170216-xxxxxxxx'),
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-xxxxxxxx'),
requestId?: string(name='RequestId', description='The ID of the request.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardId?: string(name='TensorboardId', description='The TensorBoard ID.', example='tbxxxxxxxx'),
}
model CreateTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: CreateTensorboardResponseBody(name='body'),
}
/**
* @summary Creates a TensorBoard by using a job or specifying a data source configuration.
*
* @param request CreateTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return CreateTensorboardResponse
*/
async function createTensorboardWithOptions(request: CreateTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): CreateTensorboardResponse {
Util.validateModel(request);
var body : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
body['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.cpu)) {
body['Cpu'] = request.cpu;
}
if (!Util.isUnset(request.dataSourceId)) {
body['DataSourceId'] = request.dataSourceId;
}
if (!Util.isUnset(request.dataSourceType)) {
body['DataSourceType'] = request.dataSourceType;
}
if (!Util.isUnset(request.dataSources)) {
body['DataSources'] = request.dataSources;
}
if (!Util.isUnset(request.displayName)) {
body['DisplayName'] = request.displayName;
}
if (!Util.isUnset(request.jobId)) {
body['JobId'] = request.jobId;
}
if (!Util.isUnset(request.maxRunningTimeMinutes)) {
body['MaxRunningTimeMinutes'] = request.maxRunningTimeMinutes;
}
if (!Util.isUnset(request.memory)) {
body['Memory'] = request.memory;
}
if (!Util.isUnset(request.options)) {
body['Options'] = request.options;
}
if (!Util.isUnset(request.priority)) {
body['Priority'] = request.priority;
}
if (!Util.isUnset(request.quotaId)) {
body['QuotaId'] = request.quotaId;
}
if (!Util.isUnset(request.sourceId)) {
body['SourceId'] = request.sourceId;
}
if (!Util.isUnset(request.sourceType)) {
body['SourceType'] = request.sourceType;
}
if (!Util.isUnset(request.summaryPath)) {
body['SummaryPath'] = request.summaryPath;
}
if (!Util.isUnset(request.summaryRelativePath)) {
body['SummaryRelativePath'] = request.summaryRelativePath;
}
if (!Util.isUnset(request.tensorboardDataSources)) {
body['TensorboardDataSources'] = request.tensorboardDataSources;
}
if (!Util.isUnset(request.tensorboardSpec)) {
body['TensorboardSpec'] = request.tensorboardSpec;
}
if (!Util.isUnset(request.uri)) {
body['Uri'] = request.uri;
}
if (!Util.isUnset(request.workspaceId)) {
body['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
body = OpenApiUtil.parseToMap(body),
};
var params = new OpenApi.Params{
action = 'CreateTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards`,
method = 'POST',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Creates a TensorBoard by using a job or specifying a data source configuration.
*
* @param request CreateTensorboardRequest
* @return CreateTensorboardResponse
*/
async function createTensorboard(request: CreateTensorboardRequest): CreateTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return createTensorboardWithOptions(request, headers, runtime);
}
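// Usage sketch (illustrative only): creating a TensorBoard from an existing job,
// assuming the same `Client` instance as above and placeholder IDs taken from the examples.
//
//   var createTbRequest = new CreateTensorboardRequest{
//     displayName = 'tensorboard',
//     jobId = 'dlc-20210126170216-mtl37ge7gkvdz',
//     summaryPath = '/root/data/',
//   };
//   var createTbResponse = client.createTensorboard(createTbRequest);
//   // createTbResponse.body.tensorboardId identifies the new TensorBoard.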
model DeleteJobResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc*************'),
requestId?: string(name='RequestId', description='The request ID. You can troubleshoot issues based on the request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
}
model DeleteJobResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: DeleteJobResponseBody(name='body'),
}
/**
* @summary Deletes a completed or stopped job.
*
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return DeleteJobResponse
*/
async function deleteJobWithOptions(JobId: string, headers: map[string]string, runtime: Util.RuntimeOptions): DeleteJobResponse {
var req = new OpenApi.OpenApiRequest{
headers = headers,
};
var params = new OpenApi.Params{
action = 'DeleteJob',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}`,
method = 'DELETE',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Deletes a completed or stopped job.
*
* @return DeleteJobResponse
*/
async function deleteJob(JobId: string): DeleteJobResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return deleteJobWithOptions(JobId, headers, runtime);
}
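// Usage sketch (illustrative only): deleting a completed or stopped job by ID,
// assuming the same `Client` instance and a placeholder job ID.
//
//   var deleteJobResponse = client.deleteJob('dlc-20210126170216-mtl37ge7gkvdz');
//   // deleteJobResponse.body.jobId echoes the ID of the deleted job.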
model DeleteTensorboardRequest {
workspaceId?: string(name='WorkspaceId', description='The workspace ID.
<props="china">For more information about how to obtain the workspace ID, see [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html).', example='46099'),
}
model DeleteTensorboardResponseBody = {
requestId?: string(name='RequestId', description='The request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardId?: string(name='TensorboardId', description='The TensorBoard ID.', example='tensorboard-20210114104214-vf9lowjt3pso'),
}
model DeleteTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: DeleteTensorboardResponseBody(name='body'),
}
/**
* @summary Deletes a stopped TensorBoard.
*
* @param request DeleteTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return DeleteTensorboardResponse
*/
async function deleteTensorboardWithOptions(TensorboardId: string, request: DeleteTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): DeleteTensorboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'DeleteTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}`,
method = 'DELETE',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Deletes a stopped TensorBoard.
*
* @param request DeleteTensorboardRequest
* @return DeleteTensorboardResponse
*/
async function deleteTensorboard(TensorboardId: string, request: DeleteTensorboardRequest): DeleteTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return deleteTensorboardWithOptions(TensorboardId, request, headers, runtime);
}
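// Usage sketch (illustrative only): deleting a stopped TensorBoard. The workspace ID
// is passed as a query parameter via DeleteTensorboardRequest; IDs below are placeholders.
//
//   var deleteTbRequest = new DeleteTensorboardRequest{ workspaceId = '46099' };
//   var deleteTbResponse = client.deleteTensorboard('tensorboard-20210114104214-vf9lowjt3pso', deleteTbRequest);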
model GetJobRequest {
needDetail?: boolean(name='NeedDetail', description='Specifies whether to return the job details. Default value: true.', example='true'),
}
model GetJobResponseBody = {
accessibility?: string(name='Accessibility', description='The visibility of the job. Valid values:
* PUBLIC: The job is visible to all members in the workspace.
* PRIVATE: The job is visible only to you and the administrator of the workspace. This is the default value.', example='PRIVATE'),
clusterId?: string(name='ClusterId', description='The cluster ID.', example='a*****'),
codeSource?: {
branch?: string(name='Branch', description='The code branch.', example='master'),
codeSourceId?: string(name='CodeSourceId', description='The code source ID.', example='code******'),
commit?: string(name='Commit', description='The code commit ID', example='44da109b59f8596152987eaa8f3b2487xxxxxx'),
mountPath?: string(name='MountPath', description='The local mount path.', example='/mnt/data'),
}(name='CodeSource', description='The code source.'),
credentialConfig?: CredentialConfig(name='CredentialConfig', description='The access credential configurations.'),
dataSources?: [
{
dataSourceId?: string(name='DataSourceId', description='The data source ID.', example='d*******'),
mountPath?: string(name='MountPath', description='The local mount path. This parameter is optional. The default value is empty, which specifies that the mount path in the data source is used.', example='/mnt/data/'),
uri?: string(name='Uri', description='The data source URL.', example='oss://bucket.oss-cn-hangzhou-internal.aliyuncs.com/path/'),
}
](name='DataSources', description='The data sources.'),
displayName?: string(name='DisplayName', description='The job name.', example='tf-mnist-test'),
duration?: long(name='Duration', description='The duration of the job (seconds).', example='3602'),
elasticSpec?: JobElasticSpec(name='ElasticSpec', description='The elastic job parameters.'),
enabledDebugger?: boolean(name='EnabledDebugger', description='Indicates whether the debugger job is enabled.', example='false'),
envs?: map[string]string(name='Envs', description='The configurations of environment variables.'),
gmtCreateTime?: string(name='GmtCreateTime', description='The time when the job was created (UTC).', example='2021-01-12T14:35:01Z'),
gmtFailedTime?: string(name='GmtFailedTime', description='The time when the job failed (UTC).', example='2021-01-12T15:36:08Z'),
gmtFinishTime?: string(name='GmtFinishTime', description='The time when the job ended (UTC).', example='2021-01-12T15:36:08Z'),
gmtRunningTime?: string(name='GmtRunningTime', description='The start time of the job (UTC).', example='2021-01-12T14:36:21Z'),
gmtStoppedTime?: string(name='GmtStoppedTime', description='The time when the job stopped (UTC).', example='2021-01-12T15:36:08Z'),
gmtSubmittedTime?: string(name='GmtSubmittedTime', description='The time when the job was submitted to the cluster (UTC).', example='2021-01-12T14:36:01Z'),
gmtSuccessedTime?: string(name='GmtSuccessedTime', description='The time when the job succeeded (UTC).', example='2021-01-12T15:36:08Z'),
jobId?: string(name='JobId', description='The job ID.', example='dlc*******'),
jobSpecs?: [
JobSpec
](name='JobSpecs', description='The node configurations of the job, corresponding to **JobSpecs** in the CreateJob operation.'),
jobType?: string(name='JobType', description='The job type. Specified by the JobType parameter of the [CreateJob](https://help.aliyun.com/document_detail/459672.html) operation.', example='TFJob'),
pods?: [
{
gmtCreateTime?: string(name='GmtCreateTime', description='The time when the node was created (UTC).', example='2021-01-12T14:36:01Z'),
gmtFinishTime?: string(name='GmtFinishTime', description='The end time of the node (UTC).', example='2021-01-12T15:36:05Z'),
gmtStartTime?: string(name='GmtStartTime', description='The start time of the node (UTC).', example='2021-01-12T14:36:01Z'),
historyPods?: [
{
gmtCreateTime?: string(name='GmtCreateTime', description='The time when the node was created (UTC).', example='2021-01-12T14:36:01Z'),
gmtFinishTime?: string(name='GmtFinishTime', description='The end time of the node (UTC).', example='2021-01-12T14:36:01Z'),
gmtStartTime?: string(name='GmtStartTime', description='The start time of the node (UTC).', example='2021-01-12T14:36:01Z'),
ip?: string(name='Ip', description='The IP address of the node.', example='10.0.1.3'),
podId?: string(name='PodId', description='The ID of the node.', example='Worker'),
podUid?: string(name='PodUid', description='The UID of the node.', example='fe846462-af2c-4521-bd6f-96787a57591d'),
resourceType?: string(name='ResourceType', description='The resource type of the node.', example='Normal'),
status?: string(name='Status', description='The status of the node.', example='Failed'),
subStatus?: string(name='SubStatus', description='The sub-status of the node, such as its preemption status. Valid values:
* Normal
* Evicted', example='Normal'),
type?: string(name='Type', description='The type of the node.', example='Worker'),
}
](name='HistoryPods', description='The historical nodes.'),
ip?: string(name='Ip', description='The IP address of the node.', example='10.0.1.2'),
podId?: string(name='PodId', description='The node ID. It can be used in the GetPodLogs and GetPodEvents operations to obtain the detailed logs and events of the node.', example='Worker'),
podUid?: string(name='PodUid', description='The UID of the node.', example='fe846462-af2c-4521-bd6f-96787a57591d'),
resourceType?: string(name='ResourceType', description='The resource type of the node.', example='Normal'),
status?: string(name='Status', description='The status of the node. Valid values:
* Pending
* Running
* Succeeded
* Failed
* Unknown', example='Running'),
subStatus?: string(name='SubStatus', description='The sub-status of the node, such as its preemption status. Valid values:
* Normal
* Evicted', example='Normal'),
type?: string(name='Type', description='The node type, which corresponds to a specific JobSpec in JobSpecs of the CreateJob operation.', example='Worker'),
}
](name='Pods', description='All running nodes of the job.'),
priority?: int32(name='Priority', description='The priority of the job. Valid values: 1 to 9.', example='1'),
reasonCode?: string(name='ReasonCode', description='The status detail code, which is a sub-status under the current status.', example='JobStoppedByUser'),
reasonMessage?: string(name='ReasonMessage', description='The description of the status detail code.', example='Job is stopped by user.'),
requestId?: string(name='RequestId', description='The request ID, which can be used for troubleshooting.', example='473469C7-AA6F-4DC5-B3DB-xxxxxxxx'),
resourceId?: string(name='ResourceId', description='The ID of the resource group to which the job belongs.', example='r******'),
resourceLevel?: string(name='ResourceLevel', description='The resource level that the job uses.', example='L0'),
resourceType?: string(name='ResourceType', description='The resource type. Valid values: ECS, Lingjun, and ACS.', example='ECS'),
restartTimes?: string(name='RestartTimes', description='The number of retries of the job and the maximum number of retries allowed.', example='0/10'),
settings?: JobSettings(name='Settings', description='The settings of the additional parameters of the job.'),
status?: string(name='Status', description='The status of the job. Valid values:
* Creating
* Queuing
* Bidding (only for Lingjun preemptible jobs)
* EnvPreparing
* SanityChecking
* Running
* Restarting
* Stopping
* SucceededReserving
* FailedReserving
* Succeeded
* Failed
* Stopped', example='Stopped'),
statusHistory?: [
StatusTransitionItem
](name='StatusHistory', description='The status history.'),
subStatus?: string(name='SubStatus', description='The sub-status of the job, such as its preemption status.', example='Restarting'),
tenantId?: string(name='TenantId', description='The tenant ID.', example='GAR***W134'),
thirdpartyLibDir?: string(name='ThirdpartyLibDir', description='The directory that contains requirements.txt.', example='/root/code/'),
thirdpartyLibs?: [ string ](name='ThirdpartyLibs', description='The third-party Python libraries to be installed.'),
userCommand?: string(name='UserCommand', description='The command that is run to start each node.', example='python /root/code/mnist.py'),
userId?: string(name='UserId', description='The UID of the Alibaba Cloud account that submitted the job.', example='12*********'),
userVpc?: {
defaultRoute?: string(name='DefaultRoute', description='The default route. This parameter is valid only for general-purpose computing resources. Valid values:
* eth0: The default network interface is used to access the Internet through the public gateway.
* eth1: The Elastic Network Interface of the user is used to access the Internet through the private gateway.'),
extendedCidrs?: [ string ](name='ExtendedCidrs', description='The extended CIDR block. Example: 192.168.0.1/24.'),
securityGroupId?: string(name='SecurityGroupId', description='The security group ID.', example='sg-abcdef****'),
switchId?: string(name='SwitchId', description='The vSwitch ID.', example='vs-abcdef****'),
vpcId?: string(name='VpcId', description='The VPC ID.', example='vpc-abcdef****'),
}(name='UserVpc', description='The VPC of the user.'),
workspaceId?: string(name='WorkspaceId', description='The ID of the workspace to which the job belongs.', example='268'),
workspaceName?: string(name='WorkspaceName', description='The name of the workspace to which the job belongs.', example='dlc-workspace'),
}
model GetJobResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetJobResponseBody(name='body'),
}
/**
* @summary Obtains the configuration and runtime information of a job.
*
* @param request GetJobRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetJobResponse
*/
async function getJobWithOptions(JobId: string, request: GetJobRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetJobResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.needDetail)) {
query['NeedDetail'] = request.needDetail;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetJob',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the configuration and runtime information of a job.
*
* @param request GetJobRequest
* @return GetJobResponse
*/
async function getJob(JobId: string, request: GetJobRequest): GetJobResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getJobWithOptions(JobId, request, headers, runtime);
}
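// Usage sketch (illustrative only, not part of the generated client): builds a
// GetJobRequest with the optional NeedDetail flag and calls getJob. The job ID
// 'dlc-example****' is a placeholder value.
//   var getJobReq = new GetJobRequest{ needDetail = true };
//   var getJobResp = getJob('dlc-example****', getJobReq);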
model GetJobEventsRequest {
endTime?: string(name='EndTime', description='The end time (UTC) of the time range for querying events. The default value is the current time.', example='2020-11-08T18:00:00Z'),
maxEventsNum?: int32(name='MaxEventsNum', description='The maximum number of events that can be returned. Default value: 2000.', example='100'),
startTime?: string(name='StartTime', description='The start time (UTC) of the time range for querying events. The default value is 7 days ago.', example='2020-11-08T16:00:00Z'),
}
model GetJobEventsResponseBody = {
events?: [ string ](name='Events', description='The events.'),
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-******'),
requestId?: string(name='RequestId', description='The request ID, which can be used for troubleshooting.', example='78F6FCE2-278F-4C4A-A6B7-DD8ECEA9C456'),
}
model GetJobEventsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetJobEventsResponseBody(name='body'),
}
/**
* @summary Obtains the system events of a job.
*
* @param request GetJobEventsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetJobEventsResponse
*/
async function getJobEventsWithOptions(JobId: string, request: GetJobEventsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetJobEventsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.maxEventsNum)) {
query['MaxEventsNum'] = request.maxEventsNum;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetJobEvents',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/events`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the system events of a job.
*
* @param request GetJobEventsRequest
* @return GetJobEventsResponse
*/
async function getJobEvents(JobId: string, request: GetJobEventsRequest): GetJobEventsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getJobEventsWithOptions(JobId, request, headers, runtime);
}
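// Usage sketch (illustrative only): queries the events of a placeholder job within
// a UTC time window and caps the number of returned events.
//   var eventsReq = new GetJobEventsRequest{
//     startTime = '2020-11-08T16:00:00Z',
//     endTime = '2020-11-08T18:00:00Z',
//     maxEventsNum = 100,
//   };
//   var eventsResp = getJobEvents('dlc-example****', eventsReq);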
model GetJobMetricsRequest {
endTime?: string(name='EndTime', description='The end time of the time range to query monitoring data. The time is displayed in UTC. The default value is the current time.', example='2020-11-09T16:00:00Z'),
metricType?: string(name='MetricType', description='The type of the monitoring metrics. Valid values:
* GpuCoreUsage: GPU utilization
* GpuMemoryUsage: GPU memory utilization
* CpuCoreUsage: CPU utilization
* MemoryUsage: memory utilization
* NetworkInputRate: network input rate
* NetworkOutputRate: network output rate
* DiskReadRate: disk read rate
* DiskWriteRate: disk write rate
This parameter is required.', example='GpuMemoryUsage'),
startTime?: string(name='StartTime', description='The beginning of the time range to query monitoring data. The time is displayed in UTC. The default value is the time 1 hour before the current time.', example='2020-11-08T16:00:00Z'),
timeStep?: string(name='TimeStep', description='The interval at which monitoring data is returned. Default value: 5. Unit: minutes.', example='5m'),
token?: string(name='Token', description='The temporary token used for authentication.', example='eyXXXX-XXXX.XXXXX'),
}
model GetJobMetricsResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-*******'),
podMetrics?: [
PodMetric
](name='PodMetrics', description='The monitoring metrics of the job.'),
requestId?: string(name='RequestId', description='The request ID. You can troubleshoot issues based on the request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
}
model GetJobMetricsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetJobMetricsResponseBody(name='body'),
}
/**
* @summary Obtains the monitoring data of a job, including the CPU, GPU, and memory utilization, network, and disk read/write rate.
*
* @param request GetJobMetricsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetJobMetricsResponse
*/
async function getJobMetricsWithOptions(JobId: string, request: GetJobMetricsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetJobMetricsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.metricType)) {
query['MetricType'] = request.metricType;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
if (!Util.isUnset(request.timeStep)) {
query['TimeStep'] = request.timeStep;
}
if (!Util.isUnset(request.token)) {
query['Token'] = request.token;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetJobMetrics',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/metrics`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the monitoring data of a job, including the CPU, GPU, and memory utilization, network, and disk read/write rate.
*
* @param request GetJobMetricsRequest
* @return GetJobMetricsResponse
*/
async function getJobMetrics(JobId: string, request: GetJobMetricsRequest): GetJobMetricsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getJobMetricsWithOptions(JobId, request, headers, runtime);
}
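// Usage sketch (illustrative only): requests GPU memory utilization for a
// placeholder job at a 5-minute step; MetricType is a required parameter.
//   var metricsReq = new GetJobMetricsRequest{
//     metricType = 'GpuMemoryUsage',
//     timeStep = '5m',
//   };
//   var metricsResp = getJobMetrics('dlc-example****', metricsReq);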
model GetJobSanityCheckResultRequest {
sanityCheckNumber?: int32(name='SanityCheckNumber', description='The sequence number of the sanity check to query, that is, the nth time the sanity check was performed for the job.
This parameter is required.', example='1'),
sanityCheckPhase?: string(name='SanityCheckPhase', description='The phase in which the job sanity check is performed. Valid values:
* CheckInit
* DeviceCheck
* SingleNodeCommCheck
* TwoNodeCommCheck
* AllNodeCommCheck', example='DeviceCheck'),
token?: string(name='Token', description='The token information for job sharing. For more information about how to obtain the token information, see [GetToken](https://help.aliyun.com/document_detail/2557812.html).', example='eyJhbG******zI1NiIsInR5cCI6IkpXVCJ9.eyJle****jE3MDk1Mzk0NDIsImlhdCI6MTcwODkzNDY0MiwidXNlcl9pZCI6IjE3NTgwNTQxNjI0Mzg2NTUiLCJ0YXJnZXRfaWQiOiJkbGM1OGh1a2xyYzZwdGMyIiwidGFyZ2V0X3R5cGUiOiJqb2IifQ.GNL7jo6****mgKKv0QeGIYgvBufSU-PH_EQttX****'),
}
model GetJobSanityCheckResultResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-xxxxxx'),
requestID?: string(name='RequestID', description='The request ID.', example='B3789344-F1xxxBE-5xx2-A04D-xxxxx'),
sanityCheckResult?: [
SanityCheckResultItem
](name='SanityCheckResult', description='The job sanity check result.'),
}
model GetJobSanityCheckResultResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetJobSanityCheckResultResponseBody(name='body'),
}
/**
* @summary Obtains the result of a specified sanity check for a Deep Learning Containers (DLC) job.
*
* @param request GetJobSanityCheckResultRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetJobSanityCheckResultResponse
*/
async function getJobSanityCheckResultWithOptions(JobId: string, request: GetJobSanityCheckResultRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetJobSanityCheckResultResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.sanityCheckNumber)) {
query['SanityCheckNumber'] = request.sanityCheckNumber;
}
if (!Util.isUnset(request.sanityCheckPhase)) {
query['SanityCheckPhase'] = request.sanityCheckPhase;
}
if (!Util.isUnset(request.token)) {
query['Token'] = request.token;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetJobSanityCheckResult',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/sanitycheckresult`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the result of a specified sanity check for a Deep Learning Containers (DLC) job.
*
* @param request GetJobSanityCheckResultRequest
* @return GetJobSanityCheckResultResponse
*/
async function getJobSanityCheckResult(JobId: string, request: GetJobSanityCheckResultRequest): GetJobSanityCheckResultResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getJobSanityCheckResultWithOptions(JobId, request, headers, runtime);
}
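// Usage sketch (illustrative only): fetches the result of the first sanity check
// run in the DeviceCheck phase for a placeholder job.
//   var sanityReq = new GetJobSanityCheckResultRequest{
//     sanityCheckNumber = 1,
//     sanityCheckPhase = 'DeviceCheck',
//   };
//   var sanityResp = getJobSanityCheckResult('dlc-example****', sanityReq);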
model GetPodEventsRequest {
endTime?: string(name='EndTime', description='The end time (UTC).', example='2020-11-09T16:00:00Z'),
maxEventsNum?: int32(name='MaxEventsNum', description='The maximum number of events that can be returned.', example='100'),
podUid?: string(name='PodUid', description='The node UID. Call [GetJob](https://help.aliyun.com/document_detail/459677.html) to get the node UID.', example='dlc-20210126170216-*****-chief-0'),
startTime?: string(name='StartTime', description='The start time (UTC).', example='2020-11-08T16:00:00Z'),
}
model GetPodEventsResponseBody = {
events?: [ string ](name='Events', description='The events returned.'),
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-*****'),
podId?: string(name='PodId', description='The node ID.
This parameter is required.', example='dlc-20210126170216-*****-chief-0'),
podUid?: string(name='PodUid', description='The node UID.', example='94a7cc7c-0033-48b5-85bd-71c63592c268'),
requestId?: string(name='RequestId', description='The request ID, which can be used for troubleshooting.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
}
model GetPodEventsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetPodEventsResponseBody(name='body'),
}
/**
* @summary Obtains the system events of a specific node in a job to locate and troubleshoot issues.
*
* @param request GetPodEventsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetPodEventsResponse
*/
async function getPodEventsWithOptions(JobId: string, PodId: string, request: GetPodEventsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetPodEventsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.maxEventsNum)) {
query['MaxEventsNum'] = request.maxEventsNum;
}
if (!Util.isUnset(request.podUid)) {
query['PodUid'] = request.podUid;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetPodEvents',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/pods/${OpenApiUtil.getEncodeParam(PodId)}/events`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the system events of a specific node in a job to locate and troubleshoot issues.
*
* @param request GetPodEventsRequest
* @return GetPodEventsResponse
*/
async function getPodEvents(JobId: string, PodId: string, request: GetPodEventsRequest): GetPodEventsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getPodEventsWithOptions(JobId, PodId, request, headers, runtime);
}
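// Usage sketch (illustrative only): queries the events of one node of a placeholder
// job; the pod UID narrows the query to a specific historical node instance.
//   var podEventsReq = new GetPodEventsRequest{
//     podUid = 'fe846462-af2c-4521-bd6f-96787a57****',
//     maxEventsNum = 100,
//   };
//   var podEventsResp = getPodEvents('dlc-example****', 'dlc-example****-worker-0', podEventsReq);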
model GetPodLogsRequest {
downloadToFile?: boolean(name='DownloadToFile', description='Specifies whether to download the log file. Default value: false. Valid values:
* false
* true', example='true'),
endTime?: string(name='EndTime', description='The end time of the query. Default value: current time.', example='2020-11-08T17:00:00Z'),
maxLines?: int32(name='MaxLines', description='The maximum number of log entries. Default value: 2000.', example='100'),
podUid?: string(name='PodUid', description='The node UID. For more information about how to obtain a node UID, see [GetJob](https://help.aliyun.com/document_detail/459677.html).', example='fe846462-af2c-4521-bd6f-96787a57****'),
startTime?: string(name='StartTime', description='The start time of the query. Default value: 7 days ago.', example='2020-11-08T16:00:00Z'),
}
model GetPodLogsResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-******'),
logs?: [ string ](name='Logs', description='The logs.'),
podId?: string(name='PodId', description='The node ID.', example='dlc-20210126170216-****-chief-0'),
podUid?: string(name='PodUid', description='The node UID.', example='94a7cc7c-0033-48b5-85bd-71c63592c268'),
requestId?: string(name='RequestId', description='The request ID, which can be used for troubleshooting.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
}
model GetPodLogsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetPodLogsResponseBody(name='body'),
}
/**
* @summary Obtains or downloads the logs of a node of a job. The logs are from the stdout and stderr of the system and user scripts.
*
* @param request GetPodLogsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetPodLogsResponse
*/
async function getPodLogsWithOptions(JobId: string, PodId: string, request: GetPodLogsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetPodLogsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.downloadToFile)) {
query['DownloadToFile'] = request.downloadToFile;
}
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.maxLines)) {
query['MaxLines'] = request.maxLines;
}
if (!Util.isUnset(request.podUid)) {
query['PodUid'] = request.podUid;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetPodLogs',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/pods/${OpenApiUtil.getEncodeParam(PodId)}/logs`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains or downloads the logs of a node of a job. The logs are from the stdout and stderr of the system and user scripts.
*
* @param request GetPodLogsRequest
* @return GetPodLogsResponse
*/
async function getPodLogs(JobId: string, PodId: string, request: GetPodLogsRequest): GetPodLogsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getPodLogsWithOptions(JobId, PodId, request, headers, runtime);
}
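// Usage sketch (illustrative only): reads up to 200 recent log lines of one node
// of a placeholder job without downloading them as a file.
//   var podLogsReq = new GetPodLogsRequest{
//     maxLines = 200,
//     downloadToFile = false,
//   };
//   var podLogsResp = getPodLogs('dlc-example****', 'dlc-example****-worker-0', podLogsReq);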
model GetRayDashboardRequest {
isShared?: boolean(name='isShared', description='Specifies whether the link is a sharing link. If yes, a token is required.
Valid values:
* true
* false', example='false'),
token?: string(name='token', description='The token obtained from the GetToken operation.', example='some_token_value'),
}
model GetRayDashboardResponseBody = {
metricsEnabled?: string(name='metricsEnabled', description='Indicates whether the dashboard is integrated with CloudMonitor and supports Ray metrics.', example='true'),
url?: string(name='url', description='The Ray Dashboard URL.', example='https://pre-pai-dlc-proxy-cn-hangzhou.aliyun.com/ray/dashboard/dlc1k7426goc7bvy'),
}
model GetRayDashboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetRayDashboardResponseBody(name='body'),
}
/**
* @summary Obtains a Ray Dashboard URL.
*
* @description Before you call this operation, make sure that you understand the billing methods and [pricing](https://help.aliyun.com/document_detail/171758.html) of Deep Learning Containers (DLC) of Platform for AI (PAI).
*
* @param request GetRayDashboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetRayDashboardResponse
*/
async function getRayDashboardWithOptions(jobId: string, request: GetRayDashboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetRayDashboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.isShared)) {
query['isShared'] = request.isShared;
}
if (!Util.isUnset(request.token)) {
query['token'] = request.token;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetRayDashboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(jobId)}/rayDashboard`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains a Ray Dashboard URL.
*
* @description Before you call this operation, make sure that you understand the billing methods and [pricing](https://help.aliyun.com/document_detail/171758.html) of Deep Learning Containers (DLC) of Platform for AI (PAI).
*
* @param request GetRayDashboardRequest
* @return GetRayDashboardResponse
*/
async function getRayDashboard(jobId: string, request: GetRayDashboardRequest): GetRayDashboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getRayDashboardWithOptions(jobId, request, headers, runtime);
}
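// Usage sketch (illustrative only): requests a non-shared Ray Dashboard URL for a
// placeholder job; a token is needed only when isShared is true.
//   var rayReq = new GetRayDashboardRequest{ isShared = false };
//   var rayResp = getRayDashboard('dlc-example****', rayReq);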
model GetTensorboardRequest {
jodId?: string(name='JodId', description='The job ID. For more information about how to query the job ID, see [ListJob](https://help.aliyun.com/document_detail/459676.html).', example='dlc-xxxxxxxx'),
token?: string(name='Token', description='The information about the shared token. You can specify this parameter to obtain the permission to view a TensorBoard job based on the shared token information. You can execute [GetTensorboardSharedUrl](https://help.aliyun.com/document_detail/2557813.html) and extract the shared token from the obtained information.', example='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e
yJleHAiOjE2OTUyODA0NTMsImlhdCI6MTY5NTE5NDA1MywidXNlcl9pZCI6IjExN
Tc3MDMyNzA5OTQ5MDEiLCJ0YXJnZXRfaWQiOiJ0YjRrOGxjNXhmdTM2b3B0Iiw
idGFyZ2V0X3R5cGUiOiJ0ZW5zb3Jib2FyZCJ9.6eT68J-KMBwwfN2d7fj7u6vyPcf0erfqYeizd2N****'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.
<props="china">For more information about how to query the workspace ID, see [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html).', example='46099'),
}
model GetTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: Tensorboard
}
/**
* @summary Queries the information of a TensorBoard instance.
*
* @param request GetTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetTensorboardResponse
*/
async function getTensorboardWithOptions(TensorboardId: string, request: GetTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetTensorboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.jodId)) {
query['JodId'] = request.jodId;
}
if (!Util.isUnset(request.token)) {
query['Token'] = request.token;
}
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Queries the information of a TensorBoard instance.
*
* @param request GetTensorboardRequest
* @return GetTensorboardResponse
*/
async function getTensorboard(TensorboardId: string, request: GetTensorboardRequest): GetTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getTensorboardWithOptions(TensorboardId, request, headers, runtime);
}
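// Usage sketch (illustrative only): queries a placeholder TensorBoard instance,
// scoped to a placeholder workspace ID.
//   var tbReq = new GetTensorboardRequest{ workspaceId = '46099' };
//   var tbResp = getTensorboard('tensorboard-example****', tbReq);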
model GetTensorboardSharedUrlRequest {
expireTimeSeconds?: string(name='ExpireTimeSeconds', description='The validity period of the shareable link. Unit: seconds. Maximum value: 604800.', example='86400'),
}
model GetTensorboardSharedUrlResponseBody = {
requestId?: string(name='RequestId', description='The request ID which is used for troubleshooting.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardSharedUrl?: string(name='TensorboardSharedUrl', description='The shareable link of the TensorBoard task.', example='http://pai-dlc-proxy-xxx.aliyuncs.com/xxx/xxx/token/'),
}
model GetTensorboardSharedUrlResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetTensorboardSharedUrlResponseBody(name='body'),
}
/**
* @summary Obtains the shareable link of a TensorBoard task. The link contains digital tokens. You can use a shareable link to access a TensorBoard task.
*
* @param request GetTensorboardSharedUrlRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetTensorboardSharedUrlResponse
*/
async function getTensorboardSharedUrlWithOptions(TensorboardId: string, request: GetTensorboardSharedUrlRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetTensorboardSharedUrlResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.expireTimeSeconds)) {
query['ExpireTimeSeconds'] = request.expireTimeSeconds;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetTensorboardSharedUrl',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}/sharedurl`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the shareable link of a TensorBoard task. The link contains digital tokens. You can use a shareable link to access a TensorBoard task.
*
* @param request GetTensorboardSharedUrlRequest
* @return GetTensorboardSharedUrlResponse
*/
async function getTensorboardSharedUrl(TensorboardId: string, request: GetTensorboardSharedUrlRequest): GetTensorboardSharedUrlResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getTensorboardSharedUrlWithOptions(TensorboardId, request, headers, runtime);
}
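// Usage sketch (illustrative only): creates a shareable link for a placeholder
// TensorBoard task that stays valid for one day.
//   var sharedUrlReq = new GetTensorboardSharedUrlRequest{ expireTimeSeconds = '86400' };
//   var sharedUrlResp = getTensorboardSharedUrl('tensorboard-example****', sharedUrlReq);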
model GetTokenRequest {
expireTime?: long(name='ExpireTime', description='The validity period of the sharing link. Unit: seconds. Default value: 604800. Minimum value: 0.', example='60'),
targetId?: string(name='TargetId', description='The ID of the job to be shared.', example='dlc*******'),
targetType?: string(name='TargetType', description='The type of the job that you want to share. Valid values: job and tensorboard.', example='job'),
}
model GetTokenResponseBody = {
requestId?: string(name='RequestId', description='The request ID, which is used to troubleshoot issues.', example='473469C7-AA6F-4DC5-B3DB-xxxxxxxx'),
token?: string(name='Token', description='The sharing token, used to view the information about the shared job.', example='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9*****'),
}
model GetTokenResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetTokenResponseBody(name='body'),
}
/**
* @summary Obtains the sharing token of a DLC job. This token is used to view the information about the shared job.
*
* @param request GetTokenRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetTokenResponse
*/
async function getTokenWithOptions(request: GetTokenRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetTokenResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.expireTime)) {
query['ExpireTime'] = request.expireTime;
}
if (!Util.isUnset(request.targetId)) {
query['TargetId'] = request.targetId;
}
if (!Util.isUnset(request.targetType)) {
query['TargetType'] = request.targetType;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetToken',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tokens`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the sharing token of a DLC job. This token is used to view the information about the shared job.
*
* @param request GetTokenRequest
* @return GetTokenResponse
*/
async function getToken(request: GetTokenRequest): GetTokenResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getTokenWithOptions(request, headers, runtime);
}
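// Usage sketch (illustrative only): requests a one-hour sharing token for a
// placeholder job; TargetType may also be set to 'tensorboard'.
//   var tokenReq = new GetTokenRequest{
//     targetId = 'dlc-example****',
//     targetType = 'job',
//     expireTime = 3600,
//   };
//   var tokenResp = getToken(tokenReq);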
model GetWebTerminalRequest {
isShared?: boolean(name='IsShared', description='Specifies whether to create a shareable link to access the container. Valid values:
* true: returns a shareable link to access the container. The link will expire after 30 seconds and can only be used once. After you access the container by using the link, other requests that use this link to access the container become invalid.
* false: returns a common shareable link to access the container. If you use a common shareable link to access a container, Alibaba Cloud identity authentication is required. The link will expire after 30 seconds.', example='true'),
podUid?: string(name='PodUid', description='The pod UID.', example='94a7cc7c-0033-48b5-85bd-71c63592c268'),
}
model GetWebTerminalResponseBody = {
requestId?: string(name='RequestId', description='The request ID, which can be used for troubleshooting.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
webTerminalUrl?: string(name='WebTerminalUrl', description='The WebSocket URI for accessing the container. You must build a WebSocket client. For more information about the communication format, see the following code:
ws = new WebSocket(
`wss://xxxxx`,
);
ws.onopen = function open() {
console.warn(\\\\"connected\\\\");
term.write(\\\\"\\\\r\\\\");
};
ws.onclose = function close() {
console.warn(\\\\"disconnected\\\\");
term.write(\\\\"Connection closed\\\\");
};
// The backend returns messages in the following format.
ws.onmessage = function incoming(event) {
const msg = JSON.parse(event.data);
console.warn(msg);
if (msg.operation === \\\\"stdout\\\\") {
term.write(msg.data);
} else {
console.warn(\\\\"invalid msg operation: \\\\" + msg);
}
};
// Enter the following code in the console.
term.onData(data => {
const msg = { operation: \\\\"stdin\\\\", data: data };
ws.send(JSON.stringify(msg));
});
term.onResize(size => {
const msg = { operation: \\\\"resize\\\\", cols: size.cols, rows: size.rows };
ws.send(JSON.stringify(msg));
});
fitAddon.fit();
', example='wss://*****'),
}
model GetWebTerminalResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: GetWebTerminalResponseBody(name='body'),
}
/**
* @summary Provides methods and steps to obtain an HTTP link for accessing a container.
*
* @param request GetWebTerminalRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return GetWebTerminalResponse
*/
async function getWebTerminalWithOptions(JobId: string, PodId: string, request: GetWebTerminalRequest, headers: map[string]string, runtime: Util.RuntimeOptions): GetWebTerminalResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.isShared)) {
query['IsShared'] = request.isShared;
}
if (!Util.isUnset(request.podUid)) {
query['PodUid'] = request.podUid;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'GetWebTerminal',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/pods/${OpenApiUtil.getEncodeParam(PodId)}/webterminal`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Provides methods and steps to obtain an HTTP link for accessing a container.
*
* @param request GetWebTerminalRequest
* @return GetWebTerminalResponse
*/
async function getWebTerminal(JobId: string, PodId: string, request: GetWebTerminalRequest): GetWebTerminalResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return getWebTerminalWithOptions(JobId, PodId, request, headers, runtime);
}
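// Usage sketch (illustrative only): requests a short-lived, single-use WebSocket URL
// for one node of a placeholder job; the returned URL must be opened by a WebSocket client.
//   var terminalReq = new GetWebTerminalRequest{ isShared = true };
//   var terminalResp = getWebTerminal('dlc-example****', 'dlc-example****-worker-0', terminalReq);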
model ListEcsSpecsRequest {
acceleratorType?: string(name='AcceleratorType', description='Filter by accelerator type. Valid values:
* CPU
* GPU', example='GPU'),
instanceTypes?: string(name='InstanceTypes', description='The instance types to query. Separate the types with commas (,).', example='ecs.g6.large,ecs.g6.xlarge'),
order?: string(name='Order', description='The sorting order. Valid values:
* desc: descending order.
* asc: ascending order.', example='desc'),
pageNumber?: int32(name='PageNumber', description='The number of the page to query. The start value is 1.', example='1'),
pageSize?: int32(name='PageSize', description='The number of entries returned per page.', example='10'),
resourceType?: string(name='ResourceType', description='The type of the resource. Valid values:
* ECS
* Lingjun', example='ECS'),
sortBy?: string(name='SortBy', description='The field based on which the results are sorted. Valid values:
* CPU
* GPU
* Memory
* GmtCreateTime', example='Gpu'),
}
model ListEcsSpecsResponseBody = {
ecsSpecs?: [
EcsSpec
](name='EcsSpecs', description='The list of ECS specifications.'),
requestId?: string(name='RequestId', description='The request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
totalCount?: long(name='TotalCount', description='The number of types that meet the filter conditions.', example='10'),
}
model ListEcsSpecsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: ListEcsSpecsResponseBody(name='body'),
}
/**
* @summary Queries the list of supported instance types.
*
* @param request ListEcsSpecsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return ListEcsSpecsResponse
*/
async function listEcsSpecsWithOptions(request: ListEcsSpecsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): ListEcsSpecsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.acceleratorType)) {
query['AcceleratorType'] = request.acceleratorType;
}
if (!Util.isUnset(request.instanceTypes)) {
query['InstanceTypes'] = request.instanceTypes;
}
if (!Util.isUnset(request.order)) {
query['Order'] = request.order;
}
if (!Util.isUnset(request.pageNumber)) {
query['PageNumber'] = request.pageNumber;
}
if (!Util.isUnset(request.pageSize)) {
query['PageSize'] = request.pageSize;
}
if (!Util.isUnset(request.resourceType)) {
query['ResourceType'] = request.resourceType;
}
if (!Util.isUnset(request.sortBy)) {
query['SortBy'] = request.sortBy;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'ListEcsSpecs',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/ecsspecs`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Queries the list of supported instance types.
*
* @param request ListEcsSpecsRequest
* @return ListEcsSpecsResponse
*/
async function listEcsSpecs(request: ListEcsSpecsRequest): ListEcsSpecsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return listEcsSpecsWithOptions(request, headers, runtime);
}
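// Usage sketch (illustrative only): lists the first page of GPU-accelerated ECS
// instance types, sorted by GPU count in descending order.
//   var ecsSpecsReq = new ListEcsSpecsRequest{
//     acceleratorType = 'GPU',
//     resourceType = 'ECS',
//     sortBy = 'Gpu',
//     order = 'desc',
//     pageNumber = 1,
//     pageSize = 10,
//   };
//   var ecsSpecsResp = listEcsSpecs(ecsSpecsReq);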
model ListJobSanityCheckResultsRequest {
order?: string(name='Order', description='The sorting order:
* desc: descending order
* asc: ascending order', example='desc'),
}
model ListJobSanityCheckResultsResponseBody = {
requestID?: string(name='RequestID', description='The request ID.', example='1AC9xxx-3xxx-5xxx2-xxxx-FA5'),
sanityCheckResults?: [[
SanityCheckResultItem
] ](name='SanityCheckResults', description='The sanity check results.'),
totalCount?: int32(name='TotalCount', description='The total number of results that meet the filter conditions.', example='10'),
}
model ListJobSanityCheckResultsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: ListJobSanityCheckResultsResponseBody(name='body'),
}
/**
* @summary Obtains the results of all sanity checks for a DLC job.
*
* @param request ListJobSanityCheckResultsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return ListJobSanityCheckResultsResponse
*/
async function listJobSanityCheckResultsWithOptions(JobId: string, request: ListJobSanityCheckResultsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): ListJobSanityCheckResultsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.order)) {
query['Order'] = request.order;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'ListJobSanityCheckResults',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/sanitycheckresults`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Obtains the results of all sanity checks for a DLC job.
*
* @param request ListJobSanityCheckResultsRequest
* @return ListJobSanityCheckResultsResponse
*/
async function listJobSanityCheckResults(JobId: string, request: ListJobSanityCheckResultsRequest): ListJobSanityCheckResultsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return listJobSanityCheckResultsWithOptions(JobId, request, headers, runtime);
}
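// Usage sketch (illustrative only): lists all sanity check results of a
// placeholder job, newest first.
//   var checkResultsReq = new ListJobSanityCheckResultsRequest{ order = 'desc' };
//   var checkResultsResp = listJobSanityCheckResults('dlc-example****', checkResultsReq);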
model ListJobsRequest {
accessibility?: string(name='Accessibility', description='The job visibility. Valid values:
* PUBLIC: The job is visible to all members in the workspace.
* PRIVATE: The job is visible only to you and the administrator of the workspace.', example='PRIVATE'),
businessUserId?: string(name='BusinessUserId', description='The ID of the user associated with the job.', example='16****'),
caller?: string(name='Caller', description='The caller.', example='local'),
displayName?: string(name='DisplayName', description='The job name. Fuzzy query is supported. The name is case-insensitive. Wildcards are not supported. For example, if you enter test, jobs named test-job1, job-test, and job-test2 can be matched, but job-t1 cannot be matched. The default value null indicates any job name.', example='tf-mnist-test'),
endTime?: string(name='EndTime', description='The end time of the query. Use the job creation time to filter data. The default value is the current time.', example='2020-11-09T14:45:00Z'),
fromAllWorkspaces?: boolean(name='FromAllWorkspaces', description='Specifies whether to query a list of jobs across workspaces. This parameter must be used together with `ShowOwn=true`. You can use this parameter to query a list of jobs recently submitted by the current user.', example='false'),
jobId?: string(name='JobId', description='The job ID. Fuzzy query is supported. The name is case-insensitive. Wildcards are not supported. The default value null indicates any job ID.', example='dlc********'),
jobIds?: string(name='JobIds'),
jobType?: string(name='JobType', description='The job type. The default value null indicates any type. Valid values:
* TFJob
* PyTorchJob
* XGBoostJob
* OneFlowJob
* ElasticBatchJob', example='TFJob'),
order?: string(name='Order', description='The sorting order. Valid values:
* desc (default)
* asc', example='desc'),
oversoldInfo?: string(name='OversoldInfo', description='The idle resource information. Valid values:
* ForbiddenQuotaOverSold
* ForceQuotaOverSold
* AcceptQuotaOverSold-true (true indicates that the job uses idle resources.)
* AcceptQuotaOverSold-false (false indicates that the job uses guaranteed resources.)', example='ForbiddenQuotaOverSold'),
pageNumber?: int32(name='PageNumber', description='The number of the page to return for the current query. Minimum value: 1. Default value: 1.', example='1'),
pageSize?: int32(name='PageSize', description='The number of jobs per page.', example='50'),
paymentType?: string(name='PaymentType', description='The type of the resource. Valid values:
* PrePaid: Resource quota
* Spot: Preemptible resources
* PostPaid: Public resources', example='PostPaid'),
pipelineId?: string(name='PipelineId', description='The specific pipeline ID used to filter jobs.', example='flow-*******'),
resourceId?: string(name='ResourceId', description='The resource group ID. For information about how to obtain the ID of a dedicated resource group, see [Manage resource quota](https://help.aliyun.com/document_detail/2651299.html).', example='r*****'),
resourceQuotaName?: string(name='ResourceQuotaName', description='The resource quota name used to filter jobs. Fuzzy search is supported. Wildcards are not supported. The default value null indicates that jobs are not filtered by resource quota name.', example='quota***'),
showOwn?: boolean(name='ShowOwn', description='Specifies whether to query only the jobs submitted by the current user.', example='true'),
sortBy?: string(name='SortBy', description='The sorting field. Valid values:
* DisplayName
* JobType
* Status
* GmtCreateTime
* GmtFinishTime', example='GmtCreateTime'),
startTime?: string(name='StartTime', description='The start time of the query. Use the job creation time to filter data. The default value is the current time minus seven days. In other words, if you do not configure the StartTime and EndTime parameters, the system queries the job list in the last seven days.', example='2020-11-08T16:00:00Z'),
status?: string(name='Status', description='The job status. Valid values:
* Creating
* Queuing
* Bidding (only available for spot jobs that use Lingjun resources)
* EnvPreparing
* SanityChecking
* Running
* Restarting
* Stopping
* SucceededReserving
* FailedReserving
* Succeeded
* Failed
* Stopped', example='Running'),
tags?: map[string]string(name='Tags', description='The tags.'),
userIdForFilter?: string(name='UserIdForFilter', description='The user ID used to filter jobs.', example='20**************'),
username?: string(name='Username', description='The username used to filter jobs. Fuzzy search is supported. Wildcards are not supported. The default value null indicates that jobs are not filtered by username.', example='test***'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.', example='1****'),
}
model ListJobsShrinkRequest {
accessibility?: string(name='Accessibility', description='The job visibility. Valid values:
* PUBLIC: The job is visible to all members in the workspace.
* PRIVATE: The job is visible only to you and the administrator of the workspace.', example='PRIVATE'),
businessUserId?: string(name='BusinessUserId', description='The ID of the user associated with the job.', example='16****'),
caller?: string(name='Caller', description='The caller.', example='local'),
displayName?: string(name='DisplayName', description='The job name. Fuzzy query is supported. The name is case-insensitive. Wildcards are not supported. For example, if you enter test, jobs named test-job1, job-test, and job-test2 can be matched, but job-t1 cannot be matched. The default value null indicates any job name.', example='tf-mnist-test'),
endTime?: string(name='EndTime', description='The end time of the query. Use the job creation time to filter data. The default value is the current time.', example='2020-11-09T14:45:00Z'),
fromAllWorkspaces?: boolean(name='FromAllWorkspaces', description='Specifies whether to query a list of jobs across workspaces. This parameter must be used together with `ShowOwn=true`. You can use this parameter to query a list of jobs recently submitted by the current user.', example='false'),
jobId?: string(name='JobId', description='The job ID. Fuzzy query is supported. The name is case-insensitive. Wildcards are not supported. The default value null indicates any job ID.', example='dlc********'),
jobIds?: string(name='JobIds'),
jobType?: string(name='JobType', description='The job type. The default value null indicates any type. Valid values:
* TFJob
* PyTorchJob
* XGBoostJob
* OneFlowJob
* ElasticBatchJob', example='TFJob'),
order?: string(name='Order', description='The sorting order. Valid values:
* desc (default)
* asc', example='desc'),
oversoldInfo?: string(name='OversoldInfo', description='The idle resource information. Valid values:
* ForbiddenQuotaOverSold
* ForceQuotaOverSold
* AcceptQuotaOverSold-true (true indicates that the job uses idle resources.)
* AcceptQuotaOverSold-false (false indicates that the job uses guaranteed resources.)', example='ForbiddenQuotaOverSold'),
pageNumber?: int32(name='PageNumber', description='The number of the page to return for the current query. Minimum value: 1. Default value: 1.', example='1'),
pageSize?: int32(name='PageSize', description='The number of jobs per page.', example='50'),
paymentType?: string(name='PaymentType', description='The type of the resource. Valid values:
* PrePaid: Resource quota
* Spot: Preemptible resources
* PostPaid: Public resources', example='PostPaid'),
pipelineId?: string(name='PipelineId', description='The specific pipeline ID used to filter jobs.', example='flow-*******'),
resourceId?: string(name='ResourceId', description='The resource group ID. For information about how to obtain the ID of a dedicated resource group, see [Manage resource quota](https://help.aliyun.com/document_detail/2651299.html).', example='r*****'),
resourceQuotaName?: string(name='ResourceQuotaName', description='The resource quota name used to filter jobs. Fuzzy search is supported. Wildcards are not supported. The default value null indicates that jobs are not filtered by resource quota name.', example='quota***'),
showOwn?: boolean(name='ShowOwn', description='Specifies whether to query only the jobs submitted by the current user.', example='true'),
sortBy?: string(name='SortBy', description='The sorting field. Valid values:
* DisplayName
* JobType
* Status
* GmtCreateTime
* GmtFinishTime', example='GmtCreateTime'),
startTime?: string(name='StartTime', description='The start time of the query. Use the job creation time to filter data. The default value is the current time minus seven days. In other words, if you do not configure the StartTime and EndTime parameters, the system queries the job list in the last seven days.', example='2020-11-08T16:00:00Z'),
status?: string(name='Status', description='The job status. Valid values:
* Creating
* Queuing
* Bidding (only available for spot jobs that use Lingjun resources)
* EnvPreparing
* SanityChecking
* Running
* Restarting
* Stopping
* SucceededReserving
* FailedReserving
* Succeeded
* Failed
* Stopped', example='Running'),
tagsShrink?: string(name='Tags', description='The tags.'),
userIdForFilter?: string(name='UserIdForFilter', description='The user ID used to filter jobs.', example='20**************'),
username?: string(name='Username', description='The username used to filter jobs. Fuzzy search is supported. Wildcards are not supported. The default value null indicates that jobs are not filtered by username.', example='test***'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.', example='1****'),
}
model ListJobsResponseBody = {
jobs?: [
JobItem
](name='Jobs', description='The jobs.'),
requestId?: string(name='RequestId', description='The request ID used to troubleshoot issues.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
totalCount?: long(name='TotalCount', description='The total number of jobs that meet the filter conditions.', example='2'),
}
model ListJobsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: ListJobsResponseBody(name='body'),
}
/**
* @summary Queries a list of jobs and supports pagination, sorting, and filtering by conditions.
*
* @param tmpReq ListJobsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return ListJobsResponse
*/
async function listJobsWithOptions(tmpReq: ListJobsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): ListJobsResponse {
Util.validateModel(tmpReq);
var request = new ListJobsShrinkRequest{};
OpenApiUtil.convert(tmpReq, request);
if (!Util.isUnset(tmpReq.tags)) {
request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
}
var query : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
query['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.businessUserId)) {
query['BusinessUserId'] = request.businessUserId;
}
if (!Util.isUnset(request.caller)) {
query['Caller'] = request.caller;
}
if (!Util.isUnset(request.displayName)) {
query['DisplayName'] = request.displayName;
}
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.fromAllWorkspaces)) {
query['FromAllWorkspaces'] = request.fromAllWorkspaces;
}
if (!Util.isUnset(request.jobId)) {
query['JobId'] = request.jobId;
}
if (!Util.isUnset(request.jobIds)) {
query['JobIds'] = request.jobIds;
}
if (!Util.isUnset(request.jobType)) {
query['JobType'] = request.jobType;
}
if (!Util.isUnset(request.order)) {
query['Order'] = request.order;
}
if (!Util.isUnset(request.oversoldInfo)) {
query['OversoldInfo'] = request.oversoldInfo;
}
if (!Util.isUnset(request.pageNumber)) {
query['PageNumber'] = request.pageNumber;
}
if (!Util.isUnset(request.pageSize)) {
query['PageSize'] = request.pageSize;
}
if (!Util.isUnset(request.paymentType)) {
query['PaymentType'] = request.paymentType;
}
if (!Util.isUnset(request.pipelineId)) {
query['PipelineId'] = request.pipelineId;
}
if (!Util.isUnset(request.resourceId)) {
query['ResourceId'] = request.resourceId;
}
if (!Util.isUnset(request.resourceQuotaName)) {
query['ResourceQuotaName'] = request.resourceQuotaName;
}
if (!Util.isUnset(request.showOwn)) {
query['ShowOwn'] = request.showOwn;
}
if (!Util.isUnset(request.sortBy)) {
query['SortBy'] = request.sortBy;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
if (!Util.isUnset(request.status)) {
query['Status'] = request.status;
}
if (!Util.isUnset(request.tagsShrink)) {
query['Tags'] = request.tagsShrink;
}
if (!Util.isUnset(request.userIdForFilter)) {
query['UserIdForFilter'] = request.userIdForFilter;
}
if (!Util.isUnset(request.username)) {
query['Username'] = request.username;
}
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'ListJobs',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Queries a list of jobs and supports pagination, sorting, and filtering by conditions.
*
* @param request ListJobsRequest
* @return ListJobsResponse
*/
async function listJobs(request: ListJobsRequest): ListJobsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return listJobsWithOptions(request, headers, runtime);
}
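// Usage sketch (illustrative only): pages through running TFJob jobs in a
// placeholder workspace, newest first.
//   var listJobsReq = new ListJobsRequest{
//     workspaceId = '1****',
//     jobType = 'TFJob',
//     status = 'Running',
//     sortBy = 'GmtCreateTime',
//     order = 'desc',
//     pageNumber = 1,
//     pageSize = 50,
//   };
//   var listJobsResp = listJobs(listJobsReq);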
model ListTensorboardsRequest {
accessibility?: string(name='Accessibility', description='The instance visibility.
* PUBLIC: TensorBoard instances are visible to all members in the workspace.
* PRIVATE: TensorBoard instances are visible only to you and the administrator of the workspace.', example='PRIVATE'),
displayName?: string(name='DisplayName', description='The TensorBoard instance name.', example='TestTensorboard'),
endTime?: string(name='EndTime', description='The end time of the query. Use the UTC time when the TensorBoard instance is created to filter data. If you leave this parameter empty, the default value is the current time.', example='2020-11-09T14:45:00Z'),
jobId?: string(name='JobId', description='The job ID used to filter TensorBoard instances. For more information about how to obtain the ID of a job, see [ListJobs](https://help.aliyun.com/document_detail/459676.html).', example='dlc-xxx'),
order?: string(name='Order', description='The sorting order.
* desc
* asc', example='desc'),
pageNumber?: int32(name='PageNumber', description='The page number. Minimum value: 1.', example='1'),
pageSize?: int32(name='PageSize', description='The number of TensorBoard instances per page.', example='50'),
paymentType?: string(name='PaymentType', description='The billing method of TensorBoard instances.
* Free: the TensorBoard instance that uses free resources.
* Postpaid: the TensorBoard instance that uses pay-as-you-go resources.', example='Postpaid'),
quotaId?: string(name='QuotaId', description='The resource quota ID.
>
* Only whitelisted users can use resource quotas to create TensorBoard instances. If you want to use this feature, contact us.
* This parameter takes effect only when TensorBoard instances use resource quotas.', example='quota12***'),
showOwn?: boolean(name='ShowOwn', description='Specifies whether to return only the TensorBoard instances created by the current logon account.', example='false'),
sortBy?: string(name='SortBy', description='The returned field used to sort TensorBoard instances.
* DisplayName: the name of the TensorBoard instance.
* GmtCreateTime: the time when the TensorBoard instance is created.', example='GmtCreateTime'),
sourceId?: string(name='SourceId', description='The data source ID. For more information about how to obtain the ID of a job, see [ListJobs](https://help.aliyun.com/document_detail/459676.html).', example='dlc-xxxxxx'),
sourceType?: string(name='SourceType', description='The data source associated with the TensorBoard instance. This parameter is no longer used. Only Deep Learning Containers (DLC) training jobs are supported.', example='job'),
startTime?: string(name='StartTime', description='The start time of the query. Use the UTC time when the TensorBoard instance is created to filter data. If you leave this parameter empty, the default value is seven days before the current time.', example='2020-11-08T16:00:00Z'),
status?: string(name='Status', description='The TensorBoard instance status. Valid values:
* Creating
* Running
* Stopped
* Succeeded
* Failed', example='Running'),
tensorboardId?: string(name='TensorboardId', description='The TensorBoard instance ID used to filter TensorBoard instances.', example='tensorboard-xxx'),
userId?: string(name='UserId', description='The user ID.', example='161****3000'),
username?: string(name='Username', description='The username.', example='she****mo'),
verbose?: boolean(name='Verbose', description='Specifies whether to return detailed information about the TensorBoard instance. Valid values:
* true
* false', example='true'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID. Obtain a list of TensorBoard instances based on the workspace ID.
<props="china">For more information, see [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html).', example='380'),
}
model ListTensorboardsResponseBody = {
requestId?: string(name='RequestId', description='The request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboards?: [
Tensorboard
](name='Tensorboards', description='The TensorBoard instances.'),
totalCount?: long(name='TotalCount', description='The total number of TensorBoard instances that meet the conditions.', example='100'),
}
model ListTensorboardsResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: ListTensorboardsResponseBody(name='body'),
}
/**
* @summary Queries a list of TensorBoard instances.
*
* @param request ListTensorboardsRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return ListTensorboardsResponse
*/
async function listTensorboardsWithOptions(request: ListTensorboardsRequest, headers: map[string]string, runtime: Util.RuntimeOptions): ListTensorboardsResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
query['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.displayName)) {
query['DisplayName'] = request.displayName;
}
if (!Util.isUnset(request.endTime)) {
query['EndTime'] = request.endTime;
}
if (!Util.isUnset(request.jobId)) {
query['JobId'] = request.jobId;
}
if (!Util.isUnset(request.order)) {
query['Order'] = request.order;
}
if (!Util.isUnset(request.pageNumber)) {
query['PageNumber'] = request.pageNumber;
}
if (!Util.isUnset(request.pageSize)) {
query['PageSize'] = request.pageSize;
}
if (!Util.isUnset(request.paymentType)) {
query['PaymentType'] = request.paymentType;
}
if (!Util.isUnset(request.quotaId)) {
query['QuotaId'] = request.quotaId;
}
if (!Util.isUnset(request.showOwn)) {
query['ShowOwn'] = request.showOwn;
}
if (!Util.isUnset(request.sortBy)) {
query['SortBy'] = request.sortBy;
}
if (!Util.isUnset(request.sourceId)) {
query['SourceId'] = request.sourceId;
}
if (!Util.isUnset(request.sourceType)) {
query['SourceType'] = request.sourceType;
}
if (!Util.isUnset(request.startTime)) {
query['StartTime'] = request.startTime;
}
if (!Util.isUnset(request.status)) {
query['Status'] = request.status;
}
if (!Util.isUnset(request.tensorboardId)) {
query['TensorboardId'] = request.tensorboardId;
}
if (!Util.isUnset(request.userId)) {
query['UserId'] = request.userId;
}
if (!Util.isUnset(request.username)) {
query['Username'] = request.username;
}
if (!Util.isUnset(request.verbose)) {
query['Verbose'] = request.verbose;
}
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'ListTensorboards',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards`,
method = 'GET',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Queries a list of TensorBoard instances.
*
* @param request ListTensorboardsRequest
* @return ListTensorboardsResponse
*/
async function listTensorboards(request: ListTensorboardsRequest): ListTensorboardsResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return listTensorboardsWithOptions(request, headers, runtime);
}
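// Illustrative sketch (not part of the generated spec): listing the running TensorBoard
// instances in a workspace, newest first, using fields defined in ListTensorboardsRequest
// above and its example values.
//
//   var tbReq = new ListTensorboardsRequest{
//     workspaceId = '380',
//     status = 'Running',
//     sortBy = 'GmtCreateTime',
//     order = 'desc',
//     pageNumber = 1,
//     pageSize = 50,
//   };
//   var tbResp = listTensorboards(tbReq);
//   // tbResp.body.totalCount is the number of matching instances;
//   // tbResp.body.tensorboards holds the current page.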
model StartTensorboardRequest {
workspaceId?: string(name='WorkspaceId', description='The workspace ID.', example='380'),
}
model StartTensorboardResponseBody = {
requestId?: string(name='RequestId', description='The request ID.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardId?: string(name='TensorboardId', description='The TensorBoard instance ID.', example='tensorboard-20210114104214-vf9lowjt3pso'),
}
model StartTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: StartTensorboardResponseBody(name='body'),
}
/**
* @summary Starts a TensorBoard instance.
*
* @param request StartTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return StartTensorboardResponse
*/
async function startTensorboardWithOptions(TensorboardId: string, request: StartTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): StartTensorboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'StartTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}/start`,
method = 'PUT',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Starts a TensorBoard instance.
*
* @param request StartTensorboardRequest
* @return StartTensorboardResponse
*/
async function startTensorboard(TensorboardId: string, request: StartTensorboardRequest): StartTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return startTensorboardWithOptions(TensorboardId, request, headers, runtime);
}
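// Illustrative sketch (not part of the generated spec): starting a stopped TensorBoard
// instance. The instance ID and workspace ID are the example values from the models above.
//
//   var startReq = new StartTensorboardRequest{
//     workspaceId = '380',
//   };
//   var startResp = startTensorboard('tensorboard-20210114104214-vf9lowjt3pso', startReq);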
model StopJobResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc-20210126170216-xxxxxxx'),
requestId?: string(name='RequestId', description='The request ID. You can troubleshoot issues based on the request ID.', example='473469C7-AA6F-4DC5-B3DB-xxxxxx'),
}
model StopJobResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: StopJobResponseBody(name='body'),
}
/**
* @summary Stops a running job.
*
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return StopJobResponse
*/
async function stopJobWithOptions(JobId: string, headers: map[string]string, runtime: Util.RuntimeOptions): StopJobResponse {
var req = new OpenApi.OpenApiRequest{
headers = headers,
};
var params = new OpenApi.Params{
action = 'StopJob',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}/stop`,
method = 'POST',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Stops a running job.
*
* @return StopJobResponse
*/
async function stopJob(JobId: string): StopJobResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return stopJobWithOptions(JobId, headers, runtime);
}
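// Illustrative sketch (not part of the generated spec): stopping a running job by its ID.
// The job ID is the example value from StopJobResponseBody above.
//
//   var stopJobResp = stopJob('dlc-20210126170216-xxxxxxx');
//   // stopJobResp.body.requestId can be used to troubleshoot the call.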
model StopTensorboardRequest {
workspaceId?: string(name='WorkspaceId', description='The workspace ID.
<props="china">For more information about how to query the workspace ID, see [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html).', example='380'),
}
model StopTensorboardResponseBody = {
requestId?: string(name='RequestId', description='The ID of the request.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardId?: string(name='TensorboardId', description='The ID of the TensorBoard instance.', example='tensorboard-20210114104214-xxxxxxxx'),
}
model StopTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: StopTensorboardResponseBody(name='body'),
}
/**
* @summary Stops a TensorBoard instance.
*
* @param request StopTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return StopTensorboardResponse
*/
async function stopTensorboardWithOptions(TensorboardId: string, request: StopTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): StopTensorboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'StopTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}/stop`,
method = 'PUT',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Stops a TensorBoard instance.
*
* @param request StopTensorboardRequest
* @return StopTensorboardResponse
*/
async function stopTensorboard(TensorboardId: string, request: StopTensorboardRequest): StopTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return stopTensorboardWithOptions(TensorboardId, request, headers, runtime);
}
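// Illustrative sketch (not part of the generated spec): stopping a TensorBoard instance
// in a given workspace, using the example IDs from the models above.
//
//   var stopTbReq = new StopTensorboardRequest{
//     workspaceId = '380',
//   };
//   var stopTbResp = stopTensorboard('tensorboard-20210114104214-xxxxxxxx', stopTbReq);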
model UpdateJobRequest {
accessibility?: string(name='Accessibility', description='The job visibility. Valid values:
* PUBLIC: The job is visible to all members in the workspace.
* PRIVATE: The job is visible only to you and the administrator of the workspace.', example='PRIVATE'),
priority?: int32(name='Priority', description='The job priority. Valid values: 1 to 9.
* 1: the lowest priority.
* 9: the highest priority.', example='5'),
}
model UpdateJobResponseBody = {
jobId?: string(name='JobId', description='The job ID.', example='dlc*************'),
requestId?: string(name='RequestId', description='The request ID, which is used for diagnostics and Q&A.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
}
model UpdateJobResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: UpdateJobResponseBody(name='body'),
}
/**
* @summary Updates the configuration information of a job. For example, you can modify the priority of a job in a queue.
*
* @param request UpdateJobRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return UpdateJobResponse
*/
async function updateJobWithOptions(JobId: string, request: UpdateJobRequest, headers: map[string]string, runtime: Util.RuntimeOptions): UpdateJobResponse {
Util.validateModel(request);
var body : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
body['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.priority)) {
body['Priority'] = request.priority;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
body = OpenApiUtil.parseToMap(body),
};
var params = new OpenApi.Params{
action = 'UpdateJob',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/jobs/${OpenApiUtil.getEncodeParam(JobId)}`,
method = 'PUT',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Updates the configuration information of a job. For example, you can modify the priority of a job in a queue.
*
* @param request UpdateJobRequest
* @return UpdateJobResponse
*/
async function updateJob(JobId: string, request: UpdateJobRequest): UpdateJobResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return updateJobWithOptions(JobId, request, headers, runtime);
}
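// Illustrative sketch (not part of the generated spec): raising a queued job's priority
// and making it private, using the fields defined in UpdateJobRequest above.
//
//   var updateJobReq = new UpdateJobRequest{
//     priority = 5,              // valid range 1 (lowest) to 9 (highest)
//     accessibility = 'PRIVATE',
//   };
//   var updateJobResp = updateJob('dlc-20210126170216-xxxxxxx', updateJobReq);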
model UpdateTensorboardRequest {
accessibility?: string(name='Accessibility', description='The visibility of the TensorBoard instance. Valid values:
* PUBLIC: The TensorBoard instance is visible to all members in the workspace.
* PRIVATE: The TensorBoard instance is visible only to you and the administrator of the workspace.', example='PRIVATE'),
maxRunningTimeMinutes?: long(name='MaxRunningTimeMinutes', description='The maximum running time. Unit: minutes.', example='300'),
priority?: string(name='Priority'),
workspaceId?: string(name='WorkspaceId', description='The workspace ID.
<props="china">For more information about how to query the workspace ID, see [ListWorkspaces](https://help.aliyun.com/document_detail/449124.html).', example='380'),
}
model UpdateTensorboardResponseBody = {
requestId?: string(name='RequestId', description='The ID of the request.', example='473469C7-AA6F-4DC5-B3DB-A3DC0DE3C83E'),
tensorboardId?: string(name='TensorboardId', description='The ID of the TensorBoard instance.', example='tensorboard-20210114104214-xxxxxxxx'),
}
model UpdateTensorboardResponse = {
headers?: map[string]string(name='headers'),
statusCode?: int32(name='statusCode'),
body?: UpdateTensorboardResponseBody(name='body'),
}
/**
* @summary Updates a TensorBoard instance.
*
* @param request UpdateTensorboardRequest
* @param headers map
* @param runtime runtime options for this request RuntimeOptions
* @return UpdateTensorboardResponse
*/
async function updateTensorboardWithOptions(TensorboardId: string, request: UpdateTensorboardRequest, headers: map[string]string, runtime: Util.RuntimeOptions): UpdateTensorboardResponse {
Util.validateModel(request);
var query : map[string]any = {};
if (!Util.isUnset(request.accessibility)) {
query['Accessibility'] = request.accessibility;
}
if (!Util.isUnset(request.maxRunningTimeMinutes)) {
query['MaxRunningTimeMinutes'] = request.maxRunningTimeMinutes;
}
if (!Util.isUnset(request.priority)) {
query['Priority'] = request.priority;
}
if (!Util.isUnset(request.workspaceId)) {
query['WorkspaceId'] = request.workspaceId;
}
var req = new OpenApi.OpenApiRequest{
headers = headers,
query = OpenApiUtil.query(query),
};
var params = new OpenApi.Params{
action = 'UpdateTensorboard',
version = '2020-12-03',
protocol = 'HTTPS',
pathname = `/api/v1/tensorboards/${OpenApiUtil.getEncodeParam(TensorboardId)}`,
method = 'PUT',
authType = 'AK',
style = 'ROA',
reqBodyType = 'json',
bodyType = 'json',
};
return callApi(params, req, runtime);
}
/**
* @summary Updates a TensorBoard instance.
*
* @param request UpdateTensorboardRequest
* @return UpdateTensorboardResponse
*/
async function updateTensorboard(TensorboardId: string, request: UpdateTensorboardRequest): UpdateTensorboardResponse {
var runtime = new Util.RuntimeOptions{};
var headers : map[string]string = {};
return updateTensorboardWithOptions(TensorboardId, request, headers, runtime);
}
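// Illustrative sketch (not part of the generated spec): extending a TensorBoard instance's
// maximum running time, using fields defined in UpdateTensorboardRequest above.
//
//   var updateTbReq = new UpdateTensorboardRequest{
//     workspaceId = '380',
//     maxRunningTimeMinutes = 300,
//   };
//   var updateTbResp = updateTensorboard('tensorboard-20210114104214-xxxxxxxx', updateTbReq);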