in infrastructure-provisioning/src/general/lib/gcp/actions_lib.py [0:0]
def create_instance(self, instance_name, service_base_name, cluster_name, region, zone, vpc_name, subnet_name,
                    instance_size, ssh_key_path, initial_user, image_name, secondary_image_name,
                    service_account_name, instance_class, network_tag, labels, static_ip='',
                    primary_disk_size='12', secondary_disk_size='30',
                    gpu_accelerator_type='None', gpu_accelerator_count='1',
                    os_login_enabled='FALSE', block_project_ssh_keys='FALSE', rsa_encrypted_csek=''):
    """Create a GCE instance for the given DataLab instance class.

    Builds the compute.instances().insert request body (disks, network
    interface, SSH metadata, service account, optional GPU accelerators),
    submits it, waits for the operation to finish, then applies network tags
    and disk labels.

    :param instance_name: name of the instance to create
    :param service_base_name: DataLab service base name (also used to detect
        project-built images for CSEK encryption)
    :param cluster_name: cluster name, used for dataengine disk tag names
    :param region/zone: GCP location of the instance
    :param vpc_name/subnet_name: network placement
    :param instance_size: machine type (e.g. 'n1-standard-2')
    :param ssh_key_path: path to an RSA private key file; its OpenSSH public
        part is pushed via instance metadata
    :param initial_user: OS user the SSH key is registered for
    :param image_name/secondary_image_name: source images for the boot and
        (notebook/dataengine) secondary disks
    :param service_account_name: used to resolve the service account email
    :param instance_class: one of 'ssn', 'edge', 'notebook', 'dataengine';
        controls IP forwarding, external NAT, disk layout, and tagging
    :param network_tag: base network tag for the instance
    :param labels: dict of GCE labels applied to instance and disks
    :param static_ip: external IP for ssn/edge NAT config
    :param primary_disk_size/secondary_disk_size: sizes in GB (strings)
    :param gpu_accelerator_type/gpu_accelerator_count: optional GPU config;
        'None' type disables GPUs
    :param os_login_enabled/block_project_ssh_keys: metadata toggles
    :param rsa_encrypted_csek: optional customer-supplied encryption key
    :return: the insert operation result dict, or None on failure (the error
        is logged and appended to the result file)
    """
    # Derive the OpenSSH public key from the private key; the context manager
    # closes the key file (previously the handle was leaked).
    with open(ssh_key_path, 'rb') as key_file:
        key = RSA.importKey(key_file.read())
    ssh_key = key.publickey().exportKey("OpenSSH").decode('UTF-8')
    unique_index = datalab.meta_lib.GCPMeta().get_index_by_service_account_name(service_account_name)
    service_account_email = "{}-{}@{}.iam.gserviceaccount.com".format(service_base_name, unique_index, self.project)
    access_configs = ''
    # Only the edge node routes traffic for the private subnet.
    if instance_class == 'edge':
        ip_forward = True
    else:
        ip_forward = False
    # ssn/edge are publicly reachable: attach a one-to-one NAT external IP.
    if instance_class == 'ssn' or instance_class == 'edge':
        access_configs = [{
            "type": "ONE_TO_ONE_NAT",
            "name": "External NAT",
            "natIP": static_ip
        }]
    if instance_class == 'notebook':
        # Notebooks get a pre-created secondary data disk attached by source.
        GCPActions().create_disk(instance_name, zone, secondary_disk_size, secondary_image_name, rsa_encrypted_csek)
        disks = [
            {
                "name": instance_name,
                "tag_name": instance_name + '-volume-primary',
                "deviceName": instance_name + '-primary',
                "autoDelete": "true",
                "boot": "true",
                "mode": "READ_WRITE",
                "type": "PERSISTENT",
                "initializeParams": {
                    "diskSizeGb": primary_disk_size,
                    "sourceImage": image_name
                }
            },
            {
                "name": instance_name + '-secondary',
                "tag_name": instance_name + '-volume-secondary',
                "deviceName": instance_name + '-secondary',
                "autoDelete": "true",
                "boot": "false",
                "mode": "READ_WRITE",
                "type": "PERSISTENT",
                "interface": "SCSI",
                "source": "projects/{0}/zones/{1}/disks/{2}-secondary".format(self.project,
                                                                             zone,
                                                                             instance_name)
            }
        ]
    elif instance_class == 'dataengine':
        # Same two-disk layout as notebooks, but the primary disk is tagged
        # with the cluster name instead of the instance name.
        GCPActions().create_disk(instance_name, zone, secondary_disk_size, secondary_image_name, rsa_encrypted_csek)
        disks = [{
            "name": instance_name,
            "tag_name": cluster_name + '-volume-primary',
            "deviceName": cluster_name + '-primary',
            "autoDelete": 'true',
            "initializeParams": {
                "diskSizeGb": primary_disk_size,
                "sourceImage": image_name
            },
            "boot": 'true',
            "mode": "READ_WRITE"
        },
            {
                "name": instance_name + '-secondary',
                "tag_name": instance_name + '-volume-secondary',
                "deviceName": instance_name + '-secondary',
                "autoDelete": "true",
                "boot": "false",
                "mode": "READ_WRITE",
                "type": "PERSISTENT",
                "interface": "SCSI",
                "source": "projects/{0}/zones/{1}/disks/{2}-secondary".format(self.project,
                                                                             zone,
                                                                             instance_name)
            }
        ]
    else:
        # ssn/edge: single boot disk only.
        disks = [{
            "name": instance_name,
            "tag_name": instance_name + '-volume-primary',
            "deviceName": instance_name + '-primary',
            "autoDelete": 'true',
            "initializeParams": {
                "diskSizeGb": primary_disk_size,
                "sourceImage": image_name
            },
            "boot": 'true',
            "mode": "READ_WRITE"
        }]
    if service_base_name in image_name and rsa_encrypted_csek:
        # Project-built image: both the source image and the new disks are
        # protected with the customer-supplied key.
        for disk in disks:
            if "initializeParams" in disk:
                disk["initializeParams"]["sourceImageEncryptionKey"] = {"rsaEncryptedKey": rsa_encrypted_csek}
            disk["diskEncryptionKey"] = {"rsaEncryptedKey": rsa_encrypted_csek}
    elif rsa_encrypted_csek:
        # Stock image: only the created disks are CSEK-encrypted.
        for disk in disks:
            disk["diskEncryptionKey"] = {"rsaEncryptedKey": rsa_encrypted_csek}
    instance_params = {
        "name": instance_name,
        "machineType": "zones/{}/machineTypes/{}".format(zone, instance_size),
        "labels": labels,
        "canIpForward": ip_forward,
        "networkInterfaces": [
            {
                "network": "global/networks/{}".format(vpc_name),
                "subnetwork": "regions/{}/subnetworks/{}".format(region, subnet_name),
                "accessConfigs": access_configs
            },
        ],
        "metadata":
            {"items": [
                {
                    "key": "ssh-keys",
                    "value": "{}:{}".format(initial_user, ssh_key)
                },
                {
                    "key": "enable-oslogin",
                    "value": "{}".format(os_login_enabled)
                },
                {
                    "key": "block-project-ssh-keys",
                    "value": "{}".format(block_project_ssh_keys)
                }
            ]
            },
        "disks": disks,
        "serviceAccounts": [
            {
                "email": service_account_email,
                "scopes": ["https://www.googleapis.com/auth/cloud-platform",
                           "https://www.googleapis.com/auth/compute"]
            }
        ]
    }
    # Private instances must not carry an (empty) accessConfigs entry.
    if instance_class == 'notebook' or instance_class == 'dataengine':
        del instance_params['networkInterfaces'][0]['accessConfigs']
    if gpu_accelerator_type != 'None':
        instance_params['guestAccelerators'] = [
            {
                "acceleratorCount": gpu_accelerator_count,
                "acceleratorType": "projects/{0}/zones/{1}/acceleratorTypes/{2}".format(
                    self.project, zone, gpu_accelerator_type)
            }
        ]
        # GPU instances cannot be live-migrated; terminate on host maintenance.
        instance_params['scheduling'] = {
            "onHostMaintenance": "terminate",
            "automaticRestart": "true"
        }
    request = self.service.instances().insert(project=self.project, zone=zone,
                                              body=instance_params)
    try:
        result = request.execute()
        datalab.meta_lib.GCPMeta().wait_for_operation(result['name'], zone=zone)
        print('Instance {} created.'.format(instance_name))
        # Re-read the instance: setTags requires the current tag fingerprint.
        request = self.service.instances().get(instance=instance_name, project=self.project,
                                               zone=zone)
        res = request.execute()
        if 'ssn' in network_tag:
            instance_tag = {"items": [network_tag, "datalab", "ssn"], "fingerprint": res['tags']['fingerprint']}
        elif 'edge' in network_tag:
            instance_tag = {"items": [network_tag, "datalab", "edge"], "fingerprint": res['tags']['fingerprint']}
        else:
            instance_tag = {"items": [network_tag, "datalab"], "fingerprint": res['tags']['fingerprint']}
        request = self.service.instances().setTags(instance=instance_name, project=self.project,
                                                   zone=zone,
                                                   body=instance_tag)
        GCPActions().set_disks_tag(disks, zone, labels)
        request.execute()
        return result
    except Exception as err:
        # BUG FIX: traceback.print_exc() returns None, so concatenating it
        # into the message raised TypeError inside this handler and hid the
        # real error. format_exc() returns the traceback as a string.
        tb = traceback.format_exc()
        logging.info(
            "Unable to create Instance: " + str(err) + "\n Traceback: " + tb)
        append_result(str({"error": "Unable to create Instance",
                           "error_message": str(err) + "\n Traceback: " + tb}))
        traceback.print_exc(file=sys.stdout)