// pkg/tfplan2cai/converters/services/compute/compute_instance_helpers.go

package compute import ( "fmt" "reflect" "strconv" "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/googleapi" compute "google.golang.org/api/compute/v0.beta" ) func instanceSchedulingNodeAffinitiesElemSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { Type: schema.TypeString, Required: true, }, "operator": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{"IN", "NOT_IN"}, false), }, "values": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, }, } } func expandAliasIpRanges(ranges []interface{}) []*compute.AliasIpRange { ipRanges := make([]*compute.AliasIpRange, 0, len(ranges)) for _, raw := range ranges { data := raw.(map[string]interface{}) ipRanges = append(ipRanges, &compute.AliasIpRange{ IpCidrRange: data["ip_cidr_range"].(string), SubnetworkRangeName: data["subnetwork_range_name"].(string), }) } return ipRanges } func flattenAliasIpRange(d *schema.ResourceData, ranges []*compute.AliasIpRange, i int) []map[string]interface{} { prefix := fmt.Sprintf("network_interface.%d", i) configData := []map[string]interface{}{} for _, item := range d.Get(prefix + ".alias_ip_range").([]interface{}) { configData = append(configData, item.(map[string]interface{})) } apiData := make([]map[string]interface{}, 0, len(ranges)) for _, ipRange := range ranges { apiData = append(apiData, map[string]interface{}{ "ip_cidr_range": ipRange.IpCidrRange, "subnetwork_range_name": ipRange.SubnetworkRangeName, }) } //permadiff fix sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "ip_cidr_range") if err != nil { return apiData } return sorted } func 
expandScheduling(v interface{}) (*compute.Scheduling, error) { if v == nil { // We can't set default values for lists. return &compute.Scheduling{ AutomaticRestart: googleapi.Bool(true), }, nil } ls := v.([]interface{}) if len(ls) == 0 { // We can't set default values for lists return &compute.Scheduling{ AutomaticRestart: googleapi.Bool(true), }, nil } if len(ls) > 1 || ls[0] == nil { return nil, fmt.Errorf("expected exactly one scheduling block") } original := ls[0].(map[string]interface{}) scheduling := &compute.Scheduling{ ForceSendFields: make([]string, 0, 4), } if v, ok := original["automatic_restart"]; ok { scheduling.AutomaticRestart = googleapi.Bool(v.(bool)) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "AutomaticRestart") } if v, ok := original["preemptible"]; ok { scheduling.Preemptible = v.(bool) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "Preemptible") } if v, ok := original["on_host_maintenance"]; ok { scheduling.OnHostMaintenance = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnHostMaintenance") } if v, ok := original["node_affinities"]; ok && v != nil { naSet := v.(*schema.Set).List() scheduling.NodeAffinities = make([]*compute.SchedulingNodeAffinity, len(ls)) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "NodeAffinities") for _, nodeAffRaw := range naSet { if nodeAffRaw == nil { continue } nodeAff := nodeAffRaw.(map[string]interface{}) transformed := &compute.SchedulingNodeAffinity{ Key: nodeAff["key"].(string), Operator: nodeAff["operator"].(string), Values: tpgresource.ConvertStringArr(nodeAff["values"].(*schema.Set).List()), } scheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed) } } if v, ok := original["min_node_cpus"]; ok { scheduling.MinNodeCpus = int64(v.(int)) } if v, ok := original["provisioning_model"]; ok { scheduling.ProvisioningModel = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, 
"ProvisioningModel") } if v, ok := original["instance_termination_action"]; ok { scheduling.InstanceTerminationAction = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction") } if v, ok := original["availability_domain"]; ok && v != nil { scheduling.AvailabilityDomain = int64(v.(int)) } if v, ok := original["max_run_duration"]; ok { transformedMaxRunDuration, err := expandComputeMaxRunDuration(v) if err != nil { return nil, err } scheduling.MaxRunDuration = transformedMaxRunDuration scheduling.ForceSendFields = append(scheduling.ForceSendFields, "MaxRunDuration") } if v, ok := original["on_instance_stop_action"]; ok { transformedOnInstanceStopAction, err := expandComputeOnInstanceStopAction(v) if err != nil { return nil, err } scheduling.OnInstanceStopAction = transformedOnInstanceStopAction scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnInstanceStopAction") } if v, ok := original["host_error_timeout_seconds"]; ok { scheduling.HostErrorTimeoutSeconds = int64(v.(int)) //host_error_timeout_seconds doesn't get removed correctly due to an API bug on instances.SetScheduling. 
//We need to set it to NullFields as a workaround because nil is rounded to 0 if v == 0 || v == nil { scheduling.NullFields = append(scheduling.NullFields, "HostErrorTimeoutSeconds") } else { scheduling.ForceSendFields = append(scheduling.ForceSendFields, "HostErrorTimeoutSeconds") } } if v, ok := original["maintenance_interval"]; ok { scheduling.MaintenanceInterval = v.(string) } if v, ok := original["graceful_shutdown"]; ok { transformedGracefulShutdown, err := expandGracefulShutdown(v) if err != nil { return nil, err } scheduling.GracefulShutdown = transformedGracefulShutdown scheduling.ForceSendFields = append(scheduling.ForceSendFields, "GracefulShutdown") } if v, ok := original["local_ssd_recovery_timeout"]; ok { transformedLocalSsdRecoveryTimeout, err := expandComputeLocalSsdRecoveryTimeout(v) if err != nil { return nil, err } scheduling.LocalSsdRecoveryTimeout = transformedLocalSsdRecoveryTimeout scheduling.ForceSendFields = append(scheduling.ForceSendFields, "LocalSsdRecoveryTimeout") } if v, ok := original["termination_time"]; ok { scheduling.TerminationTime = v.(string) } return scheduling, nil } func expandComputeMaxRunDuration(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) duration := compute.Duration{} if len(l) == 0 || l[0] == nil { return nil, nil } raw := l[0] original := raw.(map[string]interface{}) transformedNanos, err := expandComputeMaxRunDurationNanos(original["nanos"]) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Nanos = int64(transformedNanos.(int)) } transformedSeconds, err := expandComputeMaxRunDurationSeconds(original["seconds"]) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Seconds = int64(transformedSeconds.(int)) } return &duration, nil } func expandComputeMaxRunDurationNanos(v interface{}) (interface{}, error) { return 
v, nil } func expandComputeMaxRunDurationSeconds(v interface{}) (interface{}, error) { return v, nil } func expandComputeOnInstanceStopAction(v interface{}) (*compute.SchedulingOnInstanceStopAction, error) { l := v.([]interface{}) onInstanceStopAction := compute.SchedulingOnInstanceStopAction{} if len(l) == 0 || l[0] == nil { return nil, nil } raw := l[0] original := raw.(map[string]interface{}) if d, ok := original["discard_local_ssd"]; ok { onInstanceStopAction.DiscardLocalSsd = d.(bool) } else { return nil, nil } return &onInstanceStopAction, nil } func expandComputeLocalSsdRecoveryTimeout(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) duration := compute.Duration{} if len(l) == 0 || l[0] == nil { return nil, nil } raw := l[0] original := raw.(map[string]interface{}) transformedNanos, err := expandComputeLocalSsdRecoveryTimeoutNanos(original["nanos"]) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Nanos = int64(transformedNanos.(int)) } transformedSeconds, err := expandComputeLocalSsdRecoveryTimeoutSeconds(original["seconds"]) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Seconds = int64(transformedSeconds.(int)) } return &duration, nil } func expandComputeLocalSsdRecoveryTimeoutNanos(v interface{}) (interface{}, error) { return v, nil } func expandComputeLocalSsdRecoveryTimeoutSeconds(v interface{}) (interface{}, error) { return v, nil } func expandGracefulShutdown(v interface{}) (*compute.SchedulingGracefulShutdown, error) { l := v.([]interface{}) gracefulShutdown := compute.SchedulingGracefulShutdown{} if len(l) == 0 || l[0] == nil { return nil, nil } raw := l[0] original := raw.(map[string]interface{}) originalMaxDuration := original["max_duration"].([]interface{}) maxDuration, err := expandGracefulShutdownMaxDuration(originalMaxDuration) if err 
!= nil { return nil, err } if maxDuration != nil { gracefulShutdown.MaxDuration = maxDuration } gracefulShutdown.Enabled = original["enabled"].(bool) gracefulShutdown.ForceSendFields = append(gracefulShutdown.ForceSendFields, "Enabled") return &gracefulShutdown, nil } func expandGracefulShutdownMaxDuration(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) duration := compute.Duration{} if len(l) == 0 || l[0] == nil { return nil, nil } raw := l[0] maxDurationMap := raw.(map[string]interface{}) transformedNanos := maxDurationMap["nanos"] transformedSeconds := maxDurationMap["seconds"] if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Nanos = int64(transformedNanos.(int)) } if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) { duration.Seconds = int64(transformedSeconds.(int)) } duration.ForceSendFields = append(duration.ForceSendFields, "Seconds") return &duration, nil } func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { schedulingMap := map[string]interface{}{ "on_host_maintenance": resp.OnHostMaintenance, "preemptible": resp.Preemptible, "min_node_cpus": resp.MinNodeCpus, "provisioning_model": resp.ProvisioningModel, "instance_termination_action": resp.InstanceTerminationAction, "availability_domain": resp.AvailabilityDomain, "termination_time": resp.TerminationTime, } if resp.AutomaticRestart != nil { schedulingMap["automatic_restart"] = *resp.AutomaticRestart } if resp.MaxRunDuration != nil { schedulingMap["max_run_duration"] = flattenComputeMaxRunDuration(resp.MaxRunDuration) } if resp.OnInstanceStopAction != nil { schedulingMap["on_instance_stop_action"] = flattenOnInstanceStopAction(resp.OnInstanceStopAction) } if resp.HostErrorTimeoutSeconds != 0 { schedulingMap["host_error_timeout_seconds"] = resp.HostErrorTimeoutSeconds } if resp.MaintenanceInterval != "" { schedulingMap["maintenance_interval"] = resp.MaintenanceInterval 
} if resp.GracefulShutdown != nil { schedulingMap["graceful_shutdown"] = flattenGracefulShutdown(resp.GracefulShutdown) } if resp.LocalSsdRecoveryTimeout != nil { schedulingMap["local_ssd_recovery_timeout"] = flattenComputeLocalSsdRecoveryTimeout(resp.LocalSsdRecoveryTimeout) } nodeAffinities := schema.NewSet(schema.HashResource(instanceSchedulingNodeAffinitiesElemSchema()), nil) for _, na := range resp.NodeAffinities { nodeAffinities.Add(map[string]interface{}{ "key": na.Key, "operator": na.Operator, "values": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(na.Values)), }) } schedulingMap["node_affinities"] = nodeAffinities return []map[string]interface{}{schedulingMap} } func flattenComputeMaxRunDuration(v *compute.Duration) []interface{} { if v == nil { return nil } transformed := make(map[string]interface{}) transformed["nanos"] = v.Nanos transformed["seconds"] = v.Seconds return []interface{}{transformed} } func flattenOnInstanceStopAction(v *compute.SchedulingOnInstanceStopAction) []interface{} { if v == nil { return nil } transformed := make(map[string]interface{}) transformed["discard_local_ssd"] = v.DiscardLocalSsd return []interface{}{transformed} } func flattenComputeLocalSsdRecoveryTimeout(v *compute.Duration) []interface{} { if v == nil { return nil } transformed := make(map[string]interface{}) transformed["nanos"] = v.Nanos transformed["seconds"] = v.Seconds return []interface{}{transformed} } func flattenGracefulShutdown(v *compute.SchedulingGracefulShutdown) []interface{} { if v == nil { return nil } transformed := make(map[string]interface{}) transformed["enabled"] = v.Enabled transformed["max_duration"] = flattenGracefulShutdownMaxDuration(v.MaxDuration) return []interface{}{transformed} } func flattenGracefulShutdownMaxDuration(v *compute.Duration) []interface{} { if v == nil { return nil } transformed := make(map[string]interface{}) transformed["nanos"] = v.Nanos transformed["seconds"] = v.Seconds return 
[]interface{}{transformed} } func flattenAccessConfigs(accessConfigs []*compute.AccessConfig) ([]map[string]interface{}, string) { flattened := make([]map[string]interface{}, len(accessConfigs)) natIP := "" for i, ac := range accessConfigs { flattened[i] = map[string]interface{}{ "nat_ip": ac.NatIP, "network_tier": ac.NetworkTier, } if ac.SetPublicPtr { flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName } if natIP == "" { natIP = ac.NatIP } if ac.SecurityPolicy != "" { flattened[i]["security_policy"] = ac.SecurityPolicy } } return flattened, natIP } func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute.AccessConfig) []map[string]interface{} { flattened := make([]map[string]interface{}, len(ipv6AccessConfigs)) for i, ac := range ipv6AccessConfigs { flattened[i] = map[string]interface{}{ "network_tier": ac.NetworkTier, } flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName flattened[i]["external_ipv6"] = ac.ExternalIpv6 flattened[i]["external_ipv6_prefix_length"] = strconv.FormatInt(ac.ExternalIpv6PrefixLength, 10) flattened[i]["name"] = ac.Name if ac.SecurityPolicy != "" { flattened[i]["security_policy"] = ac.SecurityPolicy } } return flattened } func flattenNetworkInterfaces(d *schema.ResourceData, config *transport_tpg.Config, networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string, string, string, error) { flattened := make([]map[string]interface{}, len(networkInterfaces)) var region, internalIP, externalIP string for i, iface := range networkInterfaces { var ac []map[string]interface{} ac, externalIP = flattenAccessConfigs(iface.AccessConfigs) subnet, err := tpgresource.ParseSubnetworkFieldValue(iface.Subnetwork, d, config) if err != nil { return nil, "", "", "", err } region = subnet.Region flattened[i] = map[string]interface{}{ "network_ip": iface.NetworkIP, "network": tpgresource.ConvertSelfLinkToV1(iface.Network), "subnetwork": tpgresource.ConvertSelfLinkToV1(iface.Subnetwork), "subnetwork_project": 
subnet.Project, "access_config": ac, "alias_ip_range": flattenAliasIpRange(d, iface.AliasIpRanges, i), "nic_type": iface.NicType, "stack_type": iface.StackType, "ipv6_access_config": flattenIpv6AccessConfigs(iface.Ipv6AccessConfigs), "ipv6_address": iface.Ipv6Address, "queue_count": iface.QueueCount, "internal_ipv6_prefix_length": iface.InternalIpv6PrefixLength, } // Instance template interfaces never have names, so they're absent // in the instance template network_interface schema. We want to use the // same flattening code for both resource types, so we avoid trying to // set the name field when it's not set at the GCE end. if iface.Name != "" { flattened[i]["name"] = iface.Name } if internalIP == "" { internalIP = iface.NetworkIP } if iface.NetworkAttachment != "" { networkAttachment, err := tpgresource.GetRelativePath(iface.NetworkAttachment) if err != nil { return nil, "", "", "", err } flattened[i]["network_attachment"] = networkAttachment } // the security_policy for a network_interface is found in one of its accessConfigs. 
if len(iface.AccessConfigs) > 0 && iface.AccessConfigs[0].SecurityPolicy != "" { flattened[i]["security_policy"] = iface.AccessConfigs[0].SecurityPolicy } else if len(iface.Ipv6AccessConfigs) > 0 && iface.Ipv6AccessConfigs[0].SecurityPolicy != "" { flattened[i]["security_policy"] = iface.Ipv6AccessConfigs[0].SecurityPolicy } } return flattened, region, internalIP, externalIP, nil } func expandAccessConfigs(configs []interface{}) []*compute.AccessConfig { acs := make([]*compute.AccessConfig, len(configs)) for i, raw := range configs { acs[i] = &compute.AccessConfig{} acs[i].Type = "ONE_TO_ONE_NAT" if raw != nil { data := raw.(map[string]interface{}) acs[i].NatIP = data["nat_ip"].(string) acs[i].NetworkTier = data["network_tier"].(string) if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { acs[i].SetPublicPtr = true acs[i].PublicPtrDomainName = ptr.(string) } } } return acs } func expandIpv6AccessConfigs(configs []interface{}) []*compute.AccessConfig { iacs := make([]*compute.AccessConfig, len(configs)) for i, raw := range configs { iacs[i] = &compute.AccessConfig{} if raw != nil { data := raw.(map[string]interface{}) iacs[i].NetworkTier = data["network_tier"].(string) if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { iacs[i].PublicPtrDomainName = ptr.(string) } if eip, ok := data["external_ipv6"]; ok && eip != "" { iacs[i].ExternalIpv6 = eip.(string) } if eipl, ok := data["external_ipv6_prefix_length"]; ok && eipl != "" { if strVal, ok := eipl.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { iacs[i].ExternalIpv6PrefixLength = intVal } } } if name, ok := data["name"]; ok && name != "" { iacs[i].Name = name.(string) } iacs[i].Type = "DIRECT_IPV6" // Currently only type supported } } return iacs } func expandNetworkInterfaces(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*compute.NetworkInterface, error) { configs := d.Get("network_interface").([]interface{}) ifaces := 
make([]*compute.NetworkInterface, len(configs)) for i, raw := range configs { data := raw.(map[string]interface{}) var networkAttachment = "" network := data["network"].(string) subnetwork := data["subnetwork"].(string) if networkAttachmentObj, ok := data["network_attachment"]; ok { networkAttachment = networkAttachmentObj.(string) } // Checks if networkAttachment is not specified in resource, network or subnetwork have to be specified. if networkAttachment == "" && network == "" && subnetwork == "" { return nil, fmt.Errorf("exactly one of network, subnetwork, or network_attachment must be provided") } nf, err := tpgresource.ParseNetworkFieldValue(network, d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for network %q: %s", network, err) } subnetProjectField := fmt.Sprintf("network_interface.%d.subnetwork_project", i) sf, err := tpgresource.ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) if err != nil { return nil, fmt.Errorf("cannot determine self_link for subnetwork %q: %s", subnetwork, err) } ifaces[i] = &compute.NetworkInterface{ NetworkIP: data["network_ip"].(string), Network: nf.RelativeLink(), NetworkAttachment: networkAttachment, Subnetwork: sf.RelativeLink(), AccessConfigs: expandAccessConfigs(data["access_config"].([]interface{})), AliasIpRanges: expandAliasIpRanges(data["alias_ip_range"].([]interface{})), NicType: data["nic_type"].(string), StackType: data["stack_type"].(string), QueueCount: int64(data["queue_count"].(int)), Ipv6AccessConfigs: expandIpv6AccessConfigs(data["ipv6_access_config"].([]interface{})), Ipv6Address: data["ipv6_address"].(string), InternalIpv6PrefixLength: int64(data["internal_ipv6_prefix_length"].(int)), } } return ifaces, nil } func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { result := make([]map[string]interface{}, len(serviceAccounts)) for i, serviceAccount := range serviceAccounts { result[i] = 
map[string]interface{}{ "email": serviceAccount.Email, "scopes": schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(serviceAccount.Scopes)), } } return result } func expandServiceAccounts(configs []interface{}) []*compute.ServiceAccount { accounts := make([]*compute.ServiceAccount, len(configs)) for i, raw := range configs { data := raw.(map[string]interface{}) accounts[i] = &compute.ServiceAccount{ Email: data["email"].(string), Scopes: tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(data["scopes"].(*schema.Set))), } if accounts[i].Email == "" { accounts[i].Email = "default" } } return accounts } func flattenGuestAccelerators(accelerators []*compute.AcceleratorConfig) []map[string]interface{} { acceleratorsSchema := make([]map[string]interface{}, len(accelerators)) for i, accelerator := range accelerators { acceleratorsSchema[i] = map[string]interface{}{ "count": accelerator.AcceleratorCount, "type": accelerator.AcceleratorType, } } return acceleratorsSchema } func resourceInstanceTags(d tpgresource.TerraformResourceData) *compute.Tags { // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { vs := v.(*schema.Set) tags = new(compute.Tags) tags.Items = make([]string, vs.Len()) for i, v := range vs.List() { tags.Items[i] = v.(string) } tags.Fingerprint = d.Get("tags_fingerprint").(string) } return tags } func expandShieldedVmConfigs(d tpgresource.TerraformResourceData) *compute.ShieldedInstanceConfig { if _, ok := d.GetOk("shielded_instance_config"); !ok { return nil } prefix := "shielded_instance_config.0" return &compute.ShieldedInstanceConfig{ EnableSecureBoot: d.Get(prefix + ".enable_secure_boot").(bool), EnableVtpm: d.Get(prefix + ".enable_vtpm").(bool), EnableIntegrityMonitoring: d.Get(prefix + ".enable_integrity_monitoring").(bool), ForceSendFields: []string{"EnableSecureBoot", "EnableVtpm", "EnableIntegrityMonitoring"}, } } func expandConfidentialInstanceConfig(d 
tpgresource.TerraformResourceData) *compute.ConfidentialInstanceConfig { if _, ok := d.GetOk("confidential_instance_config"); !ok { return nil } prefix := "confidential_instance_config.0" return &compute.ConfidentialInstanceConfig{ EnableConfidentialCompute: d.Get(prefix + ".enable_confidential_compute").(bool), ConfidentialInstanceType: d.Get(prefix + ".confidential_instance_type").(string), } } func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute.ConfidentialInstanceConfig) []map[string]interface{} { if ConfidentialInstanceConfig == nil { return nil } return []map[string]interface{}{{ "enable_confidential_compute": ConfidentialInstanceConfig.EnableConfidentialCompute, "confidential_instance_type": ConfidentialInstanceConfig.ConfidentialInstanceType, }} } func expandAdvancedMachineFeatures(d tpgresource.TerraformResourceData) *compute.AdvancedMachineFeatures { if _, ok := d.GetOk("advanced_machine_features"); !ok { return nil } prefix := "advanced_machine_features.0" return &compute.AdvancedMachineFeatures{ EnableNestedVirtualization: d.Get(prefix + ".enable_nested_virtualization").(bool), ThreadsPerCore: int64(d.Get(prefix + ".threads_per_core").(int)), TurboMode: d.Get(prefix + ".turbo_mode").(string), VisibleCoreCount: int64(d.Get(prefix + ".visible_core_count").(int)), PerformanceMonitoringUnit: d.Get(prefix + ".performance_monitoring_unit").(string), EnableUefiNetworking: d.Get(prefix + ".enable_uefi_networking").(bool), } } func flattenAdvancedMachineFeatures(AdvancedMachineFeatures *compute.AdvancedMachineFeatures) []map[string]interface{} { if AdvancedMachineFeatures == nil { return nil } return []map[string]interface{}{{ "enable_nested_virtualization": AdvancedMachineFeatures.EnableNestedVirtualization, "threads_per_core": AdvancedMachineFeatures.ThreadsPerCore, "turbo_mode": AdvancedMachineFeatures.TurboMode, "visible_core_count": AdvancedMachineFeatures.VisibleCoreCount, "performance_monitoring_unit": 
AdvancedMachineFeatures.PerformanceMonitoringUnit, "enable_uefi_networking": AdvancedMachineFeatures.EnableUefiNetworking, }} } func flattenShieldedVmConfig(shieldedVmConfig *compute.ShieldedInstanceConfig) []map[string]bool { if shieldedVmConfig == nil { return nil } return []map[string]bool{{ "enable_secure_boot": shieldedVmConfig.EnableSecureBoot, "enable_vtpm": shieldedVmConfig.EnableVtpm, "enable_integrity_monitoring": shieldedVmConfig.EnableIntegrityMonitoring, }} } func expandDisplayDevice(d tpgresource.TerraformResourceData) *compute.DisplayDevice { if _, ok := d.GetOk("enable_display"); !ok { return nil } return &compute.DisplayDevice{ EnableDisplay: d.Get("enable_display").(bool), ForceSendFields: []string{"EnableDisplay"}, } } func flattenEnableDisplay(displayDevice *compute.DisplayDevice) interface{} { if displayDevice == nil { return nil } return displayDevice.EnableDisplay } // Node affinity updates require a reboot func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { o, n := d.GetChange("scheduling") oScheduling := o.([]interface{})[0].(map[string]interface{}) newScheduling := n.([]interface{})[0].(map[string]interface{}) return hasNodeAffinitiesChanged(oScheduling, newScheduling) || hasMaxRunDurationChanged(oScheduling, newScheduling) || hasGracefulShutdownChangedWithReboot(d, oScheduling, newScheduling) || hasTerminationTimeChanged(oScheduling, newScheduling) } // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually // https://github.com/hashicorp/terraform-plugin-sdk/issues/98 func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { if !d.HasChange("scheduling") { // This doesn't work correctly, which is why this method exists // But it is here for posterity return false } o, n := d.GetChange("scheduling") oScheduling := o.([]interface{})[0].(map[string]interface{}) newScheduling := n.([]interface{})[0].(map[string]interface{}) if schedulingHasChangeRequiringReboot(d) { return false } if 
oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { return true } if oScheduling["preemptible"] != newScheduling["preemptible"] { return true } if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { return true } if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { return true } if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { return true } if oScheduling["availability_domain"] != newScheduling["availability_domain"] { return true } if oScheduling["host_error_timeout_seconds"] != newScheduling["host_error_timeout_seconds"] { return true } if hasGracefulShutdownChanged(oScheduling, newScheduling) { return true } return false } func hasTerminationTimeChanged(oScheduling, nScheduling map[string]interface{}) bool { oTerminationTime := oScheduling["termination_time"].(string) nTerminationTime := nScheduling["termination_time"].(string) if len(oTerminationTime) == 0 && len(nTerminationTime) == 0 { return false } if len(oTerminationTime) == 0 || len(nTerminationTime) == 0 { return true } if oTerminationTime != nTerminationTime { return true } return false } func hasGracefulShutdownChangedWithReboot(d *schema.ResourceData, oScheduling, nScheduling map[string]interface{}) bool { allow_stopping_for_update := d.Get("allow_stopping_for_update").(bool) if !allow_stopping_for_update { return false } return hasGracefulShutdownChanged(oScheduling, nScheduling) } func hasGracefulShutdownChanged(oScheduling, nScheduling map[string]interface{}) bool { oGrShut := oScheduling["graceful_shutdown"].([]interface{}) nGrShut := nScheduling["graceful_shutdown"].([]interface{}) if (len(oGrShut) == 0 || oGrShut[0] == nil) && (len(nGrShut) == 0 || nGrShut[0] == nil) { return false } if (len(oGrShut) == 0 || oGrShut[0] == nil) || (len(nGrShut) == 0 || nGrShut[0] == nil) { return true } oldGrShut := oGrShut[0].(map[string]interface{}) newGrShut := 
nGrShut[0].(map[string]interface{}) oldMaxDuration := oldGrShut["max_duration"].([]interface{}) newMaxDuration := newGrShut["max_duration"].([]interface{}) var oldMaxDurationMap map[string]interface{} var newMaxDurationMap map[string]interface{} if len(oldMaxDuration) > 0 && oldMaxDuration[0] != nil { oldMaxDurationMap = oldMaxDuration[0].(map[string]interface{}) } else { oldMaxDurationMap = nil } if len(newMaxDuration) > 0 && newMaxDuration[0] != nil { newMaxDurationMap = newMaxDuration[0].(map[string]interface{}) } else { newMaxDurationMap = nil } if oldGrShut["enabled"] != newGrShut["enabled"] { return true } if oldMaxDurationMap["seconds"] != newMaxDurationMap["seconds"] { return true } if oldMaxDurationMap["nanos"] != newMaxDurationMap["nanos"] { return true } return false } func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { oMrd := oScheduling["max_run_duration"].([]interface{}) nMrd := nScheduling["max_run_duration"].([]interface{}) if (len(oMrd) == 0 || oMrd[0] == nil) && (len(nMrd) == 0 || nMrd[0] == nil) { return false } if (len(oMrd) == 0 || oMrd[0] == nil) || (len(nMrd) == 0 || nMrd[0] == nil) { return true } oldMrd := oMrd[0].(map[string]interface{}) newMrd := nMrd[0].(map[string]interface{}) if oldMrd["seconds"] != newMrd["seconds"] { return true } if oldMrd["nanos"] != newMrd["nanos"] { return true } return false } func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { oldNAs := oScheduling["node_affinities"].(*schema.Set).List() newNAs := newScheduling["node_affinities"].(*schema.Set).List() if len(oldNAs) != len(newNAs) { return true } for i := range oldNAs { oldNodeAffinity := oldNAs[i].(map[string]interface{}) newNodeAffinity := newNAs[i].(map[string]interface{}) if oldNodeAffinity["key"] != newNodeAffinity["key"] { return true } if oldNodeAffinity["operator"] != newNodeAffinity["operator"] { return true } // ConvertStringSet will sort the set into a slice, allowing DeepEqual if 
!reflect.DeepEqual(tpgresource.ConvertStringSet(oldNodeAffinity["values"].(*schema.Set)), tpgresource.ConvertStringSet(newNodeAffinity["values"].(*schema.Set))) { return true } } return false } func expandReservationAffinity(d tpgresource.TerraformResourceData) (*compute.ReservationAffinity, error) { _, ok := d.GetOk("reservation_affinity") if !ok { return nil, nil } prefix := "reservation_affinity.0" reservationAffinityType := d.Get(prefix + ".type").(string) affinity := compute.ReservationAffinity{ ConsumeReservationType: reservationAffinityType, ForceSendFields: []string{"ConsumeReservationType"}, } _, hasSpecificReservation := d.GetOk(prefix + ".specific_reservation") if (reservationAffinityType == "SPECIFIC_RESERVATION") != hasSpecificReservation { return nil, fmt.Errorf("specific_reservation must be set when reservation_affinity is SPECIFIC_RESERVATION, and not set otherwise") } prefix = prefix + ".specific_reservation.0" if hasSpecificReservation { affinity.Key = d.Get(prefix + ".key").(string) affinity.ForceSendFields = append(affinity.ForceSendFields, "Key", "Values") for _, v := range d.Get(prefix + ".values").([]interface{}) { affinity.Values = append(affinity.Values, v.(string)) } } return &affinity, nil } func flattenReservationAffinity(affinity *compute.ReservationAffinity) []map[string]interface{} { if affinity == nil { return nil } flattened := map[string]interface{}{ "type": affinity.ConsumeReservationType, } if affinity.ConsumeReservationType == "SPECIFIC_RESERVATION" { flattened["specific_reservation"] = []map[string]interface{}{{ "key": affinity.Key, "values": affinity.Values, }} } return []map[string]interface{}{flattened} } func expandNetworkPerformanceConfig(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*compute.NetworkPerformanceConfig, error) { configs, ok := d.GetOk("network_performance_config") if !ok { return nil, nil } npcSlice := configs.([]interface{}) if len(npcSlice) > 1 { return nil, fmt.Errorf("cannot specify 
multiple network_performance_configs") } if len(npcSlice) == 0 || npcSlice[0] == nil { return nil, nil } npc := npcSlice[0].(map[string]interface{}) return &compute.NetworkPerformanceConfig{ TotalEgressBandwidthTier: npc["total_egress_bandwidth_tier"].(string), }, nil } func flattenComputeInstanceGuestOsFeatures(v interface{}) []interface{} { if v == nil { return nil } features, ok := v.([]*compute.GuestOsFeature) if !ok { return nil } var result []interface{} for _, feature := range features { if feature != nil && feature.Type != "" { result = append(result, feature.Type) } } return result } func expandComputeInstanceGuestOsFeatures(v interface{}) []*compute.GuestOsFeature { if v == nil { return nil } var result []*compute.GuestOsFeature for _, feature := range v.([]interface{}) { result = append(result, &compute.GuestOsFeature{Type: feature.(string)}) } return result } func flattenNetworkPerformanceConfig(c *compute.NetworkPerformanceConfig) []map[string]interface{} { if c == nil { return nil } return []map[string]interface{}{ { "total_egress_bandwidth_tier": c.TotalEgressBandwidthTier, }, } } func expandComputeInstanceEncryptionKey(d tpgresource.TerraformResourceData) *compute.CustomerEncryptionKey { iek, ok := d.GetOk("instance_encryption_key") if !ok { return nil } iekRes := iek.([]interface{})[0].(map[string]interface{}) return &compute.CustomerEncryptionKey{ KmsKeyName: iekRes["kms_key_self_link"].(string), Sha256: iekRes["sha256"].(string), KmsKeyServiceAccount: iekRes["kms_key_service_account"].(string), } } func flattenComputeInstanceEncryptionKey(v *compute.CustomerEncryptionKey) []map[string]interface{} { if v == nil { return nil } return []map[string]interface{}{ { "kms_key_self_link": v.KmsKeyName, "sha256": v.Sha256, "kms_key_service_account": v.KmsKeyServiceAccount, }, } } func expandComputeInstanceSourceEncryptionKey(d tpgresource.TerraformResourceData, field string) *compute.CustomerEncryptionKey { cek, ok := d.GetOk(field) if !ok { return nil } 
cekRes := cek.([]interface{})[0].(map[string]interface{}) return &compute.CustomerEncryptionKey{ RsaEncryptedKey: cekRes["rsa_encrypted_key"].(string), RawKey: cekRes["raw_key"].(string), KmsKeyName: cekRes["kms_key_self_link"].(string), Sha256: cekRes["sha256"].(string), KmsKeyServiceAccount: cekRes["kms_key_service_account"].(string), } } func flattenComputeInstanceSourceEncryptionKey(v *compute.CustomerEncryptionKey) []map[string]interface{} { if v == nil { return nil } return []map[string]interface{}{ { "rsa_encrypted_key": v.RsaEncryptedKey, "raw_key": v.RawKey, "kms_key_self_link": v.KmsKeyName, "sha256": v.Sha256, "kms_key_service_account": v.KmsKeyServiceAccount, }, } }