in pkg/providers/v1/aws.go [1297:1410]
func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) {
	// We keep state in the Cloud object (in particular the attaching map),
	// so log on construction to make it obvious if multiple Cloud objects are built.
klog.Infof("Building AWS cloudprovider")
	metadata, err := awsServices.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error creating AWS metadata client: %q", err)
	}

	err = updateConfigZone(&cfg, metadata)
	if err != nil {
		return nil, fmt.Errorf("unable to determine AWS zone from cloud provider config or EC2 instance metadata: %v", err)
	}
	zone := cfg.Global.Zone
	if len(zone) <= 1 {
		return nil, fmt.Errorf("invalid AWS zone in config file: %s", zone)
	}
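
	// Derive the region name from the configured availability zone.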
	regionName, err := azToRegion(zone)
	if err != nil {
		return nil, err
	}

	if !cfg.Global.DisableStrictZoneCheck {
		if !isRegionValid(regionName, metadata) {
			return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone)
		}
	} else {
		klog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone)
	}
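
	// Construct the regional AWS service clients used by the provider.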
	ec2, err := awsServices.Compute(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS EC2 client: %v", err)
	}

	elb, err := awsServices.LoadBalancing(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELB client: %v", err)
	}

	elbv2, err := awsServices.LoadBalancingV2(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELBV2 client: %v", err)
	}

	asg, err := awsServices.Autoscaling(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err)
	}

	kms, err := awsServices.KeyManagement(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS key management client: %v", err)
	}
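
	// Assemble the Cloud object; attaching and deviceAllocators track EBS
	// mount-device assignments per node.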
	awsCloud := &Cloud{
		ec2:      ec2,
		elb:      elb,
		elbv2:    elbv2,
		asg:      asg,
		metadata: metadata,
		kms:      kms,
		cfg:      &cfg,
		region:   regionName,

		attaching:        make(map[types.NodeName]map[mountDevice]EBSVolumeID),
		deviceAllocators: make(map[types.NodeName]DeviceAllocator),
	}
	awsCloud.instanceCache.cloud = awsCloud
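
	// The cluster is considered explicitly identified when either the legacy
	// cluster tag or the cluster ID is set in the configuration.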
	tagged := cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != ""
	if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged {
		// When the master is running on a different AWS account, a different cloud
		// provider, or on-premises, build a dummy instance and use the VPC from the
		// nodes' account.
klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises")
awsCloud.selfAWSInstance = &awsInstance{
nodeName: "master-dummy",
vpcID: cfg.Global.VPC,
subnetID: cfg.Global.SubnetID,
}
awsCloud.vpcID = cfg.Global.VPC
} else {
selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
if err != nil {
return nil, err
}
awsCloud.selfAWSInstance = selfAWSInstance
awsCloud.vpcID = selfAWSInstance.vpcID
}
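
	// Initialize cluster tagging, either from the explicit configuration or by
	// reading the tags on this instance.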
	if cfg.Global.KubernetesClusterTag != "" || cfg.Global.KubernetesClusterID != "" {
		if err := awsCloud.tagging.init(cfg.Global.KubernetesClusterTag, cfg.Global.KubernetesClusterID); err != nil {
			return nil, err
		}
	} else {
		// TODO: Clean up double-API query
		info, err := awsCloud.selfAWSInstance.describeInstance()
		if err != nil {
			return nil, err
		}
		if err := awsCloud.tagging.initFromTags(info.Tags); err != nil {
			return nil, err
		}
	}
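
	// Default to IPv4-only node IP families when none are configured.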
	if len(cfg.Global.NodeIPFamilies) == 0 {
		cfg.Global.NodeIPFamilies = []string{"ipv4"}
	}
	klog.Infof("The following IP families will be added to nodes: %v", cfg.Global.NodeIPFamilies)

	return awsCloud, nil
}