in pkg/providers/instance/instance.go [324:369]
// newAgentPoolObject builds the AKS AgentPool request body for a single
// NodeClaim: taints and labels are copied over, the machine type (cpu/gpu)
// is derived from the VM SKU, and the OS disk is sized from the NodeClaim's
// storage request. It returns an error when no positive storage request is set.
func newAgentPoolObject(vmSize string, nodeClaim *karpenterv1.NodeClaim) (armcontainerservice.AgentPool, error) {
	// Convert NodeClaim taints into the "key=value:effect" string form the AKS API expects.
	taintsStr := make([]*string, 0, len(nodeClaim.Spec.Taints))
	for _, t := range nodeClaim.Spec.Taints {
		taintsStr = append(taintsStr, to.Ptr(fmt.Sprintf("%s=%s:%s", t.Key, t.Value, t.Effect)))
	}

	// todo: why nodepool label is used here
	labels := map[string]*string{karpenterv1.NodePoolLabelKey: to.Ptr("kaito")}
	for k, v := range nodeClaim.Labels {
		labels[k] = to.Ptr(v)
	}
	// N-series SKUs ("Standard_N*") are Azure's GPU-enabled VM sizes; everything else is tagged cpu.
	machineType := "cpu"
	if strings.Contains(vmSize, "Standard_N") {
		machineType = "gpu"
	}
	labels[LabelMachineType] = to.Ptr(machineType)
	// NodeClaimCreationLabel is used for recording the create timestamp of agentPool resource.
	// then used by garbage collection controller to cleanup orphan agentpool which lived more than 10min
	labels[NodeClaimCreationLabel] = to.Ptr(nodeClaim.CreationTimestamp.UTC().Format(CreationTimestampLayout))

	storage := &resource.Quantity{}
	if nodeClaim.Spec.Resources.Requests != nil {
		storage = nodeClaim.Spec.Resources.Requests.Storage()
	}
	if storage.Value() <= 0 {
		return armcontainerservice.AgentPool{}, fmt.Errorf("storage request of nodeclaim(%s) should be more than 0", nodeClaim.Name)
	}
	// Round up to whole GiB: a plain >>30 truncates, which would produce 0
	// (an invalid OS disk size) for requests under 1GiB and silently
	// undersize any request that is not GiB-aligned.
	diskSizeGB := int32((storage.Value() + (1<<30 - 1)) >> 30)

	return armcontainerservice.AgentPool{
		Properties: &armcontainerservice.ManagedClusterAgentPoolProfileProperties{
			NodeLabels: labels,
			NodeTaints: taintsStr, // e.g. []*string{to.Ptr("sku=gpu:NoSchedule")}
			Type:       to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets),
			VMSize:     to.Ptr(vmSize),
			OSType:     to.Ptr(armcontainerservice.OSTypeLinux),
			// One node per agent pool: each NodeClaim maps to its own pool.
			Count:        to.Ptr(int32(1)),
			OSDiskSizeGB: to.Ptr(diskSizeGB),
		},
	}, nil
}