in pkg/controller/direct/spanner/instance_controller.go [192:298]
func (a *SpannerInstanceAdapter) Update(ctx context.Context, updateOp *directbase.UpdateOperation) error {
log := klog.FromContext(ctx).WithName(ctrlName)
log.V(2).Info("updating Instance", "name", a.id)
mapCtx := &direct.MapContext{}
if err := a.SpecValidation(); err != nil {
return err
}
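// Convert the desired KRM spec into an Instance proto so its fields can be diffed against the actual state.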
desired := a.desired.DeepCopy()
resource := SpannerInstanceSpec_ToProto(mapCtx, &desired.Spec, a.id.SpannerInstanceConfigPrefix())
resource.Name = a.id.String()
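// Always set the managed-by-cnrm label so the instance is marked as managed by Config Connector.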
if resource.Labels == nil {
resource.Labels = make(map[string]string)
}
resource.Labels["managed-by-cnrm"] = "true"
if mapCtx.Err() != nil {
return mapCtx.Err()
}
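// Build an update field mask containing only the paths whose desired values differ from the actual instance.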
updateMask := &fieldmaskpb.FieldMask{}
if !reflect.DeepEqual(a.desired.Spec.DisplayName, a.actual.DisplayName) {
updateMask.Paths = append(updateMask.Paths, "display_name")
}
// If node count is unset, the field becomes unmanaged.
// If autoscaling is set, this field becomes output-only.
if a.desired.Spec.AutoscalingConfig == nil && a.desired.Spec.NumNodes != nil && !reflect.DeepEqual(resource.NodeCount, a.actual.NodeCount) {
updateMask.Paths = append(updateMask.Paths, "node_count")
}
// If processing units is unset, the field becomes unmanaged.
// If autoscaling is set, this field becomes output-only.
if a.desired.Spec.AutoscalingConfig == nil && a.desired.Spec.ProcessingUnits != nil && !reflect.DeepEqual(resource.ProcessingUnits, a.actual.ProcessingUnits) {
updateMask.Paths = append(updateMask.Paths, "processing_units")
}
if !reflect.DeepEqual(resource.Labels, a.actual.Labels) {
updateMask.Paths = append(updateMask.Paths, "labels")
}
if !reflect.DeepEqual(resource.DefaultBackupScheduleType, a.actual.DefaultBackupScheduleType) {
updateMask.Paths = append(updateMask.Paths, "default_backup_schedule_type")
}
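// AutoscalingConfig is a nested message, so diff it at the proto level to detect a change in any of its fields.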
autoscalingPaths, err := common.CompareProtoMessage(resource.AutoscalingConfig, a.actual.AutoscalingConfig, common.BasicDiff)
if err != nil {
return err
}
if len(autoscalingPaths) > 0 {
updateMask.Paths = append(updateMask.Paths, "autoscaling_config")
}
editionDowngrade := false
// If the edition field is unset, the field becomes unmanaged.
if desired.Spec.Edition != nil && !reflect.DeepEqual(resource.Edition, a.actual.Edition) {
// Upgrading the Edition to a higher tier can be done together with other field updates.
if resource.Edition > a.actual.Edition {
updateMask.Paths = append(updateMask.Paths, "edition")
} else {
editionDowngrade = true
}
}
if len(updateMask.Paths) == 0 && !editionDowngrade {
log.V(2).Info("no field needs update", "name", a.id)
return nil
}
var updated *spannerpb.Instance
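// First apply all changes listed in the field mask (including any edition upgrade) as a single long-running UpdateInstance call.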
if len(updateMask.Paths) > 0 {
req := &spannerpb.UpdateInstanceRequest{
FieldMask: updateMask,
Instance: resource,
}
op, err := a.gcpClient.UpdateInstance(ctx, req)
if err != nil {
return fmt.Errorf("updating Instance %s: %w", a.id, err)
}
updated, err = op.Wait(ctx)
if err != nil {
return fmt.Errorf("Instance %s waiting update: %w", a.id, err)
}
}
// An Edition downgrade is applied in a separate update call, with "edition" as the only path in the field mask.
// This call will fail if features of the higher tier have not been disabled first.
if editionDowngrade {
log.V(2).Info("Upgrading Edition to lower tier", "name", a.id)
req := &spannerpb.UpdateInstanceRequest{
Instance: resource,
}
req.FieldMask = &fieldmaskpb.FieldMask{
Paths: []string{"edition"},
}
op, err := a.gcpClient.UpdateInstance(ctx, req)
if err != nil {
return fmt.Errorf("updating Instance %s: %w", a.id, err)
}
updated, err = op.Wait(ctx)
if err != nil {
return fmt.Errorf("Instance %s waiting update: %w", a.id, err)
}
}
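// Map the updated proto back into the KRM status and persist it on the resource.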
log.V(2).Info("successfully updated Instance", "name", a.id)
status := SpannerInstanceStatus_FromProto(mapCtx, updated)
if mapCtx.Err() != nil {
return mapCtx.Err()
}
return updateOp.UpdateStatus(ctx, status, nil)
}