in pkg/controllers/clusterresourceplacement/resource_selector.go [359:425]
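// generateRawContent strips the cluster-specific fields that the hub cluster's
// API server populates on an object and returns its JSON encoding, so the
// object can be propagated to member clusters (e.g., through the work-api).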
func generateRawContent(object *unstructured.Unstructured) ([]byte, error) {
	// We keep the annotations, labels, finalizers, and the deletion grace period;
	// the fields below are populated by the hub cluster's API server and cannot
	// be carried over to a member cluster.
	object.SetResourceVersion("")
	object.SetGeneration(0)
	object.SetUID("")
	object.SetSelfLink("")
	object.SetDeletionTimestamp(nil)
	object.SetManagedFields(nil)
	// Remove the kubectl last-applied-configuration annotation if it exists.
	annots := object.GetAnnotations()
	if annots != nil {
		delete(annots, corev1.LastAppliedConfigAnnotation)
		if len(annots) == 0 {
			object.SetAnnotations(nil)
		} else {
			// GetAnnotations returns a copy for unstructured objects, so the
			// modified map has to be written back explicitly.
			object.SetAnnotations(annots)
		}
	}
	// Remove all the owner references, as the UIDs in the owner references
	// can't be transferred to the member clusters.
	// TODO: Establish a way to keep the ownership relation through work-api.
	object.SetOwnerReferences(nil)
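	// creationTimestamp and status are likewise set by the hub cluster's API
	// server and its controllers; drop them so that the member cluster's own
	// values take effect.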
	unstructured.RemoveNestedField(object.Object, "metadata", "creationTimestamp")
	unstructured.RemoveNestedField(object.Object, "status")
	// TODO: See if there are other cases where we may have extra server-populated fields.
if object.GetKind() == "Service" && object.GetAPIVersion() == "v1" {
		if clusterIP, exist, _ := unstructured.NestedString(object.Object, "spec", "clusterIP"); exist && clusterIP != corev1.ClusterIPNone {
			unstructured.RemoveNestedField(object.Object, "spec", "clusterIP")
			unstructured.RemoveNestedField(object.Object, "spec", "clusterIPs")
		}
		// Remove all the node ports assigned by the hub cluster, if any;
		// healthCheckNodePort is only set for LoadBalancer Services with
		// externalTrafficPolicy set to Local.
		unstructured.RemoveNestedField(object.Object, "spec", "healthCheckNodePort")
		vals, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "ports")
		if err != nil {
			return nil, fmt.Errorf("failed to get the ports field in Service object, name = %s: %w", object.GetName(), err)
		}
		if found {
			if ports, ok := vals.([]interface{}); ok {
				for i := range ports {
					if each, ok := ports[i].(map[string]interface{}); ok {
						delete(each, "nodePort")
					}
				}
			}
		}
} else if object.GetKind() == "Job" && object.GetAPIVersion() == batchv1.SchemeGroupVersion.String() {
if manualSelector, exist, _ := unstructured.NestedBool(object.Object, "spec", "manualSelector"); !exist || !manualSelector {
// remove the selector field and labels added by the api-server if the job is not created with manual selector
// whose value conflict with the ones created by the member cluster api server
// https://github.com/kubernetes/kubernetes/blob/d4fde1e92a83cb533ae63b3abe9d49f08efb7a2f/pkg/registry/batch/job/strategy.go#L219
// k8s used to add an old label called "controller-uid" but use a new label called "batch.kubernetes.io/controller-uid" after 1.26
unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "controller-uid")
unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "batch.kubernetes.io/controller-uid")
unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "creationTimestamp")
unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "controller-uid")
unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "batch.kubernetes.io/controller-uid")
}
}
	rawContent, err := object.MarshalJSON()
	if err != nil {
		return nil, fmt.Errorf("failed to marshal the unstructured object gvk = %s, name = %s: %w", object.GroupVersionKind(), object.GetName(), err)
	}
	return rawContent, nil
}
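
// A minimal sketch of how the raw bytes are typically consumed, assuming the
// sigs.k8s.io/work-api types (workv1alpha1.Manifest embeds runtime.RawExtension);
// the actual caller in this package may differ:
//
//	rawContent, err := generateRawContent(object)
//	if err != nil {
//		return err
//	}
//	manifest := workv1alpha1.Manifest{
//		RawExtension: runtime.RawExtension{Raw: rawContent},
//	}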