in oracle/controllers/pitrcontroller/pitr_controller.go [276:424]
// ensureAgent creates or updates (via server-side apply) the PITR agent
// Deployment and its ClusterIP Service for the PITR resource p, wiring the
// agent to the database daemon service of instance i.
//
// Both objects carry a controller reference to p, so they are garbage
// collected when the PITR resource is deleted. The call is idempotent:
// repeated invocations converge on the same desired state.
func (r *PITRReconciler) ensureAgent(ctx context.Context, p *v1alpha1.PITR, i *v1alpha1.Instance) error {
	// TODO better validation
	if p.Spec.Images == nil {
		return errors.New("PITR .spec.images must be specified")
	}
	agentImage, ok := p.Spec.Images[agentImageKey]
	if !ok {
		return fmt.Errorf("failed to find a required image from %v, want image with key %s", p.Spec.Images, agentImageKey)
	}

	options := []client.PatchOption{client.ForceOwnership, client.FieldOwner("pitr-controller")}
	pitrLabel := map[string]string{controllers.PITRLabel: p.GetName()}
	instlabels := map[string]string{"instance": i.GetName()}

	// Run the agent with the database UID/GID (falling back to the controller
	// defaults) so it can read the instance's log disk.
	uid := controllers.DefaultUID
	if i.Spec.DatabaseUID != nil {
		uid = *i.Spec.DatabaseUID
	}
	gid := controllers.DefaultGID
	if i.Spec.DatabaseGID != nil {
		gid = *i.Spec.DatabaseGID
	}

	logDiskPVC, logDiskMount := controllers.GetPVCNameAndMount(i.GetName(), "LogDisk")
	// TODO better way to find the PVC
	// The claim is created by the instance's StatefulSet; reconstruct the
	// generated name of the claim bound to pod ordinal 0.
	logDiskPVC = fmt.Sprintf("%s-%s-sts-0", logDiskPVC, i.GetName())

	deployName := fmt.Sprintf(deploymentTemplate, p.GetName())
	// for now, PITR and DB instance are in the same namespace.
	deployNS := p.GetNamespace()

	// Resolve the dbdaemon service address the agent will talk to.
	dbdaemonSvc := &corev1.Service{}
	if err := r.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(controllers.DbdaemonSvcName, i.GetName()), Namespace: i.GetNamespace()}, dbdaemonSvc); err != nil {
		return err
	}
	dbdaemonIP := dbdaemonSvc.Spec.ClusterIP
	dbdaemonPort := consts.DefaultDBDaemonPort

	// ClusterIP service exposing the agent's gRPC port to the rest of the
	// cluster, selected by the PITR label.
	svc := &corev1.Service{
		TypeMeta:   metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Service"},
		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf(PITRSvcTemplate, p.GetName()), Namespace: p.GetNamespace()},
		Spec: corev1.ServiceSpec{
			Selector: pitrLabel,
			Ports: []corev1.ServicePort{
				{
					Name:       "pitr",
					Protocol:   "TCP",
					Port:       DefaultPITRAgentPort,
					TargetPort: intstr.FromInt(DefaultPITRAgentPort),
				},
			},
			Type: corev1.ServiceTypeClusterIP,
		},
	}

	// Single-replica agent Deployment. The log disk PVC is mounted read-only;
	// the agent streams archived logs from it to p.Spec.StorageURI.
	deployment := &appsv1.Deployment{
		TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
		ObjectMeta: metav1.ObjectMeta{Name: deployName, Namespace: deployNS},
		Spec: appsv1.DeploymentSpec{
			Replicas: pointer.Int32(1),
			Selector: &metav1.LabelSelector{
				MatchLabels: pitrLabel,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels:    pitrLabel,
					Namespace: deployNS,
				},
				Spec: corev1.PodSpec{
					SecurityContext: &corev1.PodSecurityContext{
						RunAsUser:    pointer.Int64(uid),
						RunAsGroup:   pointer.Int64(gid),
						FSGroup:      pointer.Int64(gid),
						RunAsNonRoot: pointer.Bool(true),
					},
					Volumes: []corev1.Volume{
						{
							Name: "log",
							VolumeSource: corev1.VolumeSource{
								PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
									ClaimName: logDiskPVC,
									ReadOnly:  true,
								},
							},
						},
					},
					Containers: []corev1.Container{
						{
							Name:    agentName,
							Image:   agentImage,
							Command: []string{pitrCmd},
							Args: []string{
								"--dbservice=" + dbdaemonIP,
								"--dbport=" + strconv.Itoa(dbdaemonPort),
								"--dest=" + p.Spec.StorageURI,
								"--port=" + strconv.Itoa(DefaultPITRAgentPort),
							},
							Ports: []corev1.ContainerPort{
								{Name: "pitr-port", Protocol: "TCP", ContainerPort: DefaultPITRAgentPort},
							},
							SecurityContext: &corev1.SecurityContext{
								AllowPrivilegeEscalation: pointer.Bool(false),
							},
							ImagePullPolicy: corev1.PullAlways,
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "log",
									ReadOnly:  true,
									MountPath: logDiskMount,
								},
							},
						},
					},
					// Add pod affinity for pitr agent pod, so that pitr agent pod can access DB disk.
					Affinity: &corev1.Affinity{
						PodAffinity: &corev1.PodAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchLabels: instlabels,
									},
									Namespaces:  []string{p.GetNamespace()},
									TopologyKey: "kubernetes.io/hostname",
								},
							},
						},
					},
				},
			},
		},
	}

	// Apply both objects with p as the controlling owner so they are cleaned
	// up with the PITR resource.
	if err := ctrl.SetControllerReference(p, deployment, r.Scheme); err != nil {
		return err
	}
	if err := r.Patch(ctx, deployment, client.Apply, options...); err != nil {
		return err
	}
	if err := ctrl.SetControllerReference(p, svc, r.Scheme); err != nil {
		return err
	}
	if err := r.Patch(ctx, svc, client.Apply, options...); err != nil {
		return err
	}
	return nil
}