diff --git a/plugins/network/go.mod b/plugins/network/go.mod
index 74e391e6324..8cbf987e162 100644
--- a/plugins/network/go.mod
+++ b/plugins/network/go.mod
@@ -10,20 +10,25 @@ require (
 require (
 	github.com/alexellis/go-execute/v2 v2.2.1 // indirect
 	github.com/fatih/color v1.18.0 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/melbahja/goph v1.4.0 // indirect
+	github.com/onsi/gomega v1.36.2 // indirect
 	github.com/otiai10/copy v1.14.1 // indirect
 	github.com/otiai10/mint v1.6.3 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pkg/sftp v1.13.5 // indirect
 	github.com/ryanuber/columnize v2.1.2+incompatible // indirect
 	golang.org/x/crypto v0.32.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
 replace github.com/dokku/dokku/plugins/common => ../common
diff --git a/plugins/scheduler-k3s/Makefile b/plugins/scheduler-k3s/Makefile
index 449103c2ce4..e9dd2351f56 100644
--- a/plugins/scheduler-k3s/Makefile
+++ b/plugins/scheduler-k3s/Makefile
@@ -1,4 +1,4 @@
-SUBCOMMANDS = subcommands/annotations:set subcommands/autoscaling-auth:set subcommands/autoscaling-auth:report subcommands/cluster-add subcommands/cluster-list subcommands/cluster-remove subcommands/ensure-charts subcommands/initialize subcommands/labels:set subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/uninstall
+SUBCOMMANDS = subcommands/annotations:set subcommands/autoscaling-auth:set subcommands/autoscaling-auth:report subcommands/cluster-add subcommands/cluster-list subcommands/cluster-remove subcommands/ensure-charts subcommands/initialize subcommands/labels:set subcommands/report subcommands/set subcommands/show-kubeconfig subcommands/add-pvc subcommands/remove-pvc subcommands/mount subcommands/unmount subcommands/uninstall
 TRIGGERS = triggers/install triggers/post-app-clone-setup triggers/post-app-rename-setup triggers/post-delete triggers/report triggers/scheduler-app-status triggers/scheduler-deploy triggers/scheduler-enter triggers/scheduler-logs triggers/scheduler-proxy-config triggers/scheduler-proxy-logs triggers/scheduler-post-delete triggers/scheduler-run triggers/scheduler-run-list triggers/scheduler-stop
 BUILD = commands subcommands triggers
 PLUGIN_NAME = scheduler-k3s
diff --git a/plugins/scheduler-k3s/functions.go b/plugins/scheduler-k3s/functions.go
index f03b9159c7d..1c556f4cbee 100644
--- a/plugins/scheduler-k3s/functions.go
+++ b/plugins/scheduler-k3s/functions.go
@@ -548,6 +548,13 @@ func getAnnotations(appName string, processType string) (ProcessAnnotations, err
 	}
 	annotations.TraefikMiddlewareAnnotations = traefikMiddlewareAnnotations
 
+	// TODO: check whether this is needed
+	pvcAnnotations, err := getAnnotation(appName, processType, "pvc")
+	if err != nil {
+		return annotations, err
+	}
+	annotations.PvcAnnotations = pvcAnnotations
+
 	return annotations, nil
 }
 
@@ -1136,9 +1143,27 @@ func getLabels(appName string, processType string) (ProcessLabels, error) {
 	}
 	labels.TraefikMiddlewareLabels = traefikMiddlewareLabels
 
+	// TODO: check whether this is needed
+	pvcLabels, err := getLabel(appName, processType, "pvc")
+	if err != nil {
+		return labels, err
+	}
+	labels.PvcLabels = pvcLabels
+
 	return labels, nil
 }
 
+func getVolumes(appName string, processType string) ([]ProcessVolume, error) {
+	volumes := []ProcessVolume{}
+	propValue := common.PropertyGet("scheduler-k3s", appName, fmt.Sprintf("volumes.%s", processType))
+	err := yaml.Unmarshal([]byte(propValue), &volumes)
+	if err != nil {
+		return volumes, err
+	}
+
+	return volumes, nil
+}
+
 // getGlobalLabel retrieves global labels for a given app
 func getGlobalLabel(appName string) (ProcessLabels, error) {
 	return getLabels(appName, GlobalProcessType)
diff --git a/plugins/scheduler-k3s/k8s.go b/plugins/scheduler-k3s/k8s.go
index 8ad39e4540f..d69ea920f99 100644
--- a/plugins/scheduler-k3s/k8s.go
+++ b/plugins/scheduler-k3s/k8s.go
@@ -940,3 +940,16 @@ func streamLogsFromRequest(ctx context.Context, request rest.ResponseWrapper, ou
 		}
 	}
 }
+
+// PvcInput contains all the information needed to get/delete a Kubernetes PVC
+type PvcInput struct {
+	// Name is the Kubernetes PVC name
+	Name string
+	// Namespace is the Kubernetes namespace
+	Namespace string
+}
+
+// GetPvc retrieves the named PersistentVolumeClaim from the given namespace
+func (k KubernetesClient) GetPvc(ctx context.Context, input PvcInput) (*corev1.PersistentVolumeClaim, error) {
+	return k.Client.CoreV1().PersistentVolumeClaims(input.Namespace).Get(ctx, input.Name, metav1.GetOptions{})
+}
diff --git a/plugins/scheduler-k3s/src/commands/commands.go b/plugins/scheduler-k3s/src/commands/commands.go
index a87f59ad81c..19ef8a8b68c 100644
--- a/plugins/scheduler-k3s/src/commands/commands.go
+++ b/plugins/scheduler-k3s/src/commands/commands.go
@@ -22,12 +22,16 @@ Additional commands:`
     scheduler-k3s:cluster-add [--insecure-allow-unknown-hosts] [--server-ip SERVER_IP] [--taint-scheduling] <ssh://user@host:port>, Adds a server node to a Dokku-managed cluster
     scheduler-k3s:cluster-list [--format json|stdout], Lists all nodes in a Dokku-managed cluster
     scheduler-k3s:cluster-remove [node-id], Removes client node to a Dokku-managed cluster
-    scheduler-k3s:ensure-charts, Ensures the k3s charts are installed
+    scheduler-k3s:ensure-charts, Ensures the k3s charts are installed
    scheduler-k3s:initialize [--server-ip SERVER_IP] [--taint-scheduling], Initializes a cluster
    scheduler-k3s:labels:set <app|--global> <label> (<value>) [--process-type PROCESS_TYPE] <--resource-type RESOURCE_TYPE>, Set or clear a label for a given app/process-type/resource-type combination
    scheduler-k3s:report [<app>] [<flag>], Displays a scheduler-k3s report for one or more apps
    scheduler-k3s:set <app> <property> (<value>), Set or clear a scheduler-k3s property for an app
    scheduler-k3s:show-kubeconfig, Displays the kubeconfig for remote usage
+    scheduler-k3s:add-pvc <pvc-name> <storage-size> [--access-mode <access-mode>] [--namespace <namespace>] [--storage-class-name <storage-class-name>], Adds a Persistent Volume Claim (PVC)
+    scheduler-k3s:remove-pvc <pvc-name> [--namespace <namespace>], Removes a Persistent Volume Claim from a namespace
+    scheduler-k3s:mount <app> <pvc-name> <mount-path> [--process-type PROCESS_TYPE] [--subpath SUBPATH] [--readonly] [--chown UID:GID], Mounts a volume at a container path for an app (default process-type: web)
+    scheduler-k3s:unmount <app> <pvc-name> <mount-path> [--process-type PROCESS_TYPE], Unmounts a volume from an app
    scheduler-k3s:uninstall, Uninstalls k3s from the Dokku server`
 )
 
diff --git a/plugins/scheduler-k3s/src/subcommands/subcommands.go b/plugins/scheduler-k3s/src/subcommands/subcommands.go
index e23336bb837..90e9a24b6bf 100644
--- a/plugins/scheduler-k3s/src/subcommands/subcommands.go
+++ b/plugins/scheduler-k3s/src/subcommands/subcommands.go
@@ -1,8 +1,10 @@
 package main
 
 import (
+	"errors"
 	"fmt"
 	"os"
+	"slices"
 	"strings"
 
 	"github.com/dokku/dokku/plugins/common"
@@ -128,6 +130,46 @@ func main() {
 		args := flag.NewFlagSet("scheduler-k3s:show-kubeconfig", flag.ExitOnError)
 		args.Parse(os.Args[2:])
 		err = scheduler_k3s.CommandShowKubeconfig()
+	case "add-pvc":
+		args := flag.NewFlagSet("scheduler-k3s:add-pvc", flag.ExitOnError)
+		accessMode := args.String("access-mode", "ReadWriteOnce", "--access-mode: PVC access mode (ReadWriteOnce, ReadOnlyMany or ReadWriteMany)")
+		namespace := args.String("namespace", "default", "--namespace: Kubernetes namespace to create the PVC in")
+		storageClass := args.String("storage-class-name", "", "--storage-class-name: storage class to use, e.g. longhorn")
+		args.Parse(os.Args[2:])
+		// validate the requested access mode
+		accessModes := []string{"ReadWriteOnce", "ReadWriteMany", "ReadOnlyMany"}
+		if !slices.Contains(accessModes, *accessMode) {
+			err = errors.New("Please specify the PVC access mode as one of ReadWriteOnce, ReadOnlyMany or ReadWriteMany")
+			break
+		}
+		pvcName := args.Arg(0)
+		storageSize := args.Arg(1)
+		err = scheduler_k3s.CommandAddPVC(pvcName, *namespace, *accessMode, storageSize, *storageClass)
+	case "remove-pvc":
+		args := flag.NewFlagSet("scheduler-k3s:remove-pvc", flag.ExitOnError)
+		namespace := args.String("namespace", "default", "--namespace: Kubernetes namespace the PVC was created in")
+		args.Parse(os.Args[2:])
+		pvcName := args.Arg(0)
+		err = scheduler_k3s.CommandRemovePVC(pvcName, *namespace)
+	case "mount":
+		args := flag.NewFlagSet("scheduler-k3s:mount", flag.ExitOnError)
+		subPath := args.String("subpath", "", "--subpath: sub-path within the volume to mount")
+		readOnly := args.Bool("readonly", false, "--readonly: mount the volume read-only")
+		processType := args.String("process-type", "web", "--process-type: process type to mount the volume for")
+		chown := args.String("chown", "", "--chown: UID:GID to chown the mount path to")
+		args.Parse(os.Args[2:])
+		appName := args.Arg(0)
+		pvcName := args.Arg(1)
+		mountPath := args.Arg(2)
+		err = scheduler_k3s.CommandMountPVC(appName, *processType, pvcName, mountPath, *subPath, *readOnly, *chown)
+	case "unmount":
+		args := flag.NewFlagSet("scheduler-k3s:unmount", flag.ExitOnError)
+		processType := args.String("process-type", "web", "--process-type: process type to unmount the volume from")
+		args.Parse(os.Args[2:])
+		appName := args.Arg(0)
+		pvcName := args.Arg(1)
+		mountPath := args.Arg(2)
+		err = scheduler_k3s.CommandUnMountPVC(appName, *processType, pvcName, mountPath)
 	case "uninstall":
 		args := flag.NewFlagSet("scheduler-k3s:uninstall", flag.ExitOnError)
 		args.Parse(os.Args[2:])
diff --git a/plugins/scheduler-k3s/subcommands.go b/plugins/scheduler-k3s/subcommands.go
index aa7dc51e94f..8b04630292e 100644
--- a/plugins/scheduler-k3s/subcommands.go
+++ b/plugins/scheduler-k3s/subcommands.go
@@ -9,14 +9,17 @@ import (
 	"net/url"
 	"os"
 	"os/signal"
+	"path/filepath"
 	"slices"
 	"sort"
 	"strings"
 	"syscall"
+	"time"
 
 	"github.com/dokku/dokku/plugins/common"
 	resty "github.com/go-resty/resty/v2"
 	"github.com/ryanuber/columnize"
+	"gopkg.in/yaml.v3"
 )
 
 // CommandAnnotationsSet set or clear a scheduler-k3s annotation for an app
@@ -1041,3 +1044,231 @@ func CommandUninstall() error {
 	common.LogInfo2Quiet("Removing k3s dependencies")
 	return uninstallHelperCommands(context.Background())
 }
+
+func CommandAddPVC(pvcName string, namespace string, accessMode string, storageSize string, storageClass string) error {
+	chartDir, err := os.MkdirTemp("", "pvc-chart-")
+	if err != nil {
+		return fmt.Errorf("Error creating pvc chart directory: %w", err)
+	}
+	defer os.RemoveAll(chartDir)
+
+	if err := os.MkdirAll(filepath.Join(chartDir, "templates"), os.FileMode(0755)); err != nil {
+		return fmt.Errorf("Error creating pvc chart templates directory: %w", err)
+	}
+
+	// create the Chart.yaml
+	chart := &Chart{
+		ApiVersion: "v2",
+		AppVersion: "1.0.0",
+		Icon:       "https://dokku.com/assets/dokku-logo.svg",
+		Name:       "PersistentVolumeClaim",
+		Version:    "0.0.1",
+	}
+
+	err = writeYaml(WriteYamlInput{
+		Object: chart,
+		Path:   filepath.Join(chartDir, "Chart.yaml"),
+	})
+	if err != nil {
+		return fmt.Errorf("Error writing PersistentVolumeClaim chart: %w", err)
+	}
+
+	// create the values.yaml
+	values := PersistentVolumeClaim{
+		Name:         pvcName,
+		AccessMode:   accessMode,
+		Storage:      storageSize,
+		StorageClass: storageClass,
+		Namespace:    namespace,
+	}
+
+	err = writeYaml(WriteYamlInput{
+		Object: values,
+		Path:   filepath.Join(chartDir, "values.yaml"),
+	})
+	if err != nil {
+		return fmt.Errorf("Error writing chart values: %w", err)
+	}
+	if os.Getenv("DOKKU_TRACE") == "1" {
+		common.CatFile(filepath.Join(chartDir, "values.yaml"))
+	}
+
+	// write templates/pvc.yaml
+	b, err := templates.ReadFile("templates/chart/pvc.yaml")
+	if err != nil {
+		return fmt.Errorf("Error reading PVC template: %w", err)
+	}
+
+	pvcFile := filepath.Join(chartDir, "templates", "pvc.yaml")
+	err = os.WriteFile(pvcFile, b, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("Error writing PVC template: %w", err)
+	}
+	if os.Getenv("DOKKU_TRACE") == "1" {
+		common.CatFile(pvcFile)
+	}
+
+	// add _helpers.tpl
+	b, err = templates.ReadFile("templates/chart/_helpers.tpl")
+	if err != nil {
+		return fmt.Errorf("Error reading _helpers template: %w", err)
+	}
+
+	helpersFile := filepath.Join(chartDir, "templates", "_helpers.tpl")
+	err = os.WriteFile(helpersFile, b, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("Error writing _helpers template: %w", err)
+	}
+
+	// install the chart
+	helmAgent, err := NewHelmAgent(namespace, DeployLogPrinter)
+	if err != nil {
+		return fmt.Errorf("Error creating helm agent: %w", err)
+	}
+
+	chartPath, err := filepath.Abs(chartDir)
+	if err != nil {
+		return fmt.Errorf("Error getting chart path: %w", err)
+	}
+
+	timeoutDuration, err := time.ParseDuration("300s")
+	if err != nil {
+		return fmt.Errorf("Error parsing deploy timeout duration: %w", err)
+	}
+
+	err = helmAgent.InstallOrUpgradeChart(context.Background(), ChartInput{
+		ChartPath:         chartPath,
+		Namespace:         namespace,
+		ReleaseName:       fmt.Sprintf("pvc-dokku-%s", pvcName),
+		RollbackOnFailure: true,
+		Timeout:           timeoutDuration,
+		Wait:              true,
+	})
+	if err != nil {
+		return fmt.Errorf("Error installing pvc chart: %w", err)
+	}
+
+	common.LogInfo1Quiet("Applied pvc chart")
+
+	return nil
+}
+
+func CommandRemovePVC(pvcName string, namespace string) error {
+	if err := isKubernetesAvailable(); err != nil {
+		return fmt.Errorf("kubernetes api not available: %w", err)
+	}
+
+	helmAgent, err := NewHelmAgent(namespace, DeployLogPrinter)
+	if err != nil {
+		return fmt.Errorf("Error creating helm agent: %w", err)
+	}
+	err = helmAgent.UninstallChart(fmt.Sprintf("pvc-dokku-%s", pvcName))
+	if err != nil {
+		return fmt.Errorf("Error uninstalling chart: %w", err)
+	}
+
+	return nil
+}
+
+func CommandMountPVC(appName string, processType string, pvcName string, mountPath string, subPath string, readOnly bool, chown string) error {
+	clientset, err := NewKubernetesClient()
+	if err != nil {
+		if isK3sKubernetes() {
+			if err := isK3sInstalled(); err != nil {
+				common.LogWarn("k3s is not installed, skipping")
+				return nil
+			}
+		}
+		return fmt.Errorf("Error creating kubernetes client: %w", err)
+	}
+
+	if err := clientset.Ping(); err != nil {
+		return fmt.Errorf("kubernetes api not available: %w", err)
+	}
+
+	// 1. get the namespace for the app
+	namespace := getComputedNamespace(appName)
+	// 2. check that the PVC exists in this namespace
+	pvcInput := PvcInput{
+		Name:      pvcName,
+		Namespace: namespace,
+	}
+	// Retrieve the PVC
+	_, err = clientset.GetPvc(context.Background(), pvcInput)
+	if err != nil {
+		return fmt.Errorf("failed to get PVC %s in namespace %s: %w", pvcInput.Name, pvcInput.Namespace, err)
+	}
+	// TODO: 2.2. maybe also check that the PVC carries the dokku.com/managed annotation
+	// 3. persist the mount in the scheduler-k3s properties
+	volume := ProcessVolume{
+		Name:      pvcName,
+		Type:      "persistentVolumeClaim",
+		ClaimName: pvcName,
+		MountPath: mountPath,
+	}
+	if len(subPath) > 0 {
+		volume.SubPath = subPath
+	}
+	if readOnly {
+		volume.ReadOnly = readOnly
+	}
+	if len(chown) > 0 {
+		volume.Chown = chown
+	}
+	// load any volumes already defined for this process type
+	var volumes []ProcessVolume
+	propertyName := fmt.Sprintf("volumes.%s", processType)
+	err = yaml.Unmarshal([]byte(common.PropertyGet("scheduler-k3s", appName, propertyName)), &volumes)
+	if err != nil {
+		return fmt.Errorf("failed to decode YAML in properties: %w", err)
+	}
+	// refuse to add the same volume twice
+	for _, v := range volumes {
+		if v.Name == volume.Name {
+			return fmt.Errorf("Volume %s is already mounted", volume.Name)
+		}
+	}
+	volumes = append(volumes, volume)
+	volumesYaml, err := yaml.Marshal(&volumes)
+	if err != nil {
+		return fmt.Errorf("failed to marshal PVC %s in namespace %s: %w", pvcName, namespace, err)
+	}
+	err = common.PropertyWrite("scheduler-k3s", appName, propertyName, string(volumesYaml))
+	if err != nil {
+		return fmt.Errorf("failed to store property PVC %s in namespace %s: %w", pvcName, namespace, err)
+	}
+
+	return nil
+}
+
+func CommandUnMountPVC(appName string, processType string, pvcName string, mountPath string) error {
+	// load the volumes currently defined for this process type
+	var volumes []ProcessVolume
+	propertyName := fmt.Sprintf("volumes.%s", processType)
+	err := yaml.Unmarshal([]byte(common.PropertyGet("scheduler-k3s", appName, propertyName)), &volumes)
+	if err != nil {
+		return fmt.Errorf("failed to decode YAML in properties: %w", err)
+	}
+
+	// keep every volume except the one being unmounted
+	filteredVolumes := []ProcessVolume{}
+	for _, v := range volumes {
+		if v.Name != pvcName || v.MountPath != mountPath {
+			filteredVolumes = append(filteredVolumes, v)
+		}
+	}
+	volumesYaml, err := yaml.Marshal(&filteredVolumes)
+	if err != nil {
+		return fmt.Errorf("failed to marshal volumes after removing PVC %s: %w", pvcName, err)
+	}
+	err = common.PropertyWrite("scheduler-k3s", appName, propertyName, string(volumesYaml))
+	if err != nil {
+		return fmt.Errorf("failed to store property PVC %s: %w", pvcName, err)
+	}
+
+	return nil
+}
diff --git a/plugins/scheduler-k3s/template.go b/plugins/scheduler-k3s/template.go
index 94bb9baa808..f2acc2c50ad 100644
--- a/plugins/scheduler-k3s/template.go
+++ b/plugins/scheduler-k3s/template.go
@@ -113,6 +113,7 @@ type ProcessValues struct {
 	Replicas    int32               `yaml:"replicas"`
 	Resources   ProcessResourcesMap `yaml:"resources,omitempty"`
 	Web         ProcessWeb          `yaml:"web,omitempty"`
+	Volumes     []ProcessVolume     `yaml:"volumes,omitempty"`
 }
 
 type ProcessAnnotations struct {
@@ -132,6 +133,7 @@ type ProcessAnnotations struct {
 	ServiceAnnotations             map[string]string `yaml:"service,omitempty"`
 	TraefikIngressRouteAnnotations map[string]string `yaml:"traefik_ingressroute,omitempty"`
 	TraefikMiddlewareAnnotations   map[string]string `yaml:"traefik_middleware,omitempty"`
+	PvcAnnotations                 map[string]string `yaml:"pvc,omitempty"`
 }
 
 // ProcessAutoscaling contains the autoscaling configuration for a process
@@ -251,6 +253,7 @@ type ProcessLabels struct {
 	ServiceLabels             map[string]string `yaml:"service,omitempty"`
 	TraefikIngressRouteLabels map[string]string `yaml:"traefik_ingressroute,omitempty"`
 	TraefikMiddlewareLabels   map[string]string `yaml:"traefik_middleware,omitempty"`
+	PvcLabels                 map[string]string `yaml:"pvc,omitempty"`
 }
 
 type ProcessWeb struct {
@@ -275,6 +278,18 @@ type ProcessResources struct {
 	Memory string `yaml:"memory,omitempty"`
 }
 
+type ProcessVolume struct {
+	Name          string `yaml:"name"`
+	Type          string `yaml:"type"`
+	MountPath     string `yaml:"mountPath"`
+	SubPath       string `yaml:"subPath,omitempty"`
+	ReadOnly      bool   `yaml:"readOnly,omitempty"`
+	ClaimName     string `yaml:"claimName,omitempty"`
+	ConfigMapName string `yaml:"configMapName,omitempty"`
+	SecretName    string `yaml:"secretName,omitempty"`
+	Chown         string `yaml:"chown,omitempty"`
+}
+
 type ProcessType string
 
 const (
@@ -324,6 +339,18 @@ type ClusterIssuer struct {
 	Server string `yaml:"server"`
 }
 
+type PersistentVolumeClaim struct {
+	Global struct {
+		Annotations ProcessAnnotations `yaml:"annotations,omitempty"`
+		Labels      ProcessLabels      `yaml:"labels,omitempty"`
+	} `yaml:"global"`
+	Name         string `yaml:"name"`
+	AccessMode   string `yaml:"accessMode"`
+	Storage      string `yaml:"storage"`
+	StorageClass string `yaml:"storageClass"`
+	Namespace    string `yaml:"namespace"`
+}
+
 type Job struct {
 	AppName string
 	Command []string
@@ -405,6 +432,79 @@ func templateKubernetesJob(input Job) (batchv1.Job, error) {
 		podAnnotations[key] = value
 	}
 
+	// collect the volumes configured for this process type
+	processVolumes, err := getVolumes(input.AppName, input.ProcessType)
+	if err != nil {
+		return batchv1.Job{}, fmt.Errorf("Error getting process volumes: %w", err)
+	}
+	var volumeMounts []corev1.VolumeMount
+	var podVolumes []corev1.Volume
+	var initContainerVolumeMounts []corev1.VolumeMount
+	var chownCommands []string
+	hasChown := false
+	for _, volume := range processVolumes {
+		// create the container volumeMounts
+		volumeMounts = append(volumeMounts, corev1.VolumeMount{
+			Name:      volume.Name,
+			ReadOnly:  volume.ReadOnly,
+			MountPath: volume.MountPath,
+			SubPath:   volume.SubPath,
+		})
+		// create the pod volumes
+		volumeSource := corev1.VolumeSource{}
+		switch volume.Type {
+		case "persistentVolumeClaim":
+			volumeSource.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{
+				ClaimName: volume.ClaimName,
+				ReadOnly:  volume.ReadOnly,
+			}
+		case "configMap":
+			volumeSource.ConfigMap = &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: volume.ConfigMapName,
+				},
+			}
+		case "secret":
+			volumeSource.Secret = &corev1.SecretVolumeSource{
+				SecretName: volume.SecretName,
+			}
+		default:
+			return batchv1.Job{}, fmt.Errorf("unknown volume type: %s", volume.Type)
+		}
+		podVolumes = append(podVolumes, corev1.Volume{
+			Name:         volume.Name,
+			VolumeSource: volumeSource,
+		})
+		// Handle ownership fix if chown is specified
+		if volume.Chown != "" {
+			hasChown = true
+			initContainerVolumeMounts = append(initContainerVolumeMounts, corev1.VolumeMount{
+				Name:      volume.Name,
+				MountPath: volume.MountPath,
+			})
+			chownCommands = append(chownCommands, fmt.Sprintf(`
+if [ -d "%s" ]; then
+  echo "Setting ownership for %s";
+  chown -R %s %s;
+else
+  echo "Warning: Directory %s not found, skipping chown";
+fi;
+`, volume.MountPath, volume.MountPath, volume.Chown, volume.MountPath, volume.MountPath))
+		}
+	}
+	// Create the initContainer only if there are chown operations
+	var initContainers []corev1.Container
+	if hasChown {
+		initContainers = []corev1.Container{
+			{
+				Name:         "fix-permissions",
+				Image:        "busybox",
+				Command:      []string{"sh", "-c", strings.Join(chownCommands, "\n")},
+				VolumeMounts: initContainerVolumeMounts,
+			},
+		}
+	}
+
 	job := batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: fmt.Sprintf("%s-%s-%s", input.AppName, input.ProcessType, suffix),
@@ -420,6 +520,7 @@ func templateKubernetesJob(input Job) (batchv1.Job, error) {
 					Annotations: podAnnotations,
 				},
 				Spec: corev1.PodSpec{
+					InitContainers: initContainers,
 					Containers: []corev1.Container{
 						{
 							Args: input.Command,
@@ -441,11 +542,13 @@ func templateKubernetesJob(input Job) (batchv1.Job, error) {
 								Limits:   corev1.ResourceList{},
 								Requests: corev1.ResourceList{},
 							},
-							WorkingDir: input.WorkingDir,
+							WorkingDir:   input.WorkingDir,
+							VolumeMounts: volumeMounts,
 						},
 					},
 					RestartPolicy:      corev1.RestartPolicyNever,
 					ServiceAccountName: input.AppName,
+					Volumes:            podVolumes,
 				},
 			},
 		},
diff --git a/plugins/scheduler-k3s/templates/chart/deployment.yaml b/plugins/scheduler-k3s/templates/chart/deployment.yaml
index 1457ccd5f94..9c422e6ea49 100644
--- a/plugins/scheduler-k3s/templates/chart/deployment.yaml
+++ b/plugins/scheduler-k3s/templates/chart/deployment.yaml
@@ -6,6 +6,34 @@
 {{- $mappings := set $mappings $port_map.name "true" }}
 {{- end }}
 {{- end }}
+
+{{- /* Check if any volumes have chown set */}}
+{{- $has_chown := false }}
+{{- if $config.volumes }}
+{{- range $vol := $config.volumes }}
+{{- if $vol.chown }}
+{{- $has_chown = true }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- /* Set default values to avoid nil pointer errors */}}
+{{- if not $.Values.deployment }}
+{{- $_ := set $.Values "deployment" (dict "strategy" (dict "type" "RollingUpdate" "rollingUpdate" (dict "maxUnavailable" "25%" "maxSurge" "25%"))) }}
+{{- end }}
+
+{{- /* Define strategy type */}}
+{{- $strategyType := $.Values.deployment.strategy.type | default "RollingUpdate" }}
+
+{{- /* Define default values for maxUnavailable and maxSurge */}}
+{{- $maxUnavailable := $.Values.deployment.strategy.rollingUpdate.maxUnavailable | default "25%" }}
+{{- $maxSurge := $.Values.deployment.strategy.rollingUpdate.maxSurge | default "25%" }}
+
+{{- /* Override values if volumes exist */}}
+{{- if and $config.volumes (eq $strategyType "RollingUpdate") }}
+{{- $maxUnavailable = "100%" }}
+{{- $maxSurge = "0" }}
+{{- end }}
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -35,7 +63,13 @@ spec:
      app.kubernetes.io/part-of: {{ $.Values.global.app_name }}
{{ include "print.labels" (dict "config" $.Values.global "key" "pod") | indent 6 }}
{{ include "print.labels" (dict "config" $config "key" "pod") | indent 6 }}
-  strategy: {}
+  strategy:
+    type: {{ $strategyType }}
+    {{- if eq $strategyType "RollingUpdate" }}
+    rollingUpdate:
+      maxUnavailable: {{ $maxUnavailable }}
+      maxSurge: {{ $maxSurge }}
+    {{- end }}
   template:
     metadata:
      annotations:
@@ -113,6 +147,19 @@ spec:
        readinessProbe:
          {{ $config.healthchecks.readiness | toJson | indent 10 }}
        {{- end }}
+        {{- if $config.volumes }}
+        volumeMounts:
+        {{- range $vol := $config.volumes }}
+        - name: {{ $vol.name }}
+          mountPath: {{ $vol.mountPath }}
+          {{- if $vol.subPath }}
+          subPath: {{ $vol.subPath }}
+          {{- end }}
+          {{- if $vol.readOnly }}
+          readOnly: {{ $vol.readOnly }}
+          {{- end }}
+        {{- end }}
+        {{- end }}
        {{- if $.Values.global.image.working_dir }}
        workingDir: {{ $.Values.global.image.working_dir }}
        {{- end }}
@@ -121,3 +168,48 @@ spec:
      - name: {{ $.Values.global.image.image_pull_secrets }}
      {{- end }}
      serviceAccountName: {{ $.Values.global.app_name }}
+      {{- if $has_chown }}
+      initContainers:
+      - name: fix-permissions
+        image: busybox
+        command:
+        - "sh"
+        - "-c"
+        - >
+          {{- range $vol := $config.volumes }}
+          {{- if $vol.chown }}
+          if [ -d "{{ $vol.mountPath }}" ]; then
+            echo "Setting ownership for {{ $vol.mountPath }}";
+            chown -R {{ $vol.chown }} {{ $vol.mountPath }};
+          else
+            echo "Warning: Directory {{ $vol.mountPath }} not found, skipping chown";
+          fi;
+          {{- end }}
+          {{- end }}
+        volumeMounts:
+        {{- range $vol := $config.volumes }}
+        {{- if $vol.chown }}
+        - mountPath: {{ $vol.mountPath }}
+          name: {{ $vol.name }}
+        {{- end }}
+        {{- end }}
+      {{- end }}
+      {{- if $config.volumes }}
+      volumes:
+      {{- range $vol := $config.volumes }}
+      - name: {{ $vol.name }}
+        {{- if eq $vol.type "persistentVolumeClaim" }}
+        persistentVolumeClaim:
+          claimName: {{ $vol.claimName }}
+        {{- else if eq $vol.type "configMap" }}
+        configMap:
+          name: {{ $vol.configMapName }}
+        {{- else if eq $vol.type "secret" }}
+        secret:
+          secretName: {{ $vol.secretName }}
+        {{- else if eq $vol.type "emptyDir" }}
+        emptyDir: {}
+        {{- end }}
+      {{- end }}
+      {{- end }}
+
diff --git a/plugins/scheduler-k3s/templates/chart/pvc.yaml b/plugins/scheduler-k3s/templates/chart/pvc.yaml
new file mode 100644
index 00000000000..5d2b3dfaa3a
--- /dev/null
+++ b/plugins/scheduler-k3s/templates/chart/pvc.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  annotations:
+    dokku.com/managed: "true"
+    {{ include "print.annotations" (dict "config" $.Values.global "key" "pvc") | indent 4 }}
+  name: {{ $.Values.name }}
+  namespace: {{ $.Values.namespace }}
+  labels:
+    pvc: {{ $.Values.name }}
+    {{ include "print.labels" (dict "config" $.Values.global "key" "pvc") | indent 4 }}
+spec:
+  accessModes:
+  - {{ $.Values.accessMode }}
+  resources:
+    requests:
+      storage: {{ $.Values.storage }}
+  storageClassName: {{ $.Values.storageClass }}
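For illustration only, not part of the diff: assuming a hypothetical invocation "dokku scheduler-k3s:add-pvc my-data 5Gi --storage-class-name longhorn" (the PVC name, size and storage class are made up here), CommandAddPVC writes a values.yaml carrying those fields and the template above would render roughly the following manifest; the print.annotations/print.labels includes emit nothing when no extra pvc annotations or labels are configured:

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    dokku.com/managed: "true"
  name: my-data
  namespace: default
  labels:
    pvc: my-data
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: longhorn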
diff --git a/plugins/scheduler-k3s/triggers.go b/plugins/scheduler-k3s/triggers.go
index aede9c47372..3007ea11b3c 100644
--- a/plugins/scheduler-k3s/triggers.go
+++ b/plugins/scheduler-k3s/triggers.go
@@ -385,7 +385,6 @@ func TriggerSchedulerDeploy(scheduler string, appName string, imageTag string) e
 	for processType, processCount := range processes {
 		// todo: implement deployment annotations
 		// todo: implement pod annotations
-		// todo: implement volumes
 
 		healthchecks, ok := appJSON.Healthchecks[processType]
 		if !ok {
@@ -430,6 +429,11 @@ func TriggerSchedulerDeploy(scheduler string, appName string, imageTag string) e
 			return fmt.Errorf("Error getting autoscaling: %w", err)
 		}
 
+		processVolumes, err := getVolumes(appName, processType)
+		if err != nil {
+			return fmt.Errorf("Error getting process volumes: %w", err)
+		}
+
 		processValues := ProcessValues{
 			Annotations: annotations,
 			Autoscaling: autoscaling,
@@ -439,6 +443,7 @@ func TriggerSchedulerDeploy(scheduler string, appName string, imageTag string) e
 			ProcessType: ProcessType_Worker,
 			Replicas:    int32(processCount),
 			Resources:   processResources,
+			Volumes:     processVolumes,
 		}
 
 		if processType == "web" {
@@ -1210,7 +1215,7 @@ func TriggerSchedulerRun(scheduler string, appName string, envCount int, args []
 		Clientset:     clientset,
 		Namespace:     namespace,
 		LabelSelector: batchJobSelector,
-		Timeout:       10,
+		Timeout:       30, // need to wait a bit longer if volumes attached
 		Waiter:        isPodReady,
 	})
 	if err != nil {
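For illustration only, not part of the diff: after a hypothetical "dokku scheduler-k3s:mount myapp my-data /data --chown 1000:1000" (the app, PVC and path are made up here), CommandMountPVC persists a volumes.web property for the app whose YAML payload, following the ProcessVolume yaml tags above with omitempty fields dropped, looks roughly like:

- name: my-data
  type: persistentVolumeClaim
  mountPath: /data
  claimName: my-data
  chown: "1000:1000"

TriggerSchedulerDeploy then copies the same list into the per-process volumes entry of the chart values, which is what the deployment.yaml template reads as $config.volumes.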