From 37e444f59bc9c2b0649edcf1e5c0526bed7c4351 Mon Sep 17 00:00:00 2001 From: schrodit Date: Thu, 5 Mar 2026 14:33:13 +0100 Subject: [PATCH 01/31] feat: add local bootstrap command Signed-off-by: Tim Schrodi --- cli/cmd/beta.go | 1 + cli/cmd/bootstrap_local.go | 52 ++++++++++++++++++++++++++++++++ docs/oms_beta.md | 1 + docs/oms_beta_bootstrap-local.md | 25 +++++++++++++++ 4 files changed, 79 insertions(+) create mode 100644 cli/cmd/bootstrap_local.go create mode 100644 docs/oms_beta_bootstrap-local.md diff --git a/cli/cmd/beta.go b/cli/cmd/beta.go index a280d559..27ea37f2 100644 --- a/cli/cmd/beta.go +++ b/cli/cmd/beta.go @@ -25,4 +25,5 @@ func AddBetaCmd(rootCmd *cobra.Command, opts *GlobalOptions) { AddExtendCmd(beta.cmd, opts) AddBootstrapGcpCmd(beta.cmd, opts) + AddBootstrapLocalCmd(beta.cmd) } diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go new file mode 100644 index 00000000..2f9c9995 --- /dev/null +++ b/cli/cmd/bootstrap_local.go @@ -0,0 +1,52 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package cmd + +import ( + "fmt" + + "github.com/codesphere-cloud/cs-go/pkg/io" + "github.com/codesphere-cloud/oms/internal/bootstrap" + "github.com/codesphere-cloud/oms/internal/installer" + "github.com/codesphere-cloud/oms/internal/util" + "github.com/spf13/cobra" +) + +type BootstrapLocalCmd struct { + cmd *cobra.Command +} + +func (c *BootstrapLocalCmd) RunE(_ *cobra.Command, args []string) error { + err := c.BootstrapLocal() + + if err != nil { + return fmt.Errorf("failed to bootstrap: %w", err) + } + + return nil +} + +func AddBootstrapLocalCmd(parent *cobra.Command) { + bootstrapLocalCmd := BootstrapLocalCmd{ + cmd: &cobra.Command{ + Use: "bootstrap-local", + Short: "Bootstrap a local Codesphere environment", + Long: io.Long(`Bootstraps a local Codesphere environment using only a single Kubernetes cluster. + Rook is used to install Ceph, and CNPG is used for the PostgreSQL database. 
+ Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. + Not for production use.`), + }, + } + + parent.AddCommand(bootstrapLocalCmd.cmd) +} + +func (c *BootstrapLocalCmd) BootstrapLocal() error { + ctx := c.cmd.Context() + stlog := bootstrap.NewStepLogger(false) + icg := installer.NewInstallConfigManager() + fw := util.NewFilesystemWriter() + + return nil +} diff --git a/docs/oms_beta.md b/docs/oms_beta.md index a3099355..6f9f79c5 100644 --- a/docs/oms_beta.md +++ b/docs/oms_beta.md @@ -17,5 +17,6 @@ Be aware that that usage and behavior may change as the features are developed. * [oms](oms.md) - Codesphere Operations Management System (OMS) * [oms beta bootstrap-gcp](oms_beta_bootstrap-gcp.md) - Bootstrap GCP infrastructure for Codesphere +* [oms beta bootstrap-local](oms_beta_bootstrap-local.md) - Bootstrap a local Codesphere environment * [oms beta extend](oms_beta_extend.md) - Extend Codesphere ressources such as base images. diff --git a/docs/oms_beta_bootstrap-local.md b/docs/oms_beta_bootstrap-local.md new file mode 100644 index 00000000..69cc03bf --- /dev/null +++ b/docs/oms_beta_bootstrap-local.md @@ -0,0 +1,25 @@ +## oms beta bootstrap-local + +Bootstrap a local Codesphere environment + +### Synopsis + +Bootstraps a local Codesphere environment using only a single Kubernetes cluster. +Rook is used to install Ceph, and CNPG is used for the PostgreSQL database. +Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. +Not for production use. 
+ +``` +oms beta bootstrap-local [flags] +``` + +### Options + +``` + -h, --help help for bootstrap-local +``` + +### SEE ALSO + +* [oms beta](oms_beta.md) - Commands for early testing + From 4ac80bada3e62e6a99444cc6c726b8ff8b7abc1c Mon Sep 17 00:00:00 2001 From: schrodit Date: Thu, 5 Mar 2026 15:14:28 +0100 Subject: [PATCH 02/31] feat: add bootstrap of cnpg helm chart Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 139 +++++++++++++++++++++++++++++- docs/oms_beta_bootstrap-local.md | 1 + go.mod | 18 ++++ go.sum | 43 +++++++++ internal/bootstrap/local/local.go | 117 +++++++++++++++++++++++++ 5 files changed, 316 insertions(+), 2 deletions(-) create mode 100644 internal/bootstrap/local/local.go diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 2f9c9995..a5808f87 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -4,17 +4,31 @@ package cmd import ( + "bufio" + "context" + "errors" "fmt" + stdio "io" + "os" + "os/exec" + "strings" - "github.com/codesphere-cloud/cs-go/pkg/io" + csio "github.com/codesphere-cloud/cs-go/pkg/io" "github.com/codesphere-cloud/oms/internal/bootstrap" + "github.com/codesphere-cloud/oms/internal/bootstrap/local" "github.com/codesphere-cloud/oms/internal/installer" "github.com/codesphere-cloud/oms/internal/util" "github.com/spf13/cobra" + "golang.org/x/mod/semver" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config" ) type BootstrapLocalCmd struct { cmd *cobra.Command + Yes bool } func (c *BootstrapLocalCmd) RunE(_ *cobra.Command, args []string) error { @@ -32,21 +46,142 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { cmd: &cobra.Command{ Use: "bootstrap-local", Short: "Bootstrap a local Codesphere environment", - Long: io.Long(`Bootstraps a local Codesphere environment using only a single Kubernetes cluster. 
+ Long: csio.Long(`Bootstraps a local Codesphere environment using only a single Kubernetes cluster. Rook is used to install Ceph, and CNPG is used for the PostgreSQL database. Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. Not for production use.`), }, } + bootstrapLocalCmd.cmd.Flags().BoolVarP(&bootstrapLocalCmd.Yes, "yes", "y", false, "Auto-approve the local bootstrapping warning prompt") + bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE parent.AddCommand(bootstrapLocalCmd.cmd) } func (c *BootstrapLocalCmd) BootstrapLocal() error { ctx := c.cmd.Context() + if err := c.ConfirmLocalBootstrapWarning(); err != nil { + return err + } + + if err := c.ValidatePrerequisites(ctx); err != nil { + return err + } + stlog := bootstrap.NewStepLogger(false) icg := installer.NewInstallConfigManager() fw := util.NewFilesystemWriter() + kubeClient, err := c.GetKubeClient(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Kubernetes client: %w", err) + } + + bs := local.NewLocalBootstrapper(ctx, stlog, kubeClient, fw, icg) + return bs.Bootstrap() +} + +func (c *BootstrapLocalCmd) ConfirmLocalBootstrapWarning() error { + fmt.Println(csio.Long(`Codesphere uses Ceph for storage. For local Codesphere bootstrapping, Ceph is installed via Rook, and Minikube is the recommended way to quickly get Rook up and running. Minikube should not be used for production. While other tools such as k3d/kind are great, users have faced issues deploying Rook. + +Always use a virtual machine when testing Rook. Never use your host system where local devices may mistakenly be consumed. + +To install Minikube follow the official guide. It is recommended to use the qemu2 driver when running on a Linux machine and the hyperkit driver when running on a MacOS. Both allow to create and attach additional disks to the virtual machine. This is required for the Ceph OSD to consume one drive. 
We don't recommend any other drivers for Rook. You will need a Minikube version 1.23 or higher. + +Starting the cluster on Minikube is as simple as running: + +# On Linux +minikube start --disk-size=40g --extra-disks=1 --driver qemu2 + +# On MacOS with Intel processor +minikube start --disk-size=40g --extra-disks=1 --driver hyperkit + +# On MacOS with Apple silicon +minikube start --disk-size=40g --extra-disks 1 --driver qemu + + +Consult the offical rook guide for more details and troubleshooting: https://rook.io/docs/rook/latest-release/Contributing/development-environment/#minikube +`)) + + if c.Yes { + return nil + } + + fmt.Print("\nType 'yes' to continue: ") + reader := bufio.NewReader(os.Stdin) + input, err := reader.ReadString('\n') + if err != nil && !errors.Is(err, stdio.EOF) { + return fmt.Errorf("failed to read confirmation: %w", err) + } + + if strings.TrimSpace(strings.ToLower(input)) != "yes" { + return fmt.Errorf("aborted: type 'yes' to continue or pass --yes") + } + + return nil +} + +func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (client.Client, error) { + kubeConfig, err := ctrlconfig.GetConfig() + if err != nil { + return nil, fmt.Errorf("failed to load Kubernetes config: %w", err) + } + + kubeClient, err := ctrlclient.New(kubeConfig, ctrlclient.Options{}) + if err != nil { + return nil, fmt.Errorf("failed to initialize Kubernetes client: %w", err) + } + return kubeClient, nil +} + +func (c *BootstrapLocalCmd) ValidatePrerequisites(ctx context.Context) error { + if err := c.ValidateKubernetesCluster(ctx); err != nil { + return err + } + + if err := c.ValidateHelmVersion(ctx); err != nil { + return err + } + + return nil +} + +func (c *BootstrapLocalCmd) ValidateKubernetesCluster(ctx context.Context) error { + kubeClient, err := c.GetKubeClient(ctx) + if err != nil { + return err + } + + nodeList := &corev1.NodeList{} + if err := kubeClient.List(ctx, nodeList); err != nil { + return fmt.Errorf("failed to list Kubernetes nodes: 
%w", err) + } + + if len(nodeList.Items) == 0 { + return fmt.Errorf("connected to Kubernetes cluster but no nodes are available") + } + + return nil +} + +func (c *BootstrapLocalCmd) ValidateHelmVersion(ctx context.Context) error { + helmPath, err := exec.LookPath("helm") + if err != nil { + return fmt.Errorf("helm binary not found in PATH, Helm 3 or newer is required") + } + + out, err := exec.CommandContext(ctx, helmPath, "version", "--template={{.Version}}").CombinedOutput() + if err != nil { + return fmt.Errorf("failed to get helm version: %w (%s)", err, strings.TrimSpace(string(out))) + } + + version := strings.TrimSpace(string(out)) + if !semver.IsValid(version) { + return fmt.Errorf("failed to parse helm version %q: not a valid semantic version", version) + } + + if semver.Compare(version, "v3.0.0") < 0 { + return fmt.Errorf("helm version %s is not supported, Helm 3 or newer is required", version) + } return nil } diff --git a/docs/oms_beta_bootstrap-local.md b/docs/oms_beta_bootstrap-local.md index 69cc03bf..052e1a80 100644 --- a/docs/oms_beta_bootstrap-local.md +++ b/docs/oms_beta_bootstrap-local.md @@ -17,6 +17,7 @@ oms beta bootstrap-local [flags] ``` -h, --help help for bootstrap-local + -y, --yes Auto-approve the local bootstrapping warning prompt ``` ### SEE ALSO diff --git a/go.mod b/go.mod index e6897fd5..a2c5e022 100644 --- a/go.mod +++ b/go.mod @@ -19,9 +19,12 @@ require ( github.com/stretchr/testify v1.11.1 go.yaml.in/yaml/v3 v3.0.4 golang.org/x/crypto v0.48.0 + golang.org/x/mod v0.33.0 golang.org/x/term v0.40.0 google.golang.org/api v0.269.0 google.golang.org/grpc v1.79.2 + k8s.io/api v0.35.2 + sigs.k8s.io/controller-runtime v0.23.2 ) require ( @@ -512,9 +515,12 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/fxamacker/cbor/v2 
v2.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v84 v84.0.0 // indirect github.com/google/go-licenses/v2 v2.0.1 // indirect @@ -523,7 +529,10 @@ require ( github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect github.com/goreleaser/go-shellwords v1.0.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -533,6 +542,7 @@ require ( github.com/spf13/pflag v1.0.10 // indirect github.com/stretchr/objx v0.5.3 // indirect github.com/ulikunitz/xz v0.5.15 // indirect + github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/mod v0.33.0 // indirect golang.org/x/net v0.51.0 // indirect @@ -542,8 +552,16 @@ require ( golang.org/x/text v0.34.0 // indirect golang.org/x/tools v0.42.0 // indirect google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apimachinery v0.35.2 // indirect + k8s.io/client-go v0.35.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect + k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 
v6.3.2-0.20260122202528-d9cc6641c482 // indirect ) tool ( diff --git a/go.sum b/go.sum index 873b6512..408f6ad4 100644 --- a/go.sum +++ b/go.sum @@ -446,6 +446,8 @@ github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/ github.com/earthboundkid/versioninfo/v2 v2.24.1/go.mod h1:VcWEooDEuyUJnMfbdTh0uFN4cfEIg+kHMuWB2CDCLjw= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -480,6 +482,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= @@ -518,6 +522,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr 
v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/analysis v0.24.2 h1:6p7WXEuKy1llDgOH8FooVeO+Uq2za9qoAOq4ZN08B50= github.com/go-openapi/analysis v0.24.2/go.mod h1:x27OOHKANE0lutg2ml4kzYLoHGMKgRm1Cj2ijVOjJuE= github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo= @@ -662,6 +668,8 @@ github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqt github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -688,6 +696,7 @@ github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/ko v0.18.1 h1:F2WDFIi/eZe5thmFCuk/uH0eVr7ilWCThl+UoTHEKSk= @@ -862,6 +871,8 @@ github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= @@ -995,6 +1006,12 @@ github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modelcontextprotocol/registry v1.4.1 h1:KLQM2SuNbsl2EpXHWEq9IJoNxXaYTQpqUx8b+RmyvXI= github.com/modelcontextprotocol/registry v1.4.1/go.mod h1:SmF5s9FkPsfBRXh2asdLocIinTmDadLjv2nmUr9UCDY= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee 
h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -1300,6 +1317,8 @@ github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV6 github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1631,6 +1650,10 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= @@ -1650,16 +1673,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.7.0 h1:w6WUp1VbkqPEgLz4rkBzH/CSU6HkoqNLp6GstyTx3lU= honnef.co/go/tools v0.7.0/go.mod h1:pm29oPxeP3P82ISxZDgIYeOaf9ta6Pi0EWvCFoLG2vc= +k8s.io/api v0.35.2 h1:tW7mWc2RpxW7HS4CoRXhtYHSzme1PN1UjGHJ1bdrtdw= +k8s.io/api v0.35.2/go.mod h1:7AJfqGoAZcwSFhOjcGM7WV05QxMMgUaChNfLTXDRE60= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.2 h1:NqsM/mmZA7sHW02JZ9RTtk3wInRgbVxL8MPfzSANAK8= +k8s.io/apimachinery v0.35.2/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.2 h1:YUfPefdGJA4aljDdayAXkc98DnPkIetMl4PrKX97W9o= +k8s.io/client-go v0.35.2/go.mod h1:4QqEwh4oQpeK8AaefZ0jwTFJw/9kIjdQi0jpKeYvz7g= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= +k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 h1:oV4uULAC2QPIdMQwjMaNIwykyhWhnhBwX40yd5h9u3U= +k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= mvdan.cc/gofumpt v0.9.2 
h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= mvdan.cc/gofumpt v0.9.2/go.mod h1:iB7Hn+ai8lPvofHd9ZFGVg2GOr8sBUw1QUWjNbmIL/s= mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15 h1:ssMzja7PDPJV8FStj7hq9IKiuiKhgz9ErWw+m68e7DI= mvdan.cc/unparam v0.0.0-20251027182757-5beb8c8f8f15/go.mod h1:4M5MMXl2kW6fivUT6yRGpLLPNfuGtU2Z0cPvFquGDYU= +sigs.k8s.io/controller-runtime v0.23.2 h1:Oh3FliXaA2CS1chpUXvjVNJtsvGZYUxQH8s7bvR7aXk= +sigs.k8s.io/controller-runtime v0.23.2/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kind v0.31.0 h1:UcT4nzm+YM7YEbqiAKECk+b6dsvc/HRZZu9U0FolL1g= sigs.k8s.io/kind v0.31.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.7.0 h1:Db8W44cB54TWD7stUFFSWxdfpdn6fZVcDl0w3R4RVM0= diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go new file mode 100644 index 00000000..c37e3ecc --- /dev/null +++ b/internal/bootstrap/local/local.go @@ -0,0 +1,117 @@ +package local + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/codesphere-cloud/oms/internal/bootstrap" + "github.com/codesphere-cloud/oms/internal/installer" + "github.com/codesphere-cloud/oms/internal/util" + corev1 
"k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + codesphereSystemNamespace = "codesphere-system" + cnpgRepoName = "cnpg" + cnpgRepoURL = "https://cloudnative-pg.github.io/charts" + cnpgReleaseName = "cnpg" + cnpgChartRef = "cnpg/cloudnative-pg" +) + +type LocalBootstrapper struct { + ctx context.Context + stlog *bootstrap.StepLogger + kubeClient client.Client + fw util.FileIO + icg installer.InstallConfigManager + // Environment + Env *CodesphereEnvironment +} + +type CodesphereEnvironment struct { + // Add any environment-specific fields here, e.g.: + ClusterName string +} + +func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, fw util.FileIO, icg installer.InstallConfigManager) *LocalBootstrapper { + return &LocalBootstrapper{ + ctx: ctx, + stlog: stlog, + kubeClient: kubeClient, + fw: fw, + icg: icg, + Env: &CodesphereEnvironment{}, + } +} + +func (c *LocalBootstrapper) Bootstrap() error { + if c.stlog == nil { + return fmt.Errorf("step logger is not initialized") + } + + err := c.stlog.Step("Ensure codesphere system namespace", c.EnsureCodesphereSystemNamespace) + if err != nil { + return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) + } + + err = c.stlog.Step("Install CloudNativePG", c.InstallCloudNativePGHelmChart) + if err != nil { + return fmt.Errorf("failed to install CloudNativePG Helm chart: %w", err) + } + + return nil +} + +func (c *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { + ctx := c.ctx + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: codesphereSystemNamespace}, + } + if err := c.kubeClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create namespace %q: %w", codesphereSystemNamespace, err) + } + + return nil +} + +func (c 
*LocalBootstrapper) InstallCloudNativePGHelmChart() error { + ctx := c.ctx + repoAddOut, err := exec.CommandContext(ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to add helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoAddOut))) + } + + repoUpdateOut, err := exec.CommandContext(ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) + } + + cmd := exec.CommandContext( + ctx, + "helm", + "upgrade", + "--install", + cnpgReleaseName, + cnpgChartRef, + "--namespace", + codesphereSystemNamespace, + "--create-namespace", + "--set", + "config.clusterWide=false", + "--wait", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to install helm chart %q: %w", cnpgChartRef, err) + } + + return nil +} From 7c8f07a6231e114e061cfa466072b269603e390b Mon Sep 17 00:00:00 2001 From: schrodit Date: Thu, 5 Mar 2026 16:13:49 +0100 Subject: [PATCH 03/31] feat: add postgres deployment Signed-off-by: Tim Schrodi --- .gitignore | 3 + cli/cmd/bootstrap_local.go | 14 ++- go.mod | 25 +++- go.sum | 38 +++++- internal/bootstrap/local/local.go | 203 ++++++++++++++++++++++++++++-- 5 files changed, 260 insertions(+), 23 deletions(-) diff --git a/.gitignore b/.gitignore index 1dac6edb..0be0cf89 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ vendor/ go.work go.work.sum +# Go modules +vendor + # local env .envrc .env diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index a5808f87..148f7992 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -13,6 +13,7 @@ import ( "os/exec" "strings" + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" csio "github.com/codesphere-cloud/cs-go/pkg/io" "github.com/codesphere-cloud/oms/internal/bootstrap" 
"github.com/codesphere-cloud/oms/internal/bootstrap/local" @@ -21,6 +22,8 @@ import ( "github.com/spf13/cobra" "golang.org/x/mod/semver" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -126,7 +129,16 @@ func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (client.Client, e return nil, fmt.Errorf("failed to load Kubernetes config: %w", err) } - kubeClient, err := ctrlclient.New(kubeConfig, ctrlclient.Options{}) + scheme := runtime.NewScheme() + if err := clientgoscheme.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to add Kubernetes core scheme: %w", err) + } + + if err := cnpgv1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to add CloudNativePG scheme: %w", err) + } + + kubeClient, err := ctrlclient.New(kubeConfig, ctrlclient.Options{Scheme: scheme}) if err != nil { return nil, fmt.Errorf("failed to initialize Kubernetes client: %w", err) } diff --git a/go.mod b/go.mod index a2c5e022..26862b43 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( cloud.google.com/go/iam v1.5.3 cloud.google.com/go/resourcemanager v1.10.7 cloud.google.com/go/serviceusage v1.9.7 + github.com/cloudnative-pg/cloudnative-pg v1.28.1 github.com/codesphere-cloud/cs-go v0.19.2 github.com/creativeprojects/go-selfupdate v1.5.2 github.com/jedib0t/go-pretty/v6 v6.7.8 @@ -24,6 +25,8 @@ require ( google.golang.org/api v0.269.0 google.golang.org/grpc v1.79.2 k8s.io/api v0.35.2 + k8s.io/apimachinery v0.35.2 + k8s.io/client-go v0.35.2 sigs.k8s.io/controller-runtime v0.23.2 ) @@ -512,15 +515,20 @@ require ( require ( github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/clipperhouse/uax29/v2 v2.7.0 // indirect + github.com/cloudnative-pg/barman-cloud v0.4.1-0.20260108104508-ced266c145f5 // indirect + 
github.com/cloudnative-pg/cnpg-i v0.3.1 // indirect + github.com/cloudnative-pg/machinery v0.3.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect - github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v84 v84.0.0 // indirect github.com/google/go-licenses/v2 v2.0.1 // indirect @@ -530,12 +538,17 @@ require ( github.com/goreleaser/go-shellwords v1.0.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect + github.com/lib/pq v1.11.2 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect 
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect @@ -544,21 +557,21 @@ require ( github.com/ulikunitz/xz v0.5.15 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/mod v0.33.0 // indirect golang.org/x/net v0.51.0 // indirect golang.org/x/oauth2 v0.35.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect golang.org/x/tools v0.42.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.35.2 // indirect - k8s.io/client-go v0.35.2 // indirect + k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect - k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 // indirect + k8s.io/utils v0.0.0-20260108192941-914a6e750570 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect diff --git a/go.sum b/go.sum index 408f6ad4..434981b8 100644 --- a/go.sum +++ b/go.sum @@ -355,6 +355,14 @@ github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJ github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= +github.com/cloudnative-pg/barman-cloud v0.4.1-0.20260108104508-ced266c145f5 h1:wPB7VTNgTv6t9sl4QYOBakmVTqHnOdKUht7Q3aL+uns= +github.com/cloudnative-pg/barman-cloud v0.4.1-0.20260108104508-ced266c145f5/go.mod h1:qD0NtJOllNQbRB0MaleuHsZjFYaXtXfdg0HbFTbuHn0= 
+github.com/cloudnative-pg/cloudnative-pg v1.28.1 h1:HdOUWgFhta558uHfXeO/199qCApxaj5yi05x6nWNmgs= +github.com/cloudnative-pg/cloudnative-pg v1.28.1/go.mod h1:yhRa4GqJAjNd0tT9AiRgk1KdqLhMjo/JmGGoASRl2CU= +github.com/cloudnative-pg/cnpg-i v0.3.1 h1:fKj8NoToWI11HUL2UWYJBpkVzmaTvbs3kDMo7wQF8RU= +github.com/cloudnative-pg/cnpg-i v0.3.1/go.mod h1:glRDiJLJY51FY8ScJIv/OkaGJxFnojJkkNAqSy5XC6s= +github.com/cloudnative-pg/machinery v0.3.3 h1:CaqXqLTJH9RrVv3R/YU0NmFaI/F18HLg2JfH3mQLcDk= +github.com/cloudnative-pg/machinery v0.3.3/go.mod h1:RYAYlVKBF5pH4mg+Q8wHjNDyENV9ajbkG41zOEf8DEs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= @@ -446,8 +454,8 @@ github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/ github.com/earthboundkid/versioninfo/v2 v2.24.1/go.mod h1:VcWEooDEuyUJnMfbdTh0uFN4cfEIg+kHMuWB2CDCLjw= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -464,6 +472,8 @@ github.com/envoyproxy/protoc-gen-validate 
v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= @@ -666,10 +676,12 @@ github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e h1:ai0EfmVYE2b github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -918,6 +930,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kulti/thelper v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= github.com/kunwardeep/paralleltest v1.0.15 h1:ZMk4Qt306tHIgKISHWFJAO1IDQJLc6uDyJMLyncOb6w= @@ -942,6 +956,8 @@ github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84Yrj github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= github.com/letsencrypt/boulder v0.20260105.0 h1:P94haPlN1xm8MhIHSXbUu1cA0t0EoMhXQyMz/jLwR34= github.com/letsencrypt/boulder v0.20260105.0/go.mod h1:FWHD4EclPHIQ1y2AKEXyySrM3eKiwEyGzcwcupVEFyE= +github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs= +github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= github.com/lithammer/shortuuid v3.0.0+incompatible h1:NcD0xWW/MZYXEHa6ITy6kaXN5nwm/V115vj2YXfhS0w= github.com/lithammer/shortuuid v3.0.0+incompatible/go.mod h1:FR74pbAuElzOUuenUHTK2Tciko1/vKuIKS9dSkDrA4w= github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= @@ -996,6 +1012,8 @@ github.com/moby/moby/api v1.52.0 
h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= github.com/moby/moby/api v1.52.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= github.com/moby/moby/client v0.2.1 h1:1Grh1552mvv6i+sYOdY+xKKVTvzJegcVMhuXocyDz/k= github.com/moby/moby/client v0.2.1/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -1042,6 +1060,8 @@ github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOo github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= @@ -1098,6 +1118,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f 
h1:VXTQfuJj9vKR4TCkEuWIckKvdHFeJH/huIFJ9/cXOB0= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1 h1:wyKanf+IFdbIqbDNYGt+f1dabLErLWtBaxd0KaAx4aM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1/go.mod h1:WHiLZmOWVop/MoYvRD58LfnPeyE+dcITby/jQjg83Hw= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1262,6 +1284,8 @@ github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qv github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= github.com/theupdateframework/go-tuf/v2 v2.4.1 h1:K6ewW064rKZCPkRo1W/CTbTtm/+IB4+coG1iNURAGCw= github.com/theupdateframework/go-tuf/v2 v2.4.1/go.mod h1:Nex2enPVYDFCklrnbTzl3OVwD7fgIAj0J5++z/rvCj8= +github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= +github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -1609,6 +1633,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 
+gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.269.0 h1:qDrTOxKUQ/P0MveH6a7vZ+DNHxJQjtGm/uvdbdGXCQg= @@ -1685,8 +1711,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ= k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8 h1:oV4uULAC2QPIdMQwjMaNIwykyhWhnhBwX40yd5h9u3U= -k8s.io/utils v0.0.0-20260106112306-0fe9cd71b2f8/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY= +k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= mvdan.cc/gofumpt v0.9.2 h1:zsEMWL8SVKGHNztrx6uZrXdp7AX8r421Vvp23sz7ik4= diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index c37e3ecc..5c2a9c24 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -2,17 +2,23 @@ package local import ( "context" + "errors" "fmt" "os" "os/exec" "strings" + "time" + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/installer" "github.com/codesphere-cloud/oms/internal/util" corev1 "k8s.io/api/core/v1" apierrors 
"k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -22,8 +28,31 @@ const ( cnpgRepoURL = "https://cloudnative-pg.github.io/charts" cnpgReleaseName = "cnpg" cnpgChartRef = "cnpg/cloudnative-pg" + cnpgDatabaseClusterName = "masterdata" + cnpgDatabaseVersion = "15.14" + cnpgDatabaseStorageSize = "10Gi" + cnpgReadyTimeout = 15 * time.Minute + cnpgReadyPollInterval = 5 * time.Second + cnpgSecretPasswordKey = "password" ) +type retryableWaitError struct { + err error +} + +func (e *retryableWaitError) Error() string { + return e.err.Error() +} + +func (e *retryableWaitError) Unwrap() error { + return e.err +} + +func isRetryableWaitError(err error) bool { + var target *retryableWaitError + return errors.As(err, &target) +} + type LocalBootstrapper struct { ctx context.Context stlog *bootstrap.StepLogger @@ -60,21 +89,31 @@ func (c *LocalBootstrapper) Bootstrap() error { return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) } - err = c.stlog.Step("Install CloudNativePG", c.InstallCloudNativePGHelmChart) + err = c.stlog.Step("Install CloudNativePG and PostgreSQL", func() error { + err := c.stlog.Substep("Install CloudNativePG operator", c.InstallCloudNativePGHelmChart) + if err != nil { + return err + } + + err = c.stlog.Substep("Deploy PostgreSQL database", c.DeployPostgresDatabase) + if err != nil { + return err + } + + return nil + }) if err != nil { - return fmt.Errorf("failed to install CloudNativePG Helm chart: %w", err) + return fmt.Errorf("failed to install CloudNativePG and deploy PostgreSQL database: %w", err) } return nil } func (c *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { - ctx := c.ctx - namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: codesphereSystemNamespace}, } - if err := 
c.kubeClient.Create(ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + if err := c.kubeClient.Create(c.ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failed to create namespace %q: %w", codesphereSystemNamespace, err) } @@ -82,19 +121,18 @@ func (c *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { } func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { - ctx := c.ctx - repoAddOut, err := exec.CommandContext(ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() - if err != nil { + repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() + if err != nil && !strings.Contains(string(repoAddOut), "already exists") { return fmt.Errorf("failed to add helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoAddOut))) } - repoUpdateOut, err := exec.CommandContext(ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() + repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() if err != nil { return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) } cmd := exec.CommandContext( - ctx, + c.ctx, "helm", "upgrade", "--install", @@ -115,3 +153,148 @@ func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { return nil } + +func (c *LocalBootstrapper) DeployPostgresDatabase() error { + postgresCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + }, + Spec: cnpgv1.ClusterSpec{ + ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), + Instances: 1, + StorageConfiguration: cnpgv1.StorageConfiguration{ + Size: cnpgDatabaseStorageSize, + }, + }, + } + + if err := c.kubeClient.Create(c.ctx, postgresCluster); err != nil && !apierrors.IsAlreadyExists(err) { + return 
fmt.Errorf("failed to create PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) + } + + if err := c.WaitForPostgresDatabaseReady(); err != nil { + return err + } + + return nil +} + +func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { + ctx, cancel := context.WithTimeout(c.ctx, cnpgReadyTimeout) + defer cancel() + + clusterKey := client.ObjectKey{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + } + + steps := int(cnpgReadyTimeout / cnpgReadyPollInterval) + if steps < 1 { + steps = 1 + } + + backoff := wait.Backoff{ + Duration: cnpgReadyPollInterval, + Factor: 1.0, + Jitter: 0.1, + Steps: steps, + } + + lastPhase := "" + lastReadyInstances := 0 + lastInstances := 0 + + err := retry.OnError(backoff, isRetryableWaitError, func() error { + if err := ctx.Err(); err != nil { + return err + } + + cluster := &cnpgv1.Cluster{} + err := c.kubeClient.Get(ctx, clusterKey, cluster) + if err != nil { + if apierrors.IsNotFound(err) { + return &retryableWaitError{err: fmt.Errorf("PostgreSQL cluster %q not found yet", cnpgDatabaseClusterName)} + } + + return err + } + + lastPhase = cluster.Status.Phase + lastReadyInstances = cluster.Status.ReadyInstances + lastInstances = cluster.Status.Instances + + if isCNPGClusterReady(cluster) { + return nil + } + + return &retryableWaitError{err: fmt.Errorf( + "PostgreSQL cluster is not ready yet (phase=%q, readyInstances=%d, instances=%d)", + lastPhase, + lastReadyInstances, + lastInstances, + )} + }) + if err == nil { + return nil + } + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { + return fmt.Errorf( + "timed out waiting for PostgreSQL cluster %q to become ready (phase=%q, readyInstances=%d, instances=%d, error=%v)", + cnpgDatabaseClusterName, + lastPhase, + lastReadyInstances, + lastInstances, + err, + ) + } + + return fmt.Errorf("failed to fetch PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) +} + +func (c 
*LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { + clusterKey := client.ObjectKey{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + } + + cluster := &cnpgv1.Cluster{} + if err := c.kubeClient.Get(c.ctx, clusterKey, cluster); err != nil { + return "", fmt.Errorf("failed to get PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) + } + + secretName := cluster.GetSuperuserSecretName() + secretKey := client.ObjectKey{ + Name: secretName, + Namespace: codesphereSystemNamespace, + } + + secret := &corev1.Secret{} + if err := c.kubeClient.Get(c.ctx, secretKey, secret); err != nil { + return "", fmt.Errorf("failed to get PostgreSQL superuser secret %q: %w", secretName, err) + } + + passwordBytes, ok := secret.Data[cnpgSecretPasswordKey] + if !ok { + return "", fmt.Errorf("PostgreSQL superuser secret %q does not contain key %q", secretName, cnpgSecretPasswordKey) + } + if len(passwordBytes) == 0 { + return "", fmt.Errorf("PostgreSQL superuser secret %q contains an empty %q value", secretName, cnpgSecretPasswordKey) + } + + return string(passwordBytes), nil +} + +func isCNPGClusterReady(cluster *cnpgv1.Cluster) bool { + if cluster == nil { + return false + } + + readyCondition := apimeta.FindStatusCondition(cluster.Status.Conditions, string(cnpgv1.ConditionClusterReady)) + if readyCondition != nil && readyCondition.Status == metav1.ConditionTrue { + return true + } + + return cluster.Status.Instances > 0 && cluster.Status.ReadyInstances == cluster.Status.Instances +} From 00dca53dcf6e784daf3550d58959629d7a725faf Mon Sep 17 00:00:00 2001 From: schrodit Date: Fri, 6 Mar 2026 13:11:20 +0100 Subject: [PATCH 04/31] feat: add rook and ceph deployment Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 5 + external/ceph.rook.io/doc.go | 9 + external/ceph.rook.io/register.go | 5 + external/ceph.rook.io/v1/annotations.go | 129 + external/ceph.rook.io/v1/annotations_test.go | 154 + external/ceph.rook.io/v1/cleanup.go | 47 + 
external/ceph.rook.io/v1/cluster.go | 57 + external/ceph.rook.io/v1/doc.go | 21 + external/ceph.rook.io/v1/filesystem.go | 21 + external/ceph.rook.io/v1/keys.go | 36 + external/ceph.rook.io/v1/labels.go | 218 + external/ceph.rook.io/v1/labels_test.go | 329 + external/ceph.rook.io/v1/mirror.go | 26 + external/ceph.rook.io/v1/namespace.go | 31 + external/ceph.rook.io/v1/network.go | 196 + external/ceph.rook.io/v1/network_test.go | 444 ++ external/ceph.rook.io/v1/nfs.go | 105 + external/ceph.rook.io/v1/nfs_test.go | 224 + external/ceph.rook.io/v1/nvmeof.go | 26 + external/ceph.rook.io/v1/object.go | 203 + external/ceph.rook.io/v1/object_test.go | 366 ++ external/ceph.rook.io/v1/placement.go | 146 + external/ceph.rook.io/v1/placement_test.go | 360 ++ external/ceph.rook.io/v1/pool.go | 97 + external/ceph.rook.io/v1/pool_test.go | 74 + external/ceph.rook.io/v1/priorityclasses.go | 73 + .../ceph.rook.io/v1/priorityclasses_test.go | 63 + external/ceph.rook.io/v1/register.go | 96 + external/ceph.rook.io/v1/resources.go | 123 + external/ceph.rook.io/v1/scc.go | 81 + external/ceph.rook.io/v1/scc_test.go | 30 + external/ceph.rook.io/v1/spec_test.go | 169 + external/ceph.rook.io/v1/status.go | 74 + external/ceph.rook.io/v1/status_test.go | 135 + external/ceph.rook.io/v1/storage.go | 198 + external/ceph.rook.io/v1/storage_test.go | 291 + external/ceph.rook.io/v1/topic.go | 84 + external/ceph.rook.io/v1/topic_test.go | 170 + external/ceph.rook.io/v1/types.go | 4082 ++++++++++++ external/ceph.rook.io/v1/volume.go | 84 + external/ceph.rook.io/v1/volume_test.go | 110 + .../ceph.rook.io/v1/zz_generated.deepcopy.go | 5592 +++++++++++++++++ go.mod | 11 +- go.sum | 14 + internal/bootstrap/local/local.go | 245 +- 45 files changed, 15043 insertions(+), 11 deletions(-) create mode 100644 external/ceph.rook.io/doc.go create mode 100644 external/ceph.rook.io/register.go create mode 100644 external/ceph.rook.io/v1/annotations.go create mode 100644 external/ceph.rook.io/v1/annotations_test.go 
create mode 100644 external/ceph.rook.io/v1/cleanup.go create mode 100644 external/ceph.rook.io/v1/cluster.go create mode 100644 external/ceph.rook.io/v1/doc.go create mode 100644 external/ceph.rook.io/v1/filesystem.go create mode 100644 external/ceph.rook.io/v1/keys.go create mode 100644 external/ceph.rook.io/v1/labels.go create mode 100644 external/ceph.rook.io/v1/labels_test.go create mode 100644 external/ceph.rook.io/v1/mirror.go create mode 100644 external/ceph.rook.io/v1/namespace.go create mode 100644 external/ceph.rook.io/v1/network.go create mode 100644 external/ceph.rook.io/v1/network_test.go create mode 100644 external/ceph.rook.io/v1/nfs.go create mode 100644 external/ceph.rook.io/v1/nfs_test.go create mode 100644 external/ceph.rook.io/v1/nvmeof.go create mode 100644 external/ceph.rook.io/v1/object.go create mode 100644 external/ceph.rook.io/v1/object_test.go create mode 100644 external/ceph.rook.io/v1/placement.go create mode 100644 external/ceph.rook.io/v1/placement_test.go create mode 100644 external/ceph.rook.io/v1/pool.go create mode 100644 external/ceph.rook.io/v1/pool_test.go create mode 100644 external/ceph.rook.io/v1/priorityclasses.go create mode 100644 external/ceph.rook.io/v1/priorityclasses_test.go create mode 100644 external/ceph.rook.io/v1/register.go create mode 100644 external/ceph.rook.io/v1/resources.go create mode 100644 external/ceph.rook.io/v1/scc.go create mode 100644 external/ceph.rook.io/v1/scc_test.go create mode 100644 external/ceph.rook.io/v1/spec_test.go create mode 100644 external/ceph.rook.io/v1/status.go create mode 100644 external/ceph.rook.io/v1/status_test.go create mode 100644 external/ceph.rook.io/v1/storage.go create mode 100644 external/ceph.rook.io/v1/storage_test.go create mode 100644 external/ceph.rook.io/v1/topic.go create mode 100644 external/ceph.rook.io/v1/topic_test.go create mode 100755 external/ceph.rook.io/v1/types.go create mode 100644 external/ceph.rook.io/v1/volume.go create mode 100644 
external/ceph.rook.io/v1/volume_test.go create mode 100644 external/ceph.rook.io/v1/zz_generated.deepcopy.go diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 148f7992..3d65d723 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -15,6 +15,7 @@ import ( cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" csio "github.com/codesphere-cloud/cs-go/pkg/io" + rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/bootstrap/local" "github.com/codesphere-cloud/oms/internal/installer" @@ -138,6 +139,10 @@ func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (client.Client, e return nil, fmt.Errorf("failed to add CloudNativePG scheme: %w", err) } + if err := rookcephv1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to add Rook Ceph scheme: %w", err) + } + kubeClient, err := ctrlclient.New(kubeConfig, ctrlclient.Options{Scheme: scheme}) if err != nil { return nil, fmt.Errorf("failed to initialize Kubernetes client: %w", err) diff --git a/external/ceph.rook.io/doc.go b/external/ceph.rook.io/doc.go new file mode 100644 index 00000000..b03c6f15 --- /dev/null +++ b/external/ceph.rook.io/doc.go @@ -0,0 +1,9 @@ +// Copy of github.com/rook/rook/pkg/apis/ceph.rook.io/v1, due to issues +// with the package dependencies: +// +// go: github.com/libopenstorage/secrets@v0.0.0-00010101000000-000000000000 requires +// +// github.com/portworx/sched-ops@v1.20.4-rc1 requires +// github.com/kubernetes-incubator/external-storage@v0.20.4-openstorage-rc2 requires +// k8s.io/apiextensions-apiserver@v0.0.0: reading k8s.io/apiextensions-apiserver/go.mod at revision v0.0.0: unknown revision v0.0.0 +package cephrookio diff --git a/external/ceph.rook.io/register.go b/external/ceph.rook.io/register.go new file mode 100644 index 00000000..d722c383 --- /dev/null +++ b/external/ceph.rook.io/register.go @@ -0,0 +1,5 
@@ +package cephrookio + +const ( + CustomResourceGroupName = "ceph.rook.io" +) diff --git a/external/ceph.rook.io/v1/annotations.go b/external/ceph.rook.io/v1/annotations.go new file mode 100644 index 00000000..4eaa6487 --- /dev/null +++ b/external/ceph.rook.io/v1/annotations.go @@ -0,0 +1,129 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AnnotationsSpec is the main spec annotation for all daemons +// +kubebuilder:pruning:PreserveUnknownFields +// +nullable +type AnnotationsSpec map[KeyType]Annotations + +// Annotations are annotations +type Annotations map[string]string + +func (a AnnotationsSpec) All() Annotations { + return a[KeyAll] +} + +// GetMgrAnnotations returns the Annotations for the MGR service +func GetMgrAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyMgr) +} + +// GetDashboardAnnotations returns the Annotations for the Dashboard service +func GetDashboardAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyDashboard) +} + +// GetMonAnnotations returns the Annotations for the MON service +func GetMonAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyMon) +} + +// GetKeyRotationAnnotations returns the annotations for the key rotation job +func GetKeyRotationAnnotations(a AnnotationsSpec) Annotations { + return 
mergeAllAnnotationsWithKey(a, KeyRotation) +} + +// GetOSDPrepareAnnotations returns the annotations for the OSD service +func GetOSDPrepareAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyOSDPrepare) +} + +// GetOSDAnnotations returns the annotations for the OSD service +func GetOSDAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyOSD) +} + +// GetCleanupAnnotations returns the Annotations for the cleanup job +func GetCleanupAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyCleanup) +} + +// GetCephExporterAnnotations returns the Annotations for the MGR service +func GetCephExporterAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyCephExporter) +} + +// GetCmdReporterAnnotations returns the Annotations for jobs detecting versions +func GetCmdReporterAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyCmdReporter) +} + +// GetCrashCollectorAnnotations returns the Annotations for the crash collector +func GetCrashCollectorAnnotations(a AnnotationsSpec) Annotations { + return mergeAllAnnotationsWithKey(a, KeyCrashCollector) +} + +func GetClusterMetadataAnnotations(a AnnotationsSpec) Annotations { + return a[KeyClusterMetadata] +} + +func mergeAllAnnotationsWithKey(a AnnotationsSpec, name KeyType) Annotations { + all := a.All() + if all != nil { + return all.Merge(a[name]) + } + return a[name] +} + +// ApplyToObjectMeta adds annotations to object meta unless the keys are already defined. +func (a Annotations) ApplyToObjectMeta(t *metav1.ObjectMeta) { + if t.Annotations == nil { + t.Annotations = map[string]string{} + } + for k, v := range a { + if _, ok := t.Annotations[k]; !ok { + t.Annotations[k] = v + } + } +} + +// Merge returns an Annotations which results from merging the attributes of the +// original Annotations with the attributes of the supplied one. 
The supplied +// Annotation attributes will override the original ones if defined. +func (a Annotations) Merge(with map[string]string) Annotations { + // Create a new map of type Annotations to hold the merged results + ret := Annotations{} + + // Copy the contents of the original map (a) into ret + for k, v := range a { + ret[k] = v + } + + // Add entries from the 'with' map only if the key does not already exist + for k, v := range with { + if _, exists := ret[k]; !exists { + ret[k] = v + } + } + + return ret +} diff --git a/external/ceph.rook.io/v1/annotations_test.go b/external/ceph.rook.io/v1/annotations_test.go new file mode 100644 index 00000000..465ccbd2 --- /dev/null +++ b/external/ceph.rook.io/v1/annotations_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestCephAnnotationsMerge(t *testing.T) { + // No annotations defined + testAnnotations := AnnotationsSpec{} + a := GetOSDAnnotations(testAnnotations) + assert.Nil(t, a) + + // Only a specific component annotations without "all" + testAnnotations = AnnotationsSpec{ + "mgr": {"mgrkey": "mgrval"}, + "mon": {"monkey": "monval"}, + "osd": {"osdkey": "osdval"}, + "rgw": {"rgwkey": "rgwval"}, + "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, + } + a = GetMgrAnnotations(testAnnotations) + assert.Equal(t, "mgrval", a["mgrkey"]) + assert.Equal(t, 1, len(a)) + a = GetMonAnnotations(testAnnotations) + assert.Equal(t, "monval", a["monkey"]) + assert.Equal(t, 1, len(a)) + a = GetOSDAnnotations(testAnnotations) + assert.Equal(t, "osdval", a["osdkey"]) + assert.Equal(t, 1, len(a)) + + // No annotations matching the component + testAnnotations = AnnotationsSpec{ + "mgr": {"mgrkey": "mgrval"}, + } + a = GetMonAnnotations(testAnnotations) + assert.Nil(t, a) + + // Merge with "all" + testAnnotations = AnnotationsSpec{ + "all": {"allkey1": "allval1", "allkey2": "allval2"}, + "mgr": {"mgrkey": "mgrval"}, + "cmdreporter": {"myversions": "detect"}, + "crashcollector": {"crash": "crashval"}, + "osd": {"osdkey": "osdval"}, + } + a = GetMonAnnotations(testAnnotations) + assert.Equal(t, "allval1", a["allkey1"]) + assert.Equal(t, "allval2", a["allkey2"]) + assert.Equal(t, 2, len(a)) + a = GetMgrAnnotations(testAnnotations) + assert.Equal(t, "mgrval", a["mgrkey"]) + assert.Equal(t, "allval1", a["allkey1"]) + assert.Equal(t, "allval2", a["allkey2"]) + assert.Equal(t, 3, len(a)) + b := GetCmdReporterAnnotations(testAnnotations) + assert.Equal(t, "detect", b["myversions"]) + assert.Equal(t, "allval1", b["allkey1"]) + assert.Equal(t, "allval2", b["allkey2"]) + c := 
GetCrashCollectorAnnotations(testAnnotations) + assert.Equal(t, "crashval", c["crash"]) + assert.Equal(t, "allval1", c["allkey1"]) + assert.Equal(t, "allval2", c["allkey2"]) + d := GetOSDAnnotations(testAnnotations) + assert.Equal(t, "allval1", d["allkey1"]) + assert.Equal(t, "allval2", d["allkey2"]) + assert.Equal(t, "osdval", d["osdkey"]) +} + +func TestAnnotationsSpec(t *testing.T) { + specYaml := []byte(` +mgr: + foo: bar + hello: world +mon: +`) + + // convert the raw spec yaml into JSON + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + + // unmarshal the JSON into a strongly typed annotations spec object + var annotations AnnotationsSpec + err = json.Unmarshal(rawJSON, &annotations) + assert.Nil(t, err) + + // the unmarshalled annotations spec should equal the expected spec below + expected := AnnotationsSpec{ + "mgr": map[string]string{ + "foo": "bar", + "hello": "world", + }, + "mon": nil, + } + assert.Equal(t, expected, annotations) +} + +func TestAnnotationsApply(t *testing.T) { + objMeta := &metav1.ObjectMeta{} + testAnnotations := Annotations{ + "foo": "bar", + "hello": "world", + } + testAnnotations.ApplyToObjectMeta(objMeta) + assert.Equal(t, testAnnotations, Annotations(objMeta.Annotations)) + + testAnnotations["isthisatest"] = "test" + testAnnotations.ApplyToObjectMeta(objMeta) + assert.Equal(t, testAnnotations, Annotations(objMeta.Annotations)) +} + +func TestAnnotationsMerge(t *testing.T) { + testAnnotationsPart1 := Annotations{ + "foo": "bar", + "hello": "world", + } + testAnnotationsPart2 := Annotations{ + "bar": "foo", + "hello": "earth", + } + expected := map[string]string{ + "foo": "bar", + "bar": "foo", + "hello": "world", + } + assert.Equal(t, expected, map[string]string(testAnnotationsPart1.Merge(testAnnotationsPart2))) + + // Test that nil annotations can still be appended to + testAnnotationsPart3 := Annotations{ + "hello": "world", + } + var empty Annotations + assert.Equal(t, map[string]string(testAnnotationsPart3), 
map[string]string(empty.Merge(testAnnotationsPart3))) +} diff --git a/external/ceph.rook.io/v1/cleanup.go b/external/ceph.rook.io/v1/cleanup.go new file mode 100644 index 00000000..e17e82ae --- /dev/null +++ b/external/ceph.rook.io/v1/cleanup.go @@ -0,0 +1,47 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // SanitizeDataSourceZero uses /dev/zero as sanitize source + SanitizeDataSourceZero SanitizeDataSourceProperty = "zero" + + // SanitizeDataSourceRandom uses `shred's default entropy source + SanitizeDataSourceRandom SanitizeDataSourceProperty = "random" + + // SanitizeMethodComplete will sanitize everything on the disk + SanitizeMethodComplete SanitizeMethodProperty = "complete" + + // SanitizeMethodQuick will sanitize metadata only on the disk + SanitizeMethodQuick SanitizeMethodProperty = "quick" + + // DeleteDataDirOnHostsConfirmation represents the validation to destroy dataDirHostPath + DeleteDataDirOnHostsConfirmation CleanupConfirmationProperty = "yes-really-destroy-data" +) + +// HasDataDirCleanPolicy returns whether the cluster has a data dir policy +func (c *CleanupPolicySpec) HasDataDirCleanPolicy() bool { + return c.Confirmation == DeleteDataDirOnHostsConfirmation +} + +func (c *SanitizeMethodProperty) String() string { + return string(*c) +} + +func (c *SanitizeDataSourceProperty) String() string { + return string(*c) +} diff --git a/external/ceph.rook.io/v1/cluster.go 
b/external/ceph.rook.io/v1/cluster.go new file mode 100644 index 00000000..36e800e0 --- /dev/null +++ b/external/ceph.rook.io/v1/cluster.go @@ -0,0 +1,57 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// RequireMsgr2 checks if the network settings require the msgr2 protocol +func (c *ClusterSpec) RequireMsgr2() bool { + if c.Network.Connections == nil { + return false + } + if c.Network.Connections.RequireMsgr2 { + return true + } + if c.Network.Connections.Compression != nil && c.Network.Connections.Compression.Enabled { + return true + } + if c.Network.Connections.Encryption != nil && c.Network.Connections.Encryption.Enabled { + return true + } + return false +} + +// RequireMsgr2 checks if the network settings require the msgr2 protocol +func (c *ClusterSpec) NetworkEncryptionEnabled() bool { + if c.Network.Connections == nil { + return false + } + if c.Network.Connections.Encryption == nil { + return false + } + return c.Network.Connections.Encryption.Enabled +} + +func (c *ClusterSpec) IsStretchCluster() bool { + return c.Mon.StretchCluster != nil && len(c.Mon.StretchCluster.Zones) > 0 +} + +func (c *ClusterSpec) ZonesRequired() bool { + return c.IsStretchCluster() || len(c.Mon.Zones) > 0 +} + +func (c *CephCluster) GetStatusConditions() *[]Condition { + return &c.Status.Conditions +} diff --git a/external/ceph.rook.io/v1/doc.go b/external/ceph.rook.io/v1/doc.go new file mode 100644 index 
00000000..b8774f97 --- /dev/null +++ b/external/ceph.rook.io/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register + +// Package v1 is the v1 version of the API. +// +groupName=ceph.rook.io +package v1 diff --git a/external/ceph.rook.io/v1/filesystem.go b/external/ceph.rook.io/v1/filesystem.go new file mode 100644 index 00000000..d5128ec1 --- /dev/null +++ b/external/ceph.rook.io/v1/filesystem.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +func (c *CephFilesystem) GetStatusConditions() *[]Condition { + return &c.Status.Conditions +} diff --git a/external/ceph.rook.io/v1/keys.go b/external/ceph.rook.io/v1/keys.go new file mode 100644 index 00000000..88467d31 --- /dev/null +++ b/external/ceph.rook.io/v1/keys.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + KeyAll = "all" + KeyMds KeyType = "mds" + KeyRgw KeyType = "rgw" + KeyMon KeyType = "mon" + KeyMonArbiter KeyType = "arbiter" + KeyMgr KeyType = "mgr" + KeyDashboard KeyType = "dashboard" + KeyOSDPrepare KeyType = "prepareosd" + KeyRotation KeyType = "keyrotation" + KeyOSD KeyType = "osd" + KeyCleanup KeyType = "cleanup" + KeyMonitoring KeyType = "monitoring" + KeyCrashCollector KeyType = "crashcollector" + KeyClusterMetadata KeyType = "clusterMetadata" + KeyCephExporter KeyType = "exporter" + KeyCmdReporter KeyType = "cmdreporter" +) diff --git a/external/ceph.rook.io/v1/labels.go b/external/ceph.rook.io/v1/labels.go new file mode 100644 index 00000000..d019aba2 --- /dev/null +++ b/external/ceph.rook.io/v1/labels.go @@ -0,0 +1,218 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // SkipReconcileLabelKey is a label indicating that the pod should not be reconciled + SkipReconcileLabelKey = "ceph.rook.io/do-not-reconcile" +) + +// LabelsSpec is the main spec label for all daemons +type LabelsSpec map[KeyType]Labels + +// KeyType type safety +type KeyType string + +// Labels are label for a given daemons +type Labels map[string]string + +func (a LabelsSpec) All() Labels { + return a[KeyAll] +} + +// GetMgrLabels returns the Labels for the MGR service +func GetMgrLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyMgr) +} + +// GetDashboardLabels returns the Labels for the Dashboard service +func GetDashboardLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyDashboard) +} + +// GetMonLabels returns the Labels for the MON service +func GetMonLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyMon) +} + +// GetKeyRotationLabels returns labels for the key Rotation job +func GetKeyRotationLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyRotation) +} + +// GetOSDPrepareLabels returns the Labels for the OSD prepare job +func GetOSDPrepareLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyOSDPrepare) +} + +// GetOSDLabels returns the Labels for the OSD service +func GetOSDLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyOSD) +} + +// GetCleanupLabels returns the Labels for the cleanup job +func GetCleanupLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyCleanup) +} + +// GetMonitoringLabels returns the Labels for monitoring resources +func GetMonitoringLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyMonitoring) +} + +// GetCrashCollectorLabels returns the Labels for the crash collector resources +func GetCrashCollectorLabels(a LabelsSpec) Labels { + return 
mergeAllLabelsWithKey(a, KeyCrashCollector) +} + +func GetCephExporterLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyCephExporter) +} + +func GetCmdReporterLabels(a LabelsSpec) Labels { + return mergeAllLabelsWithKey(a, KeyCmdReporter) +} + +func mergeAllLabelsWithKey(a LabelsSpec, name KeyType) Labels { + all := a.All() + if all != nil { + return all.Merge(a[name]) + } + return a[name] +} + +// ApplyToObjectMeta adds labels to object meta unless the keys are already defined. +func (a Labels) ApplyToObjectMeta(t *metav1.ObjectMeta) { + if t.Labels == nil { + t.Labels = map[string]string{} + } + for k, v := range a { + if _, ok := t.Labels[k]; !ok { + t.Labels[k] = v + } + } +} + +// OverwriteApplyToObjectMeta adds labels to object meta, overwriting keys that are already defined. +func (a Labels) OverwriteApplyToObjectMeta(t *metav1.ObjectMeta) { + if t.Labels == nil { + t.Labels = map[string]string{} + } + for k, v := range a { + t.Labels[k] = v + } +} + +// Merge returns a Labels which results from merging the attributes of the +// original Labels with the attributes of the supplied one. The supplied +// Labels attributes will override the original ones if defined. +func (a Labels) Merge(with Labels) Labels { + ret := Labels{} + for k, v := range a { + if _, ok := ret[k]; !ok { + ret[k] = v + } + } + for k, v := range with { + if _, ok := ret[k]; !ok { + ret[k] = v + } + } + return ret +} + +// ToValidDNSLabel converts a given string to a valid DNS-1035 spec label. The DNS-1035 spec +// follows the regex '[a-z]([-a-z0-9]*[a-z0-9])?' and is at most 63 chars long. DNS-1035 is used +// over DNS-1123 because it is more strict. Kubernetes docs are not always clear when a DNS_LABEL is +// supposed to be 1035 or 1123 compliant, so we use the more strict version for ease of use. +// - Any input symbol that is not valid is converted to a dash ('-'). +// - Multiple resultant dashes in a row are compressed to a single dash. 
+// - If the starting character is a number, a 'd' is prepended to preserve the number. +// - Any non-alphanumeric starting or ending characters are removed. +// - If the resultant string is longer than the maximum-allowed 63 characters], characters are +// removed from the middle and replaced with a double dash ('--') to reduce the string to 63 +// characters. +func ToValidDNSLabel(input string) string { + maxl := validation.DNS1035LabelMaxLength + + if input == "" { + return "" + } + + outbuf := make([]byte, len(input)+1) + j := 0 // position in output buffer + last := byte('-') + for _, c := range []byte(input) { + switch { + case c >= 'a' && c <= 'z': + outbuf[j] = c + case c >= '0' && c <= '9': + // if the first char is a number, add a 'd' (for decimal) in front + if j == 0 { + outbuf[j] = 'd' // for decimal + j++ + } + outbuf[j] = c + case c >= 'A' && c <= 'Z': + // convert to lower case + outbuf[j] = c - 'A' + 'a' // convert to lower case + default: + if last == '-' { + // don't write two dashes in a row + continue + } + outbuf[j] = byte('-') + } + last = outbuf[j] + j++ + } + + // set the length of the output buffer to the number of chars we copied to it so there aren't + // \0x00 chars at the end + outbuf = outbuf[:j] + + // trim any leading or trailing dashes + out := strings.Trim(string(outbuf), "-") + + // if string is longer than max length, cut content from the middle to get it to length + if len(out) > maxl { + out = cutMiddle(out, maxl) + } + + return out +} + +// don't use this function for anything less than toSize=4 chars long +func cutMiddle(input string, toSize int) string { + if len(input) <= toSize { + return input + } + + lenLeft := toSize / 2 // truncation rounds down the left side + lenRight := toSize/2 + (toSize % 2) // modulo rounds up the right side + + buf := []byte(input) + + return string(buf[:lenLeft-1]) + "--" + string(buf[len(input)-lenRight+1:]) +} diff --git a/external/ceph.rook.io/v1/labels_test.go 
b/external/ceph.rook.io/v1/labels_test.go new file mode 100644 index 00000000..71cd1a73 --- /dev/null +++ b/external/ceph.rook.io/v1/labels_test.go @@ -0,0 +1,329 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestCephLabelsMerge(t *testing.T) { + // No Labels defined + testLabels := LabelsSpec{} + a := GetOSDLabels(testLabels) + assert.Nil(t, a) + + // Only a specific component labels without "all" + testLabels = LabelsSpec{ + "mgr": {"mgrkey": "mgrval"}, + "mon": {"monkey": "monval"}, + "osd": {"osdkey": "osdval"}, + "rgw": {"rgwkey": "rgwval"}, + "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, + } + a = GetMgrLabels(testLabels) + assert.Equal(t, "mgrval", a["mgrkey"]) + assert.Equal(t, 1, len(a)) + a = GetMonLabels(testLabels) + assert.Equal(t, "monval", a["monkey"]) + assert.Equal(t, 1, len(a)) + a = GetOSDLabels(testLabels) + assert.Equal(t, "osdval", a["osdkey"]) + assert.Equal(t, 1, len(a)) + + // No Labels matching the component + testLabels = LabelsSpec{ + "mgr": {"mgrkey": "mgrval"}, + } + a = GetMonLabels(testLabels) + assert.Nil(t, a) + + // Merge with "all" + testLabels = LabelsSpec{ + "all": {"allkey1": "allval1", "allkey2": "allval2"}, + "mgr": {"mgrkey": "mgrval"}, + "cmdreporter": {"detect": "myversion"}, + } + a 
= GetMonLabels(testLabels) + assert.Equal(t, "allval1", a["allkey1"]) + assert.Equal(t, "allval2", a["allkey2"]) + assert.Equal(t, 2, len(a)) + a = GetMgrLabels(testLabels) + assert.Equal(t, "mgrval", a["mgrkey"]) + assert.Equal(t, "allval1", a["allkey1"]) + assert.Equal(t, "allval2", a["allkey2"]) + assert.Equal(t, 3, len(a)) + a = GetCmdReporterLabels(testLabels) + assert.Equal(t, "myversion", a["detect"]) + assert.Equal(t, "allval1", a["allkey1"]) + assert.Equal(t, "allval2", a["allkey2"]) + assert.Equal(t, 3, len(a)) +} + +func TestLabelsSpec(t *testing.T) { + specYaml := []byte(` +mgr: + foo: bar + hello: world +mon: +`) + + // convert the raw spec yaml into JSON + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + + // unmarshal the JSON into a strongly typed Labels spec object + var Labels LabelsSpec + err = json.Unmarshal(rawJSON, &Labels) + assert.Nil(t, err) + + // the unmarshalled Labels spec should equal the expected spec below + expected := LabelsSpec{ + "mgr": map[string]string{ + "foo": "bar", + "hello": "world", + }, + "mon": nil, + } + assert.Equal(t, expected, Labels) +} + +func TestLabelsApply(t *testing.T) { + tcs := []struct { + name string + target *metav1.ObjectMeta + input Labels + expected Labels + }{ + { + name: "it should be able to update meta with no label", + target: &metav1.ObjectMeta{}, + input: Labels{ + "foo": "bar", + }, + expected: Labels{ + "foo": "bar", + }, + }, + { + name: "it should keep the original labels when new labels are set", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "hello": "world", + }, + expected: Labels{ + "foo": "bar", + "hello": "world", + }, + }, + { + name: "it should NOT overwrite the existing keys", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "foo": "baz", + }, + expected: Labels{ + "foo": "bar", + }, + }, + } + + for _, tc := range tcs { + tc.input.ApplyToObjectMeta(tc.target) + assert.Equal(t, 
map[string]string(tc.expected), tc.target.Labels) + } +} + +func TestLabelsOverwriteApply(t *testing.T) { + tcs := []struct { + name string + target *metav1.ObjectMeta + input Labels + expected Labels + }{ + { + name: "it should be able to update meta with no label", + target: &metav1.ObjectMeta{}, + input: Labels{ + "foo": "bar", + }, + expected: Labels{ + "foo": "bar", + }, + }, + { + name: "it should keep the original labels when new labels are set", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "hello": "world", + }, + expected: Labels{ + "foo": "bar", + "hello": "world", + }, + }, + { + name: "it should overwrite the existing keys", + target: &metav1.ObjectMeta{ + Labels: Labels{ + "foo": "bar", + }, + }, + input: Labels{ + "foo": "baz", + }, + expected: Labels{ + "foo": "baz", + }, + }, + } + + for _, tc := range tcs { + tc.input.OverwriteApplyToObjectMeta(tc.target) + assert.Equal(t, map[string]string(tc.expected), tc.target.Labels) + } +} + +func TestLabelsMerge(t *testing.T) { + testLabelsPart1 := Labels{ + "foo": "bar", + "hello": "world", + } + testLabelsPart2 := Labels{ + "bar": "foo", + "hello": "earth", + } + expected := map[string]string{ + "foo": "bar", + "bar": "foo", + "hello": "world", + } + assert.Equal(t, expected, map[string]string(testLabelsPart1.Merge(testLabelsPart2))) + + // Test that nil Labels can still be appended to + testLabelsPart3 := Labels{ + "hello": "world", + } + var empty Labels + assert.Equal(t, map[string]string(testLabelsPart3), map[string]string(empty.Merge(testLabelsPart3))) +} + +func TestToValidDNSLabel(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + {"empty string", "", ""}, + {"single dash", "-", ""}, + {"multiple dashes", "----", ""}, + {"lc a", "a", "a"}, + {"lc z", "z", "z"}, + {"lc alphabet", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, + {"UC A", "A", "a"}, + {"UC Z", "Z", "z"}, + {"UC ALPHABET", 
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"}, + {"mixed case AlPhAbEt", "AbCdEfGhIjKlMnOpQrStUvWxYz", "abcdefghijklmnopqrstuvwxyz"}, + {"single 0", "0", "d0"}, + {"single 9", "9", "d9"}, + {"single 1", "1", "d1"}, + {"numbers", "01234567890", "d01234567890"}, + {"letters with numbers", "1a0b1c2d3e4f5g6h7i8j9k0", "d1a0b1c2d3e4f5g6h7i8j9k0"}, + {"single / symbol", "/", ""}, + {"single : symbol", ":", ""}, + {"single . symbol", ".", ""}, + {"bunch of symbols", "`~!@#$%^&*()_+-={}[]\\|;':\",.<>/?", ""}, + { + "alphabet with symbols", + "a~b!c@d#e$f^g&h*i(j)k_l-m+n+o[p]q{r}s|t:u;v'wz", "a-b-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t-u-v-w-x-y-z", + }, + {"multiple symbols between letters", "a//b//c", "a-b-c"}, + {"symbol before", "/a/b/c", "a-b-c"}, + {"symbol after", "a/b/c/", "a-b-c"}, + {"symbols before and after", "/a/b/c/", "a-b-c"}, + {"multiple symbols before after between", "//a//b//c//", "a-b-c"}, + {"mix of all tests except length", "//1a//B-c/d_f/../00-thing.ini/", "d1a-b-c-d-f-00-thing-ini"}, + { + "too long input -> middle trim", + "qwertyuiopqwertyuiopqwertyuiopaaqwertyuiopqwertyuiopqwertyuiopaa", + "qwertyuiopqwertyuiopqwertyuiop--wertyuiopqwertyuiopqwertyuiopaa", + }, + { + "too long input but symbols allow for no middle trim", + "/qwertyuiopqwerty/uiopqwertyuiop//qwertyuiopqwerty/uiopqwertyuiop/", + "qwertyuiopqwerty-uiopqwertyuiop-qwertyuiopqwerty-uiopqwertyuiop", + }, + { + "max allowed length but starts with number -> middle trim", + "123qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop", + "d123qwertyuiopqwertyuiopqwerty--pqwertyuiopqwertyuiopqwertyuiop", + }, + { + "max allowed length ok", + "qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop123", + "qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop123", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, ToValidDNSLabel(tt.input)) + }) + } +} + +func Test_cutMiddle(t *testing.T) { + // not an exported 
function, so don't bother with extreme cases like 0, 1, 2, or 3 len inputs + t.Run("len 8 -> 6", func(t *testing.T) { + assert.Equal(t, "ab--gh", cutMiddle("abcdefgh", 6)) + }) + t.Run("len 9 -> 6", func(t *testing.T) { + assert.Equal(t, "ab--hi", cutMiddle("abcdefghi", 6)) + }) + t.Run("len 9 -> 7", func(t *testing.T) { + assert.Equal(t, "ab--ghi", cutMiddle("abcdefghi", 7)) + }) + t.Run("len 10 -> 10", func(t *testing.T) { + assert.Equal(t, "qwertyuiop", cutMiddle("qwertyuiop", 10)) + }) + // below is what we really want to test + t.Run("len 63 -> 63", func(t *testing.T) { + assert.Equal(t, + "qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop1", + cutMiddle("qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop1", 63)) + }) + t.Run("len 64 -> 63", func(t *testing.T) { + assert.Equal(t, + "qwertyuiopqwertyuiopqwertyuiop--wertyuiopqwertyuiopqwertyuiop12", + cutMiddle("qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop12", 63)) + }) +} diff --git a/external/ceph.rook.io/v1/mirror.go b/external/ceph.rook.io/v1/mirror.go new file mode 100644 index 00000000..205aad78 --- /dev/null +++ b/external/ceph.rook.io/v1/mirror.go @@ -0,0 +1,26 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +// HasPeers returns whether the RBD mirror daemon has peer and should connect to it +func (m *MirroringPeerSpec) HasPeers() bool { + return len(m.SecretNames) != 0 +} + +func (m *FSMirroringSpec) SnapShotScheduleEnabled() bool { + return len(m.SnapshotSchedules) != 0 +} diff --git a/external/ceph.rook.io/v1/namespace.go b/external/ceph.rook.io/v1/namespace.go new file mode 100644 index 00000000..58b4b3e4 --- /dev/null +++ b/external/ceph.rook.io/v1/namespace.go @@ -0,0 +1,31 @@ +/* +Copyright 2025 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + ImplicitNamespaceKey = "" + ImplicitNamespaceVal = "" +) + +func GetRadosNamespaceName(cephBlockPoolRadosNamespace *CephBlockPoolRadosNamespace) string { + if cephBlockPoolRadosNamespace.Spec.Name == ImplicitNamespaceKey { + return ImplicitNamespaceVal + } else if cephBlockPoolRadosNamespace.Spec.Name != "" { + return cephBlockPoolRadosNamespace.Spec.Name + } + return cephBlockPoolRadosNamespace.Name +} diff --git a/external/ceph.rook.io/v1/network.go b/external/ceph.rook.io/v1/network.go new file mode 100644 index 00000000..debe0d39 --- /dev/null +++ b/external/ceph.rook.io/v1/network.go @@ -0,0 +1,196 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "net" + "strings" + + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils" + "github.com/pkg/errors" +) + +// enforceHostNetwork is a private package variable that can be set via the rook-operator-config +// setting "ROOK_ENFORCE_HOST_NETWORK". when set to "true", it lets rook create all pods with host network enabled. +// This can be used, for example, to run Rook in k8s clusters with no CNI where host networking is required +var enforceHostNetwork bool = false + +// IsMultus get whether to use multus network provider +func (n *NetworkSpec) IsMultus() bool { + return n.Provider == NetworkProviderMultus +} + +// IsHost is intended to be used to determine if the rook operator should configure +// managed pods to use host networking. +// This behavior is enabled by configuring the cephCluster with the "host" network provider. 
+// This method also maintains compatibility with the old HostNetwork setting +// which is incompatible with other network providers: HostNetwork set to true +// together with an empty or unset network provider has the same effect as +// network.Provider set to "host" +func (n *NetworkSpec) IsHost() bool { + return enforceHostNetwork || (n.HostNetwork && n.Provider == NetworkProviderDefault) || n.Provider == NetworkProviderHost +} + +func ValidateNetworkSpec(clusterNamespace string, spec NetworkSpec) error { + if spec.HostNetwork && (spec.Provider != NetworkProviderDefault) { + return errors.Errorf(`the legacy hostNetwork setting is only valid with the default network provider ("") and not with '%q'`, spec.Provider) + } + if spec.IsMultus() { + if len(spec.Selectors) == 0 { + return errors.Errorf("at least one network selector must be specified when using the %q network provider", NetworkProviderMultus) + } + + if _, err := spec.GetNetworkSelection(clusterNamespace, CephNetworkPublic); err != nil { + return errors.Wrap(err, "ceph public network selector provided for multus is invalid") + } + if _, err := spec.GetNetworkSelection(clusterNamespace, CephNetworkCluster); err != nil { + return errors.Wrap(err, "ceph cluster network selector provided for multus is invalid") + } + } + + if !spec.AddressRanges.IsEmpty() { + if !spec.IsMultus() && !spec.IsHost() { + // TODO: be sure to update docs that AddressRanges can be specified for host networking as + // well as multus so that the override configmap doesn't need to be set + return errors.Errorf("network ranges can only be specified for %q and %q network providers", NetworkProviderHost, NetworkProviderMultus) + } + if spec.IsMultus() { + if len(spec.AddressRanges.Public) > 0 && !spec.NetworkHasSelection(CephNetworkPublic) { + return errors.Errorf("public address range can only be specified for multus if there is a public network selection") + } + if len(spec.AddressRanges.Cluster) > 0 && 
!spec.NetworkHasSelection(CephNetworkCluster) { + return errors.Errorf("cluster address range can only be specified for multus if there is a cluster network selection") + } + } + } + + if err := spec.AddressRanges.Validate(); err != nil { + return err + } + + return nil +} + +func ValidateNetworkSpecUpdate(clusterNamespace string, oldSpec, newSpec NetworkSpec) error { + // Allow an attempt to enable or disable host networking, but not other provider changes + oldProvider := oldSpec.Provider + newProvider := newSpec.Provider + if oldProvider != newProvider && oldProvider != "host" && newProvider != "host" { + return errors.Errorf("invalid update: network provider change from %q to %q is not allowed", oldProvider, newProvider) + } + + return ValidateNetworkSpec(clusterNamespace, newSpec) +} + +// NetworkHasSelection returns true if the given Ceph network has a selection. +func (n *NetworkSpec) NetworkHasSelection(network CephNetworkType) bool { + s, ok := n.Selectors[network] + if !ok || s == "" { + return false + } + return true +} + +// GetNetworkSelection gets the network selection for a given Ceph network, or nil if the network +// doesn't have a selection. +func (n *NetworkSpec) GetNetworkSelection(clusterNamespace string, network CephNetworkType) (*nadv1.NetworkSelectionElement, error) { + if !n.NetworkHasSelection(network) { + return nil, nil // no selection for network + } + s := n.Selectors[network] + // From documentation of the "k8s.v1.cni.cncf.io/network-status" annotation, valid JSON inputs + // must be in list form, surrounded with brackets. The NAD utility library will only parse + // list-format JSON input. However, old versions of Rook code allowed non-list JSON objects. + // In order to support legacy users, make an attempt to turn single-JSON-object inputs into + // len(1) lists so that they parse correctly by the util library. Do not advertise this + // "feature" in documentation since it is not technically the correct format. 
+ if strings.HasPrefix(s, "{") && strings.HasSuffix(s, "}") { + s = "[" + s + "]" + } + selection, err := nadutils.ParseNetworkAnnotation(s, clusterNamespace) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %q network selector %q", network, s) + } + if len(selection) != 1 { + return nil, errors.Errorf("%q network selector %q has multiple (%d) selections, which is not supported", network, s, len(selection)) + } + return selection[0], nil +} + +// NetworkSelectionsToAnnotationValue converts NetworkAttachmentDefinition network selection +// elements to an annotation value for the "k8s.v1.cni.cncf.io/networks" annotation key. +func NetworkSelectionsToAnnotationValue(selections ...*nadv1.NetworkSelectionElement) (string, error) { + reduced := []*nadv1.NetworkSelectionElement{} + for _, s := range selections { + if s != nil { + reduced = append(reduced, s) + } + } + if len(reduced) == 0 { + return "", nil + } + b, err := json.Marshal(reduced) + if err != nil { + return "", errors.Wrap(err, "failed to convert network selections to annotation value") + } + return string(b), nil +} + +func (n *AddressRangesSpec) IsEmpty() bool { + return n == nil || len(n.Public) == 0 && len(n.Cluster) == 0 +} + +func (n *AddressRangesSpec) Validate() error { + if n.IsEmpty() { + return nil + } + + allRanges := append(n.Public, n.Cluster...) 
+ invalid := []string{} + for _, cidr := range allRanges { + _, _, err := net.ParseCIDR(string(cidr)) + if err != nil { + // returned err is "invalid CIDR: " & not more useful than invalid list below + invalid = append(invalid, string(cidr)) + } + } + if len(invalid) == 0 { + return nil + } + + return fmt.Errorf("%d network ranges are invalid: %v", len(invalid), invalid) +} + +// String turns a CIDR list into a comma-delimited string of CIDRs +func (l *CIDRList) String() string { + sl := []string{} + for _, c := range *l { + sl = append(sl, string(c)) + } + return strings.Join(sl, ", ") +} + +func SetEnforceHostNetwork(val bool) { + enforceHostNetwork = val +} + +func EnforceHostNetwork() bool { + return enforceHostNetwork +} diff --git a/external/ceph.rook.io/v1/network_test.go b/external/ceph.rook.io/v1/network_test.go new file mode 100644 index 00000000..c5b65e55 --- /dev/null +++ b/external/ceph.rook.io/v1/network_test.go @@ -0,0 +1,444 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "testing" + + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestNetworkCephSpecLegacy(t *testing.T) { + netSpecYAML := []byte(`hostNetwork: true`) + + rawJSON, err := yaml.ToJSON(netSpecYAML) + assert.Nil(t, err) + + var net NetworkSpec + + err = json.Unmarshal(rawJSON, &net) + assert.Nil(t, err) + + expected := NetworkSpec{HostNetwork: true} + + assert.Equal(t, expected, net) +} + +func TestValidateNetworkSpec(t *testing.T) { + net := NetworkSpec{ + HostNetwork: true, + Provider: NetworkProviderDefault, + } + err := ValidateNetworkSpec("", net) + assert.NoError(t, err) + + net = NetworkSpec{ + HostNetwork: true, + Provider: NetworkProviderHost, + } + err = ValidateNetworkSpec("", net) + assert.Error(t, err) + + net = NetworkSpec{ + HostNetwork: false, + Provider: NetworkProviderDefault, + } + err = ValidateNetworkSpec("", net) + assert.NoError(t, err) + + net = NetworkSpec{ + HostNetwork: false, + Provider: NetworkProviderHost, + } + err = ValidateNetworkSpec("", net) + assert.NoError(t, err) +} + +// test the NetworkSpec.IsHost method with different network providers +// Also test it in combination with the legacy +// "HostNetwork" setting. +// Also test the effect of the operator config setting +// ROOK_ENFORCE_HOST_NETWORK. +func TestNetworkCephIsHost(t *testing.T) { + net := NetworkSpec{HostNetwork: false} + + net.Provider = NetworkProviderHost + assert.True(t, net.IsHost()) + + net.Provider = NetworkProviderDefault + net.HostNetwork = true + assert.True(t, net.IsHost()) + + // enforcing does not change the result if host network is selected + // anyway in the cluster. 
+ SetEnforceHostNetwork(true) + assert.True(t, net.IsHost()) + + SetEnforceHostNetwork(false) + assert.True(t, net.IsHost()) + + net = NetworkSpec{} + net.Provider = NetworkProviderDefault + net.HostNetwork = false + assert.False(t, net.IsHost()) + + net = NetworkSpec{} + net.Provider = NetworkProviderMultus + net.HostNetwork = false + assert.False(t, net.IsHost()) + + // test that not enforcing does not change the result. + SetEnforceHostNetwork(false) + assert.False(t, net.IsHost()) + + // test enforcing of host network + SetEnforceHostNetwork(true) + assert.True(t, net.IsHost()) + + SetEnforceHostNetwork(false) + net = NetworkSpec{} + net.Provider = NetworkProviderMultus + net.HostNetwork = true + assert.False(t, net.IsHost()) + + // test with nonempty but invalid provider + net = NetworkSpec{} + net.HostNetwork = true + net.Provider = "foo" + SetEnforceHostNetwork(false) + assert.False(t, net.IsHost()) + SetEnforceHostNetwork(true) + assert.True(t, net.IsHost()) +} + +func TestNetworkSpec(t *testing.T) { + netSpecYAML := []byte(` +provider: host +selectors: + server: enp2s0f0 + broker: enp2s0f0`) + + rawJSON, err := yaml.ToJSON(netSpecYAML) + assert.Nil(t, err) + + var net NetworkSpec + + err = json.Unmarshal(rawJSON, &net) + assert.Nil(t, err) + + expected := NetworkSpec{ + Provider: "host", + Selectors: map[CephNetworkType]string{ + "server": "enp2s0f0", + "broker": "enp2s0f0", + }, + } + + assert.Equal(t, expected, net) +} + +func TestAddressRangesSpec_IsEmpty(t *testing.T) { + var specNil *AddressRangesSpec + assert.True(t, specNil.IsEmpty()) + + empty := &AddressRangesSpec{} + assert.True(t, empty.IsEmpty()) + + someCIDR := CIDR("1.1.1.1/16") + nonEmptyTests := []AddressRangesSpec{ + {Public: []CIDR{someCIDR}}, + {Public: []CIDR{someCIDR, someCIDR}}, + {Cluster: []CIDR{someCIDR}}, + {Cluster: []CIDR{someCIDR, someCIDR}}, + {Public: []CIDR{someCIDR}, Cluster: []CIDR{someCIDR}}, + {Public: []CIDR{someCIDR, someCIDR}, Cluster: []CIDR{someCIDR, someCIDR}}, + } 
+ for _, spec := range nonEmptyTests { + assert.False(t, spec.IsEmpty()) + } +} + +func TestAddressRangesSpec_Validate(t *testing.T) { + // only test a small subset of CIDRs since Rook should definitely use the Go stdlib underneath + v1 := CIDR("123.123.123.123/24") + v2 := CIDR("1.0.0.1/24") + v3 := CIDR("2000::/64") + v4 := CIDR("2000:2000:2000:2000:2000:2000:2000:2000/64") + v5 := CIDR("2000::128.128.128.128/96") // ipv4 expressed as subnet of ipv6 is valid + + // invalid CIDRs + i1 := CIDR("123.123.123/24") + i2 := CIDR("123.123.123.123/33") + i4 := CIDR("2000/64") + i3 := CIDR("2000:/64") + i5 := CIDR("2000::128.128.128.128/129") + + tests := []struct { + name string + spec AddressRangesSpec + numErrs int + }{ + {"empty", AddressRangesSpec{}, 0}, + {"all valid", AddressRangesSpec{ + Public: []CIDR{v1}, + Cluster: []CIDR{v2, v3, v4, v5}, + }, 0}, + {"all invalid", AddressRangesSpec{ + Public: []CIDR{i1}, + Cluster: []CIDR{i2, i3, i4, i5}, + }, 5}, + {"public only, valid", AddressRangesSpec{Public: []CIDR{v1}}, 0}, + {"public only, invalid", AddressRangesSpec{Public: []CIDR{i1}}, 1}, + {"cluster only, valid", AddressRangesSpec{Cluster: []CIDR{v2}}, 0}, + {"cluster only, invalid", AddressRangesSpec{Cluster: []CIDR{i2}}, 1}, + {"public valid, cluster valid", AddressRangesSpec{ + Public: []CIDR{v1}, + Cluster: []CIDR{v2}, + }, 0}, + {"public valid, cluster invalid", AddressRangesSpec{ + Public: []CIDR{v2}, + Cluster: []CIDR{i2}, + }, 1}, + {"public invalid, cluster valid", AddressRangesSpec{ + Public: []CIDR{i3}, + Cluster: []CIDR{v2}, + }, 1}, + {"public invalid, cluster invalid", AddressRangesSpec{ + Public: []CIDR{i3}, + Cluster: []CIDR{i4}, + }, 2}, + {"both, valid and invalid", AddressRangesSpec{ + Public: []CIDR{v1, i2}, + Cluster: []CIDR{v3, i4}, + }, 2}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.spec.Validate() + if tt.numErrs > 0 { + assert.Error(t, err) + t.Log(err) + assert.ErrorContains(t, err, fmt.Sprintf("%d 
network ranges are invalid", tt.numErrs)) + } else { + assert.NoError(t, err) + } + }) + } +} + +// these two functions are should almost always used together and can be unit tested together more +// easily than apart +func TestNetworkSpec_GetNetworkSelection_NetworkSelectionsToAnnotationValue(t *testing.T) { + // inputs are the same definition expressed in json format or non-json format + input1 := func(json bool) string { + if json { + return `[{"name": "macvlan", "interface": "net1"}]` + } + return "macvlan@net1" + } + input2 := func(json bool) string { + if json { + return `[{"name": "macvlan", "interface": "net2"}]` + } + return "macvlan@net2" + } + + // allow running the test suite with json-format or non-json-format inputs + testGetNetworkAnnotationValue := func(t *testing.T, json bool) { + t.Helper() + + tests := []struct { + name string + specSelectors map[CephNetworkType]string + cephNets []CephNetworkType + want string + wantErr bool + }{ + { + name: "public want public", + specSelectors: map[CephNetworkType]string{ + "public": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, + wantErr: false, + }, + { + name: "cluster want cluster", + specSelectors: map[CephNetworkType]string{ + "cluster": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkCluster}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, + wantErr: false, + }, + { + name: "public want cluster", + specSelectors: map[CephNetworkType]string{ + "public": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkCluster}, + want: ``, + wantErr: false, + }, + { + name: "cluster want public", + specSelectors: map[CephNetworkType]string{ + "cluster": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic}, + want: ``, + wantErr: false, + }, + { + name: "nothing want public", + specSelectors: map[CephNetworkType]string{}, + cephNets: []CephNetworkType{CephNetworkPublic}, + 
want: ``, + wantErr: false, + }, + { + name: "nothing want cluster", + specSelectors: map[CephNetworkType]string{}, + cephNets: []CephNetworkType{CephNetworkCluster}, + want: ``, + wantErr: false, + }, + { + name: "unknown want public", + specSelectors: map[CephNetworkType]string{ + "uncleKnown": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic}, + want: ``, + wantErr: false, + }, + { + name: "unknown want cluster", + specSelectors: map[CephNetworkType]string{ + "uncleKnown": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkCluster}, + want: ``, + wantErr: false, + }, + { + name: "public want public and cluster", + specSelectors: map[CephNetworkType]string{ + "public": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, + wantErr: false, + }, + { + name: "cluster want public and cluster", + specSelectors: map[CephNetworkType]string{ + "cluster": input1(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, + wantErr: false, + }, + { + name: "public and cluster want public and cluster", + specSelectors: map[CephNetworkType]string{ + "public": input1(json), + "cluster": input2(json), + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"},{"name":"macvlan","namespace":"ns","interface":"net2"}]`, + wantErr: false, + }, + { + name: "support mixed json-non-json spec", + specSelectors: map[CephNetworkType]string{ + "public": input1(json), + "cluster": input2(!json), // invert json-ness of this one + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: `[{"name":"macvlan","namespace":"ns","interface":"net1"},{"name":"macvlan","namespace":"ns","interface":"net2"}]`, + wantErr: false, + }, + { + name: "public and cluster want nothing", + 
specSelectors: map[CephNetworkType]string{ + "public": input1(json), + "cluster": input2(json), + }, + cephNets: []CephNetworkType{}, + want: ``, + wantErr: false, + }, + { + name: "legacy single json object support", + specSelectors: map[CephNetworkType]string{ + "public": `{"name": "legacyJsonObject"}`, + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: `[{"name":"legacyJsonObject","namespace":"ns"}]`, + wantErr: false, + }, + { + name: "invalid network selections", + specSelectors: map[CephNetworkType]string{ + "public": `[{"name": "jsonWithNoClosingBracket"}`, + "cluster": "multus%net", + }, + cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, + want: ``, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + n := &NetworkSpec{ + Selectors: tt.specSelectors, + } + + selections := []*nadv1.NetworkSelectionElement{} + errs := []error{} + for _, net := range tt.cephNets { + s, err := n.GetNetworkSelection("ns", net) + if err != nil { + errs = append(errs, err) + } + selections = append(selections, s) + } + got, err := NetworkSelectionsToAnnotationValue(selections...) + if err != nil { + errs = append(errs, err) + } + + assert.Equal(t, tt.wantErr, len(errs) > 0, "wantErr %v but got errs %v", tt.wantErr, errs) + assert.Equal(t, tt.want, got) + }) + } + } + + // Actual subtests + t.Run("non-JSON input", func(t *testing.T) { + testGetNetworkAnnotationValue(t, false) + }) + t.Run("JSON input", func(t *testing.T) { + testGetNetworkAnnotationValue(t, true) + }) +} diff --git a/external/ceph.rook.io/v1/nfs.go b/external/ceph.rook.io/v1/nfs.go new file mode 100644 index 00000000..37d5a3cf --- /dev/null +++ b/external/ceph.rook.io/v1/nfs.go @@ -0,0 +1,105 @@ +/* +Copyright 2022 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "reflect" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" +) + +// KerberosEnabled returns true if Kerberos is enabled from the spec. +func (n *NFSSecuritySpec) KerberosEnabled() bool { + if n == nil { + return false + } + if n.Kerberos != nil { + return true + } + return false +} + +// GetPrincipalName gets the principal name for the Kerberos spec or the default value if it is unset. +func (k *KerberosSpec) GetPrincipalName() string { + if k.PrincipalName == "" { + return "nfs" + } + return k.PrincipalName +} + +func (n *CephNFS) IsHostNetwork(c *ClusterSpec) bool { + if n.Spec.Server.HostNetwork != nil { + return *n.Spec.Server.HostNetwork + } + return c.Network.IsHost() +} + +func (sec *NFSSecuritySpec) Validate() error { + if sec == nil { + return nil + } + + if sec.SSSD != nil { + sidecar := sec.SSSD.Sidecar + if sidecar == nil { + return errors.New("System Security Services Daemon (SSSD) is enabled, but no runtime option is specified; supported: [runInSidecar]") + } + + if sidecar.Image == "" { + return errors.New("System Security Services Daemon (SSSD) sidecar is enabled, but no image is specified") + } + + if volSourceExistsAndIsEmpty(sidecar.SSSDConfigFile.VolumeSource.ToKubernetesVolumeSource()) { + return errors.New("System Security Services Daemon (SSSD) sidecar is enabled with config from a VolumeSource, but no source is specified") + } + + subDirs := map[string]bool{} + for _, additionalFile := range sidecar.AdditionalFiles { + subDir := additionalFile.SubPath + if subDir == "" { + return errors.New("System Security 
Services Daemon (SSSD) sidecar is enabled with additional file having no subPath specified") + } + + if volSourceExistsAndIsEmpty(additionalFile.VolumeSource.ToKubernetesVolumeSource()) { + return errors.Errorf("System Security Services Daemon (SSSD) sidecar is enabled with additional file (subPath %q), but no source is specified", subDir) + } + + if _, ok := subDirs[subDir]; ok { + return errors.Errorf("System Security Services Daemon (SSSD) sidecar is enabled with additional file containing duplicate subPath %q", subDir) + } + subDirs[subDir] = true + } + } + + krb := sec.Kerberos + if krb != nil { + if volSourceExistsAndIsEmpty(krb.ConfigFiles.VolumeSource.ToKubernetesVolumeSource()) { + return errors.New("Kerberos is enabled with config from a VolumeSource, but no source is specified") + } + + if volSourceExistsAndIsEmpty(krb.KeytabFile.VolumeSource.ToKubernetesVolumeSource()) { + return errors.New("Kerberos is enabled with keytab from a VolumeSource, but no source is specified") + } + } + + return nil +} + +func volSourceExistsAndIsEmpty(v *v1.VolumeSource) bool { + return v != nil && reflect.DeepEqual(*v, v1.VolumeSource{}) +} diff --git a/external/ceph.rook.io/v1/nfs_test.go b/external/ceph.rook.io/v1/nfs_test.go new file mode 100644 index 00000000..a5a2a6d7 --- /dev/null +++ b/external/ceph.rook.io/v1/nfs_test.go @@ -0,0 +1,224 @@ +/* +Copyright 2022 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +func TestNFSSecuritySpec_Validate(t *testing.T) { + isFailing := true + isOkay := false + + withSSSD := func(sssd *SSSDSpec) *NFSSecuritySpec { + return &NFSSecuritySpec{ + SSSD: sssd, + } + } + + configMapVolumeSource := &ConfigFileVolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{}, + } + + tests := []struct { + name string + security *NFSSecuritySpec + wantErr bool + }{ + {"security = nil", nil, isOkay}, + {"security empty", &NFSSecuritySpec{}, isOkay}, + {"security.sssd empty", withSSSD(&SSSDSpec{}), isFailing}, + { + "security.sssd.sidecar empty", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{}, + }), + isFailing, + }, + { + "security.sssd.sidecar fully specified", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + SSSDConfigFile: SSSDSidecarConfigFile{ + VolumeSource: configMapVolumeSource, + }, + }, + }), + isOkay, + }, + { + "security.sssd.sidecar missing image", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "", + SSSDConfigFile: SSSDSidecarConfigFile{ + VolumeSource: configMapVolumeSource, + }, + }, + }), + isFailing, + }, + { + "security.sssd.sidecar.sssdConfigFile empty", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + SSSDConfigFile: SSSDSidecarConfigFile{}, + }, + }), + isOkay, + }, + { + "security.sssd.sidecar.sssdConfigFile.volumeSource empty", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + SSSDConfigFile: SSSDSidecarConfigFile{ + VolumeSource: &ConfigFileVolumeSource{}, + }, + }, + }), + isFailing, + }, + { + "security.sssd.sidecar.additionalFiles empty", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + AdditionalFiles: AdditionalVolumeMounts{}, + }, + }), + isOkay, + }, + { + "security.sssd.sidecar.additionalFiles multiple valid", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + AdditionalFiles: AdditionalVolumeMounts{ + 
{SubPath: "one", VolumeSource: configMapVolumeSource}, + {SubPath: "two", VolumeSource: configMapVolumeSource}, + {SubPath: "three", VolumeSource: configMapVolumeSource}, + }, + }, + }), + isOkay, + }, + { + "security.sssd.sidecar.additionalFiles one empty subDir", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + AdditionalFiles: AdditionalVolumeMounts{ + {SubPath: "one", VolumeSource: configMapVolumeSource}, + {SubPath: "", VolumeSource: configMapVolumeSource}, + {SubPath: "three", VolumeSource: configMapVolumeSource}, + }, + }, + }), + isFailing, + }, + { + "security.sssd.sidecar.additionalFiles duplicate subDirs", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + AdditionalFiles: AdditionalVolumeMounts{ + {SubPath: "one", VolumeSource: configMapVolumeSource}, + {SubPath: "two", VolumeSource: configMapVolumeSource}, + {SubPath: "one", VolumeSource: configMapVolumeSource}, + }, + }, + }), + isFailing, + }, + { + "security.sssd.sidecar.additionalFiles one vol source empty", + withSSSD(&SSSDSpec{ + Sidecar: &SSSDSidecar{ + Image: "myimage", + AdditionalFiles: AdditionalVolumeMounts{ + {SubPath: "one", VolumeSource: configMapVolumeSource}, + {SubPath: "", VolumeSource: &ConfigFileVolumeSource{}}, + {SubPath: "three", VolumeSource: configMapVolumeSource}, + }, + }, + }), + isFailing, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := tt.security.Validate(); (err != nil) != tt.wantErr { + t.Errorf("NFSSecuritySpec.Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestNFSSecuritySpec_KerberosEnabled(t *testing.T) { + t.Run("nil security spec", func(t *testing.T) { + var sec *NFSSecuritySpec + assert.False(t, sec.KerberosEnabled()) + }) + + t.Run("empty security spec", func(t *testing.T) { + sec := &NFSSecuritySpec{} + assert.False(t, sec.KerberosEnabled()) + }) + + t.Run("empty kerberos spec", func(t *testing.T) { + sec := &NFSSecuritySpec{ + Kerberos: &KerberosSpec{}, 
+ } + assert.True(t, sec.KerberosEnabled()) + }) + + t.Run("filled in kerberos spec", func(t *testing.T) { + sec := &NFSSecuritySpec{ + Kerberos: &KerberosSpec{ + PrincipalName: "mom", + }, + } + assert.True(t, sec.KerberosEnabled()) + }) +} + +func TestKerberosSpec_GetPrincipalName(t *testing.T) { + t.Run("empty kerberos spec", func(t *testing.T) { + k := &KerberosSpec{} + assert.Equal(t, "nfs", k.GetPrincipalName()) + }) + + t.Run("principal name nfs", func(t *testing.T) { + k := &KerberosSpec{ + PrincipalName: "nfs", + } + assert.Equal(t, "nfs", k.GetPrincipalName()) + }) + + t.Run("principal name set", func(t *testing.T) { + k := &KerberosSpec{ + PrincipalName: "set", + } + assert.Equal(t, "set", k.GetPrincipalName()) + }) +} diff --git a/external/ceph.rook.io/v1/nvmeof.go b/external/ceph.rook.io/v1/nvmeof.go new file mode 100644 index 00000000..1d2b2811 --- /dev/null +++ b/external/ceph.rook.io/v1/nvmeof.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// IsHostNetwork returns true if host networking is enabled for this gateway. +// If not explicitly set on the gateway spec, it inherits from the cluster spec. 
+func (n *CephNVMeOFGateway) IsHostNetwork(c *ClusterSpec) bool { + if n.Spec.HostNetwork != nil { + return *n.Spec.HostNetwork + } + return c.Network.IsHost() +} diff --git a/external/ceph.rook.io/v1/object.go b/external/ceph.rook.io/v1/object.go new file mode 100644 index 00000000..9aee85e9 --- /dev/null +++ b/external/ceph.rook.io/v1/object.go @@ -0,0 +1,203 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ServiceServingCertKey = "service.beta.openshift.io/serving-cert-secret-name" + +// 38 is the max length of a ceph store name as total length of the resource name cannot be more than 63 characters limit +// and there is a configmap which is formed by appending `rook-ceph-rgw--mime-types` +// so over all it brings up to (63-14-11 = 38) characters for the store name +const objectStoreNameMaxLen = 38 + +func (s *ObjectStoreSpec) IsMultisite() bool { + return s.Zone.Name != "" +} + +func (s *ObjectStoreSpec) IsTLSEnabled() bool { + return s.Gateway.SecurePort != 0 && (s.Gateway.SSLCertificateRef != "" || s.GetServiceServingCert() != "") +} + +func (s *ObjectStoreSpec) IsRGWDashboardEnabled() bool { + return s.Gateway.DashboardEnabled == nil || *s.Gateway.DashboardEnabled +} + +func (s *ObjectStoreSpec) GetPort() (int32, error) { + if s.IsTLSEnabled() { + return s.Gateway.SecurePort, nil + } else if 
s.Gateway.Port != 0 { + return s.Gateway.Port, nil + } + return -1, errors.New("At least one of Port or SecurePort should be non-zero") +} + +func (s *ObjectStoreSpec) IsExternal() bool { + return len(s.Gateway.ExternalRgwEndpoints) != 0 +} + +func (s *ObjectStoreSpec) IsHostNetwork(c *ClusterSpec) bool { + if s.Gateway.HostNetwork != nil { + return *s.Gateway.HostNetwork + } + return c.Network.IsHost() +} + +func (s *ObjectRealmSpec) IsPullRealm() bool { + return s.Pull.Endpoint != "" +} + +// ValidateObjectSpec validate the object store arguments +func ValidateObjectSpec(gs *CephObjectStore) error { + if gs.Name == "" { + return errors.New("missing name") + } + if gs.Namespace == "" { + return errors.New("missing namespace") + } + + // validate the object store name only if it is not an external cluster + // as external cluster won't create the rgw daemon and it's other resources + // and there is some legacy external cluster which has more length of objectstore + // so to run them successfully we are not validating the objectstore name + if !gs.Spec.IsExternal() { + if len(gs.Name) > objectStoreNameMaxLen { + return errors.New("object store name cannot be longer than 38 characters") + } + } + securePort := gs.Spec.Gateway.SecurePort + if securePort < 0 || securePort > 65535 { + return errors.Errorf("securePort value of %d must be between 0 and 65535", securePort) + } + if gs.Spec.Gateway.Port <= 0 && gs.Spec.Gateway.SecurePort <= 0 { + return errors.New("invalid create: either of port or securePort fields should be not be zero") + } + + // check hosting spec + if gs.Spec.Hosting != nil { + if gs.Spec.Hosting.AdvertiseEndpoint != nil { + ep := gs.Spec.Hosting.AdvertiseEndpoint + errList := validation.IsDNS1123Subdomain(ep.DnsName) + if len(errList) > 0 { + return errors.Errorf("hosting.advertiseEndpoint.dnsName %q must be a valid DNS-1123 subdomain: %v", ep.DnsName, errList) + } + if ep.Port < 1 || ep.Port > 65535 { + return 
errors.Errorf("hosting.advertiseEndpoint.port %d must be between 1 and 65535", ep.Port) + } + } + dnsNameErrs := []string{} + for _, dnsName := range gs.Spec.Hosting.DNSNames { + errs := validation.IsDNS1123Subdomain(dnsName) + if len(errs) > 0 { + // errors do not report the domains that are errored; add them to help users + errs = append(errs, fmt.Sprintf("error on dns name %q", dnsName)) + dnsNameErrs = append(dnsNameErrs, errs...) + } + } + if len(dnsNameErrs) > 0 { + return errors.Errorf("one or more hosting.dnsNames is not a valid DNS-1123 subdomain: %v", dnsNameErrs) + } + } + + return nil +} + +func (s *ObjectStoreSpec) GetServiceServingCert() string { + if s.Gateway.Service != nil { + return s.Gateway.Service.Annotations[ServiceServingCertKey] + } + return "" +} + +// GetServiceName gets the name of the Rook-created CephObjectStore service. +// This method helps ensure adherence to stable, documented behavior (API). +func (c *CephObjectStore) GetServiceName() string { + return "rook-ceph-rgw-" + c.GetName() +} + +// GetServiceDomainName gets the domain name of the Rook-created CephObjectStore service. +// This method helps ensure adherence to stable, documented behavior (API). +func (c *CephObjectStore) GetServiceDomainName() string { + return fmt.Sprintf("%s.%s.svc", c.GetServiceName(), c.GetNamespace()) +} + +func (c *CephObjectStore) AdvertiseEndpointIsSet() bool { + return c.Spec.Hosting != nil && c.Spec.Hosting.AdvertiseEndpoint != nil && + c.Spec.Hosting.AdvertiseEndpoint.DnsName != "" && c.Spec.Hosting.AdvertiseEndpoint.Port != 0 +} + +// GetAdvertiseEndpoint returns address, port, and isTls information about the advertised endpoint +// for the CephObjectStore. This method helps ensure adherence to stable, documented behavior (API). 
+func (c *CephObjectStore) GetAdvertiseEndpoint() (string, int32, bool, error) { + port, err := c.Spec.GetPort() + if err != nil { + return "", 0, false, err + } + isTls := c.Spec.IsTLSEnabled() + + address := c.GetServiceDomainName() // service domain name is the default advertise address + if c.Spec.IsExternal() { + // for external clusters, the first external RGW endpoint is the default advertise address + address = c.Spec.Gateway.ExternalRgwEndpoints[0].String() + } + + // if users override the advertise endpoint themselves, these value take priority + if c.AdvertiseEndpointIsSet() { + address = c.Spec.Hosting.AdvertiseEndpoint.DnsName + port = c.Spec.Hosting.AdvertiseEndpoint.Port + isTls = c.Spec.Hosting.AdvertiseEndpoint.UseTls + } + + return address, port, isTls, nil +} + +// GetAdvertiseEndpointUrl gets the fully-formed advertised endpoint URL for the CephObjectStore. +// This method helps ensure adherence to stable, documented behavior (API). +func (c *CephObjectStore) GetAdvertiseEndpointUrl() (string, error) { + address, port, isTls, err := c.GetAdvertiseEndpoint() + if err != nil { + return "", err + } + + protocol := "http" + if isTls { + protocol = "https" + } + return fmt.Sprintf("%s://%s:%d", protocol, address, port), nil +} + +func (c *CephObjectStore) GetStatusConditions() *[]Condition { + return &c.Status.Conditions +} + +func (z *CephObjectZone) GetStatusConditions() *[]Condition { + return &z.Status.Conditions +} + +// String returns an addressable string representation of the EndpointAddress. 
+func (e *EndpointAddress) String() string { + // hostname is easier to read, and it is probably less likely to change, so prefer it over IP + if e.Hostname != "" { + return e.Hostname + } + return e.IP +} diff --git a/external/ceph.rook.io/v1/object_test.go b/external/ceph.rook.io/v1/object_test.go new file mode 100644 index 00000000..5ecfa732 --- /dev/null +++ b/external/ceph.rook.io/v1/object_test.go @@ -0,0 +1,366 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidateObjectStoreSpec(t *testing.T) { + o := &CephObjectStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: ObjectStoreSpec{ + Gateway: GatewaySpec{ + Port: 1, + SecurePort: 0, + }, + }, + } + err := ValidateObjectSpec(o) + assert.NoError(t, err) + + // when both port and securePort are o + o.Spec.Gateway.Port = 0 + err = ValidateObjectSpec(o) + assert.Error(t, err) + + // when securePort is greater than 65535 + o.Spec.Gateway.SecurePort = 65536 + err = ValidateObjectSpec(o) + assert.Error(t, err) + + // when name is empty + o.ObjectMeta.Name = "" + err = ValidateObjectSpec(o) + assert.Error(t, err) + + // when namespace is empty + o.ObjectMeta.Namespace = "" + err = ValidateObjectSpec(o) + assert.Error(t, err) + + t.Run("hosting", func(t *testing.T) { + o := &CephObjectStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: ObjectStoreSpec{ + Gateway: GatewaySpec{ + Port: 1, + SecurePort: 0, + }, + Hosting: &ObjectStoreHostingSpec{ + AdvertiseEndpoint: &ObjectEndpointSpec{ + DnsName: "valid.dns.addr", + Port: 1, + }, + DNSNames: []string{"valid.dns.addr", "valid.dns.com"}, + }, + }, + } + err := ValidateObjectSpec(o) + assert.NoError(t, err) + + // wildcard advertise dns name + s := o.DeepCopy() + s.Spec.Hosting.AdvertiseEndpoint.DnsName = "*.invalid.dns.addr" + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, `"*.invalid.dns.addr"`) + + // empty advertise dns name + s = o.DeepCopy() + s.Spec.Hosting.AdvertiseEndpoint.DnsName = "" + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, `""`) + + // zero port + s = o.DeepCopy() + s.Spec.Hosting.AdvertiseEndpoint.Port = 0 + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, "0") + + // 65536 port + s = o.DeepCopy() + s.Spec.Hosting.AdvertiseEndpoint.Port = 
65536 + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, "65536") + + // first dnsName invalid + s = o.DeepCopy() + s.Spec.Hosting.DNSNames = []string{"-invalid.dns.name", "accepted.dns.name"} + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, `"-invalid.dns.name"`) + assert.NotContains(t, err.Error(), "accepted.dns.name") + + // second dnsName invalid + s = o.DeepCopy() + s.Spec.Hosting.DNSNames = []string{"accepted.dns.name", "-invalid.dns.name"} + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, `"-invalid.dns.name"`) + assert.NotContains(t, err.Error(), "accepted.dns.name") + + // both dnsNames invalid + s = o.DeepCopy() + s.Spec.Hosting.DNSNames = []string{"*.invalid.dns.name", "-invalid.dns.name"} + err = ValidateObjectSpec(s) + assert.ErrorContains(t, err, `"-invalid.dns.name"`) + assert.ErrorContains(t, err, `"*.invalid.dns.name"`) + }) +} + +func TestIsTLSEnabled(t *testing.T) { + objStore := &CephObjectStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "rook-ceph", + }, + Spec: ObjectStoreSpec{ + Gateway: GatewaySpec{ + Port: 1, + SecurePort: 0, + }, + }, + } + IsTLS := objStore.Spec.IsTLSEnabled() + assert.False(t, IsTLS) + + // only securePort is set without certs + objStore.Spec.Gateway.SecurePort = 443 + IsTLS = objStore.Spec.IsTLSEnabled() + assert.False(t, IsTLS) + + // when SSLCertificateRef is set with securePort + objStore.Spec.Gateway.SSLCertificateRef = "my-tls-cert" + IsTLS = objStore.Spec.IsTLSEnabled() + assert.True(t, IsTLS) + + // when service serving cert is used + objStore.Spec.Gateway.SSLCertificateRef = "" + objStore.Spec.Gateway.Service = &(RGWServiceSpec{Annotations: Annotations{ServiceServingCertKey: "rgw-cert"}}) + IsTLS = objStore.Spec.IsTLSEnabled() + assert.True(t, IsTLS) + + // when cert are set but securePort unset + objStore.Spec.Gateway.SecurePort = 0 + IsTLS = objStore.Spec.IsTLSEnabled() + assert.False(t, IsTLS) +} + +func TestCephObjectStore_GetAdvertiseEndpointUrl(t 
*testing.T) { + emptySpec := func() *CephObjectStore { + return &CephObjectStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-store", + Namespace: "my-ns", + }, + } + } + + httpSpec := func() *CephObjectStore { + s := emptySpec() + s.Spec.Gateway.Port = 8080 + return s + } + + httpsSpec := func() *CephObjectStore { + s := emptySpec() + s.Spec.Gateway.SecurePort = 8443 + s.Spec.Gateway.SSLCertificateRef = "my-cert" + return s + } + + dualSpec := func() *CephObjectStore { + s := emptySpec() + s.Spec.Gateway.Port = 8080 + s.Spec.Gateway.SecurePort = 8443 + s.Spec.Gateway.SSLCertificateRef = "my-cert" + return s + } + + removeCert := func(s *CephObjectStore) *CephObjectStore { + s.Spec.Gateway.SSLCertificateRef = "" + return s + } + + initHosting := func(s *CephObjectStore) *CephObjectStore { + if s.Spec.Hosting == nil { + s.Spec.Hosting = &ObjectStoreHostingSpec{} + } + return s + } + + addExternalIPs := func(s *CephObjectStore) *CephObjectStore { + s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{ + {IP: "192.168.1.1"}, + {IP: "192.168.1.2"}, + } + return s + } + + addExternalHostnames := func(s *CephObjectStore) *CephObjectStore { + s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{ + {Hostname: "s3.external.com"}, + {Hostname: "s3.other.com"}, + } + return s + } + + addNilAdvertise := func(s *CephObjectStore) *CephObjectStore { + s = initHosting(s) + s.Spec.Hosting.AdvertiseEndpoint = nil + return s + } + + addAdvertiseHttp := func(s *CephObjectStore) *CephObjectStore { + s = initHosting(s) + s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{ + DnsName: "my-endpoint.com", + Port: 80, + UseTls: false, + } + return s + } + + addAdvertiseHttps := func(s *CephObjectStore) *CephObjectStore { + s = initHosting(s) + s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{ + DnsName: "my-endpoint.com", + Port: 443, + UseTls: true, + } + return s + } + + type test struct { + name string + store *CephObjectStore + want string + wantErrContain string + } + + 
// base level tests, internal mode + tests := []test{ + {"nil hosting : internal : empty ", emptySpec(), "", "Port"}, + {"nil hosting : internal : port ", httpSpec(), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, + {"nil hosting : internal : securePort ", httpsSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, + {"nil hosting : internal : port + securePort ", dualSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, + {"nil hosting : internal : securePort, no cert ", removeCert(httpsSpec()), "", "Port"}, + {"nil hosting : internal : port + securePort, no cert", removeCert(dualSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, + {"nil hosting : external IPs : empty ", addExternalIPs(emptySpec()), "", "Port"}, + {"nil hosting : external IPs : port ", addExternalIPs(httpSpec()), "http://192.168.1.1:8080", ""}, + {"nil hosting : external IPs : securePort ", addExternalIPs(httpsSpec()), "https://192.168.1.1:8443", ""}, + {"nil hosting : external IPs : port + securePort ", addExternalIPs(dualSpec()), "https://192.168.1.1:8443", ""}, + {"nil hosting : external IPs : securePort, no cert ", addExternalIPs(removeCert(httpsSpec())), "", "Port"}, + {"nil hosting : external IPs : port + securePort, no cert", addExternalIPs(removeCert(dualSpec())), "http://192.168.1.1:8080", ""}, + {"nil hosting : external Hostnames: empty ", addExternalHostnames(emptySpec()), "", "Port"}, + {"nil hosting : external Hostnames: port ", addExternalHostnames(httpSpec()), "http://s3.external.com:8080", ""}, + {"nil hosting : external Hostnames: securePort ", addExternalHostnames(httpsSpec()), "https://s3.external.com:8443", ""}, + {"nil hosting : external Hostnames: port + securePort ", addExternalHostnames(dualSpec()), "https://s3.external.com:8443", ""}, + {"nil hosting : external Hostnames: securePort, no cert ", addExternalHostnames(removeCert(httpsSpec())), "", "Port"}, + {"nil hosting : external Hostnames: port + securePort, no cert", 
addExternalHostnames(removeCert(dualSpec())), "http://s3.external.com:8080", ""}, + + {"nil advertise : internal : empty ", addNilAdvertise(emptySpec()), "", "Port"}, + {"nil advertise : internal : port ", addNilAdvertise(httpSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, + {"nil advertise : internal : securePort ", addNilAdvertise(httpsSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, + {"nil advertise : internal : port + securePort ", addNilAdvertise(dualSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, + {"nil advertise : internal : securePort, no cert ", addNilAdvertise(removeCert(httpsSpec())), "", "Port"}, + {"nil advertise : internal : port + securePort, no cert", addNilAdvertise(removeCert(dualSpec())), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, + {"nil advertise : external IPs : empty ", addNilAdvertise(addExternalIPs(emptySpec())), "", "Port"}, + {"nil advertise : external IPs : port ", addNilAdvertise(addExternalIPs(httpSpec())), "http://192.168.1.1:8080", ""}, + {"nil advertise : external IPs : securePort ", addNilAdvertise(addExternalIPs(httpsSpec())), "https://192.168.1.1:8443", ""}, + {"nil advertise : external IPs : port + securePort ", addNilAdvertise(addExternalIPs(dualSpec())), "https://192.168.1.1:8443", ""}, + {"nil advertise : external IPs : securePort, no cert ", addNilAdvertise(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, + {"nil advertise : external IPs : port + securePort, no cert", addNilAdvertise(addExternalIPs(removeCert(dualSpec()))), "http://192.168.1.1:8080", ""}, + {"nil advertise : external Hostnames: empty ", addNilAdvertise(addExternalHostnames(emptySpec())), "", "Port"}, + {"nil advertise : external Hostnames: port ", addNilAdvertise(addExternalHostnames(httpSpec())), "http://s3.external.com:8080", ""}, + {"nil advertise : external Hostnames: securePort ", addNilAdvertise(addExternalHostnames(httpsSpec())), "https://s3.external.com:8443", ""}, + {"nil advertise : 
external Hostnames: port + securePort ", addNilAdvertise(addExternalHostnames(dualSpec())), "https://s3.external.com:8443", ""}, + {"nil advertise : external Hostnames: securePort, no cert ", addNilAdvertise(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, + {"nil advertise : external Hostnames: port + securePort, no cert", addNilAdvertise(addExternalHostnames(removeCert(dualSpec()))), "http://s3.external.com:8080", ""}, + + {"HTTP advertise : internal : empty ", addAdvertiseHttp(emptySpec()), "", "Port"}, + {"HTTP advertise : internal : port ", addAdvertiseHttp(httpSpec()), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : internal : securePort ", addAdvertiseHttp(httpsSpec()), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : internal : port + securePort ", addAdvertiseHttp(dualSpec()), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : internal : securePort, no cert ", addAdvertiseHttp(removeCert(httpsSpec())), "", "Port"}, + {"HTTP advertise : internal : port + securePort, no cert", addAdvertiseHttp(removeCert(dualSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external IPs : empty ", addAdvertiseHttp(addExternalIPs(emptySpec())), "", "Port"}, + {"HTTP advertise : external IPs : port ", addAdvertiseHttp(addExternalIPs(httpSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external IPs : securePort ", addAdvertiseHttp(addExternalIPs(httpsSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external IPs : port + securePort ", addAdvertiseHttp(addExternalIPs(dualSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external IPs : securePort, no cert ", addAdvertiseHttp(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, + {"HTTP advertise : external IPs : port + securePort, no cert", addAdvertiseHttp(addExternalIPs(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external Hostnames: empty ", addAdvertiseHttp(addExternalHostnames(emptySpec())), "", 
"Port"}, + {"HTTP advertise : external Hostnames: port ", addAdvertiseHttp(addExternalHostnames(httpSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external Hostnames: securePort ", addAdvertiseHttp(addExternalHostnames(httpsSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external Hostnames: port + securePort ", addAdvertiseHttp(addExternalHostnames(dualSpec())), "http://my-endpoint.com:80", ""}, + {"HTTP advertise : external Hostnames: securePort, no cert ", addAdvertiseHttp(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, + {"HTTP advertise : external Hostnames: port + securePort, no cert", addAdvertiseHttp(addExternalHostnames(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""}, + + {"HTTPS advertise: internal : empty ", addAdvertiseHttps(emptySpec()), "", "Port"}, + {"HTTPS advertise: internal : port ", addAdvertiseHttps(httpSpec()), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: internal : securePort ", addAdvertiseHttps(httpsSpec()), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: internal : port + securePort ", addAdvertiseHttps(dualSpec()), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: internal : securePort, no cert ", addAdvertiseHttps(removeCert(httpsSpec())), "", "Port"}, + {"HTTPS advertise: internal : port + securePort, no cert", addAdvertiseHttps(removeCert(dualSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external IPs : empty ", addAdvertiseHttps(addExternalIPs(emptySpec())), "", "Port"}, + {"HTTPS advertise: external IPs : port ", addAdvertiseHttps(addExternalIPs(httpSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external IPs : securePort ", addAdvertiseHttps(addExternalIPs(httpsSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external IPs : port + securePort ", addAdvertiseHttps(addExternalIPs(dualSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external IPs : securePort, no cert ", 
addAdvertiseHttps(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, + {"HTTPS advertise: external IPs : port + securePort, no cert", addAdvertiseHttps(addExternalIPs(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external Hostnames: empty ", addAdvertiseHttps(addExternalHostnames(emptySpec())), "", "Port"}, + {"HTTPS advertise: external Hostnames: port ", addAdvertiseHttps(addExternalHostnames(httpSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external Hostnames: securePort ", addAdvertiseHttps(addExternalHostnames(httpsSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external Hostnames: port + securePort ", addAdvertiseHttps(addExternalHostnames(dualSpec())), "https://my-endpoint.com:443", ""}, + {"HTTPS advertise: external Hostnames: securePort, no cert ", addAdvertiseHttps(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, + {"HTTPS advertise: external Hostnames: port + securePort, no cert", addAdvertiseHttps(addExternalHostnames(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.store.GetAdvertiseEndpointUrl() + assert.Equal(t, tt.want, got) + if tt.wantErrContain != "" { + assert.ErrorContains(t, err, tt.wantErrContain) + } else { + assert.NoError(t, err) + } + }) + + if tt.store.Spec.Hosting != nil { + t.Run("with DNS names: "+tt.name, func(t *testing.T) { + // dnsNames shouldn't change the test result at all + s := tt.store.DeepCopy() + s.Spec.Hosting.DNSNames = []string{"should.not.show.up"} + got, err := s.GetAdvertiseEndpointUrl() + assert.Equal(t, tt.want, got) + if tt.wantErrContain != "" { + assert.ErrorContains(t, err, tt.wantErrContain) + } else { + assert.NoError(t, err) + } + }) + } + } +} diff --git a/external/ceph.rook.io/v1/placement.go b/external/ceph.rook.io/v1/placement.go new file mode 100644 index 00000000..0cc327fe --- /dev/null +++ 
b/external/ceph.rook.io/v1/placement.go @@ -0,0 +1,146 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +func (p PlacementSpec) All() Placement { + return p[KeyAll] +} + +// ApplyToPodSpec adds placement to a pod spec +func (p Placement) ApplyToPodSpec(t *v1.PodSpec) { + if t.Affinity == nil { + t.Affinity = &v1.Affinity{} + } + if p.NodeAffinity != nil { + t.Affinity.NodeAffinity = p.mergeNodeAffinity(t.Affinity.NodeAffinity) + } + if p.PodAffinity != nil { + t.Affinity.PodAffinity = p.PodAffinity.DeepCopy() + } + if p.PodAntiAffinity != nil { + t.Affinity.PodAntiAffinity = p.PodAntiAffinity.DeepCopy() + } + if p.Tolerations != nil { + t.Tolerations = p.mergeTolerations(t.Tolerations) + } + if p.TopologySpreadConstraints != nil { + t.TopologySpreadConstraints = p.TopologySpreadConstraints + } +} + +func (p Placement) mergeNodeAffinity(nodeAffinity *v1.NodeAffinity) *v1.NodeAffinity { + // no node affinity is specified yet, so return the placement's nodeAffinity + result := p.NodeAffinity.DeepCopy() + if nodeAffinity == nil { + return result + } + + // merge the preferred node affinity that was already specified, and the placement's nodeAffinity + result.PreferredDuringSchedulingIgnoredDuringExecution = append( + nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + p.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) 
+ + // nothing to merge if no affinity was passed in + if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + return result + } + // take the desired affinity if there was none on the placement + if p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + result.RequiredDuringSchedulingIgnoredDuringExecution = nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution + return result + } + // take the desired affinity node selectors without the need to merge + if len(nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { + return result + } + // take the placement affinity node selectors without the need to merge + if len(p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { + // take the placement from the first option since the second isn't specified + result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms + return result + } + + // merge the match expressions together since they are defined in both placements + // this will only work if we want an "and" between all the expressions, more complex conditions won't work with this merge + var nodeTerm v1.NodeSelectorTerm + nodeTerm.MatchExpressions = append( + nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, + p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...) + nodeTerm.MatchFields = append( + nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields, + p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields...) 
+ result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0] = nodeTerm + + return result +} + +func (p Placement) mergeTolerations(tolerations []v1.Toleration) []v1.Toleration { + // no toleration is specified yet, return placement's toleration + if tolerations == nil { + return p.Tolerations + } + + return append(p.Tolerations, tolerations...) +} + +// Merge returns a Placement which results from merging the attributes of the +// original Placement with the attributes of the supplied one. The supplied +// Placement's attributes will override the original ones if defined. +func (p Placement) Merge(with Placement) Placement { + ret := p + if with.NodeAffinity != nil { + ret.NodeAffinity = with.NodeAffinity + } + if with.PodAffinity != nil { + ret.PodAffinity = with.PodAffinity + } + if with.PodAntiAffinity != nil { + ret.PodAntiAffinity = with.PodAntiAffinity + } + if with.Tolerations != nil { + ret.Tolerations = ret.mergeTolerations(with.Tolerations) + } + if with.TopologySpreadConstraints != nil { + ret.TopologySpreadConstraints = with.TopologySpreadConstraints + } + return ret +} + +// GetMgrPlacement returns the placement for the MGR service +func GetMgrPlacement(p PlacementSpec) Placement { + return p.All().Merge(p[KeyMgr]) +} + +// GetMonPlacement returns the placement for the MON service +func GetMonPlacement(p PlacementSpec) Placement { + return p.All().Merge(p[KeyMon]) +} + +// GetArbiterPlacement returns the placement for the arbiter MON service +func GetArbiterPlacement(p PlacementSpec) Placement { + // If the mon is the arbiter in a stretch cluster and its placement is specified, return it + // without merging with the "all" placement so it can be handled separately from all other daemons + return p[KeyMonArbiter] +} + +// GetOSDPlacement returns the placement for the OSD service +func GetOSDPlacement(p PlacementSpec) Placement { + return p.All().Merge(p[KeyOSD]) +} diff --git a/external/ceph.rook.io/v1/placement_test.go 
b/external/ceph.rook.io/v1/placement_test.go new file mode 100644 index 00000000..c8dc8115 --- /dev/null +++ b/external/ceph.rook.io/v1/placement_test.go @@ -0,0 +1,360 @@ +/* +Copyright 2016 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestPlacementSpec(t *testing.T) { + specYaml := []byte(` +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: foo + operator: In + values: + - bar +tolerations: + - key: foo + operator: Exists +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + foo: bar`) + // convert the raw spec yaml into JSON + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + + // unmarshal the JSON into a strongly typed placement spec object + var placement Placement + err = json.Unmarshal(rawJSON, &placement) + assert.Nil(t, err) + + // the unmarshalled placement spec should equal the expected spec below + expected := Placement{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: 
v1.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + }, + }, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: "zone", + WhenUnsatisfiable: "DoNotSchedule", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + } + assert.Equal(t, expected, placement) +} + +func TestMergeNodeAffinity(t *testing.T) { + // affinity is nil + p := Placement{} + result := p.mergeNodeAffinity(nil) + assert.Nil(t, result) + + // node affinity is only set on the placement and should remain unchanged + p.NodeAffinity = placementTestGenerateNodeAffinity() + result = p.mergeNodeAffinity(nil) + assert.Equal(t, p.NodeAffinity, result) + + // preferred set, but required not set + affinityToMerge := placementTestGenerateNodeAffinity() + affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution = nil + result = p.mergeNodeAffinity(affinityToMerge) + assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) + assert.Equal(t, p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, result.RequiredDuringSchedulingIgnoredDuringExecution) + + // preferred and required expressions set + affinityToMerge = placementTestGenerateNodeAffinity() + affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key = "baz" + result = p.mergeNodeAffinity(affinityToMerge) + assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) + assert.Equal(t, 2, len(result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) + assert.Equal(t, "baz", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) + assert.Equal(t, "foo", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Key) +} + +func 
TestPlacementApplyToPodSpec(t *testing.T) { + to := placementTestGetTolerations("foo", "bar") + na := placementTestGenerateNodeAffinity() + antiaffinity := placementAntiAffinity("v1") + tc := placementTestGetTopologySpreadConstraints("zone") + expected := &v1.PodSpec{ + Affinity: &v1.Affinity{NodeAffinity: na, PodAntiAffinity: antiaffinity}, + Tolerations: to, + TopologySpreadConstraints: tc, + } + + var p Placement + var ps *v1.PodSpec + + p = Placement{ + NodeAffinity: na, + Tolerations: to, + PodAntiAffinity: antiaffinity, + TopologySpreadConstraints: tc, + } + ps = &v1.PodSpec{} + p.ApplyToPodSpec(ps) + assert.Equal(t, expected, ps) + assert.Equal(t, 1, len(ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) + + // Appending some other antiaffinity to the pod spec should not alter the original placement antiaffinity + otherAntiAffinity := placementAntiAffinity("v2") + ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( + ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + otherAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) 
+ assert.Equal(t, 1, len(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution)) + + // partial update + p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} + ps = &v1.PodSpec{Tolerations: to, TopologySpreadConstraints: tc} + p.ApplyToPodSpec(ps) + assert.Equal(t, expected, ps) + + // overridden attributes + p = Placement{ + NodeAffinity: na, + PodAntiAffinity: antiaffinity, + Tolerations: to, + TopologySpreadConstraints: tc, + } + ps = &v1.PodSpec{ + TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), + } + p.ApplyToPodSpec(ps) + assert.Equal(t, expected, ps) + + // The preferred affinity is merged from both sources to result in two node affinities + p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} + nap := placementTestGenerateNodeAffinity() + nap.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 5 + ps = &v1.PodSpec{ + Affinity: &v1.Affinity{NodeAffinity: nap}, + Tolerations: to, + TopologySpreadConstraints: tc, + } + p.ApplyToPodSpec(ps) + assert.Equal(t, 2, len(ps.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) + + p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} + to = placementTestGetTolerations("foo", "bar") + ps = &v1.PodSpec{ + Tolerations: to, + } + p.ApplyToPodSpec(ps) + assert.Equal(t, 1, len(ps.Tolerations)) + p = Placement{Tolerations: to, NodeAffinity: na, PodAntiAffinity: antiaffinity} + p.ApplyToPodSpec(ps) + assert.Equal(t, 2, len(ps.Tolerations)) +} + +func TestPlacementMerge(t *testing.T) { + to := placementTestGetTolerations("foo", "bar") + na := placementTestGenerateNodeAffinity() + tc := placementTestGetTopologySpreadConstraints("zone") + + var original, with, expected, merged Placement + + original = Placement{} + with = Placement{Tolerations: to} + expected = Placement{Tolerations: to} + merged = original.Merge(with) + assert.Equal(t, expected, merged) + + original = Placement{NodeAffinity: na} + with = Placement{Tolerations: to} + 
expected = Placement{NodeAffinity: na, Tolerations: to} + merged = original.Merge(with) + assert.Equal(t, expected, merged) + + original = Placement{} + with = Placement{TopologySpreadConstraints: tc} + expected = Placement{TopologySpreadConstraints: tc} + merged = original.Merge(with) + assert.Equal(t, expected, merged) + + original = Placement{ + Tolerations: placementTestGetTolerations("bar", "baz"), + TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), + } + with = Placement{ + NodeAffinity: na, + Tolerations: to, + TopologySpreadConstraints: tc, + } + var ts int64 = 10 + expected = Placement{ + NodeAffinity: na, + Tolerations: []v1.Toleration{ + { + Key: "bar", + Operator: v1.TolerationOpExists, + Value: "baz", + Effect: v1.TaintEffectNoSchedule, + TolerationSeconds: &ts, + }, + { + Key: "foo", + Operator: v1.TolerationOpExists, + Value: "bar", + Effect: v1.TaintEffectNoSchedule, + TolerationSeconds: &ts, + }, + }, + TopologySpreadConstraints: tc, + } + merged = original.Merge(with) + assert.Equal(t, expected, merged) +} + +func placementTestGetTolerations(key, value string) []v1.Toleration { + var ts int64 = 10 + return []v1.Toleration{ + { + Key: key, + Operator: v1.TolerationOpExists, + Value: value, + Effect: v1.TaintEffectNoSchedule, + TolerationSeconds: &ts, + }, + } +} + +func placementTestGetTopologySpreadConstraints(topologyKey string) []v1.TopologySpreadConstraint { + return []v1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: topologyKey, + WhenUnsatisfiable: "DoNotSchedule", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + } +} + +func placementAntiAffinity(value string) *v1.PodAntiAffinity { + return &v1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + Weight: 50, + PodAffinityTerm: v1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": value, + }, + }, + 
TopologyKey: v1.LabelHostname, + }, + }, + }, + } +} + +func placementTestGenerateNodeAffinity() *v1.NodeAffinity { + return &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpExists, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + { + Weight: 10, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpExists, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + } +} + +func TestMergeToleration(t *testing.T) { + // placement is nil + p := Placement{} + result := p.mergeTolerations(nil) + assert.Nil(t, result) + + placementToleration := []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpEqual, + }, + } + + p.Tolerations = placementToleration + result = p.mergeTolerations(nil) + assert.Equal(t, p.Tolerations, result) + + newToleration := []v1.Toleration{ + { + Key: "new", + Operator: v1.TolerationOpExists, + }, + } + + result = p.mergeTolerations(newToleration) + assert.Equal(t, 2, len(result)) + assert.Equal(t, placementToleration[0].Key, result[0].Key) + assert.Equal(t, newToleration[0].Key, result[1].Key) +} diff --git a/external/ceph.rook.io/v1/pool.go b/external/ceph.rook.io/v1/pool.go new file mode 100644 index 00000000..a3653f04 --- /dev/null +++ b/external/ceph.rook.io/v1/pool.go @@ -0,0 +1,97 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/pkg/errors" +) + +func (p *PoolSpec) IsReplicated() bool { + return p.Replicated.Size > 0 +} + +func (p *PoolSpec) IsErasureCoded() bool { + return p.ErasureCoded.CodingChunks > 0 || p.ErasureCoded.DataChunks > 0 +} + +func (p *PoolSpec) IsHybridStoragePool() bool { + return p.Replicated.HybridStorage != nil +} + +// ValidateCephBlockPool validates specifically a CephBlockPool's spec (not just any NamedPoolSpec) +func ValidateCephBlockPool(p *CephBlockPool) error { + if p.Spec.Name == ".rgw.root" || p.Spec.Name == ".mgr" || p.Spec.Name == ".nfs" { + if p.Spec.IsErasureCoded() { + return errors.Errorf("invalid CephBlockPool spec: ceph built-in pool %q cannot be erasure coded", p.Name) + } + } + + return validatePoolSpec(p.ToNamedPoolSpec()) +} + +// validate any NamedPoolSpec +func validatePoolSpec(ps NamedPoolSpec) error { + // Checks if either ErasureCoded or Replicated fields are set + if ps.ErasureCoded.CodingChunks <= 0 && ps.ErasureCoded.DataChunks <= 0 && ps.Replicated.TargetSizeRatio <= 0 && ps.Replicated.Size <= 0 { + return errors.New("invalid pool spec: either of erasurecoded or replicated fields should be set") + } + // Check if any of the ErasureCoded fields are populated. Then check if replicated is populated. Both can't be populated at same time. 
+ if ps.ErasureCoded.CodingChunks > 0 || ps.ErasureCoded.DataChunks > 0 || ps.ErasureCoded.Algorithm != "" { + if ps.Replicated.Size > 0 || ps.Replicated.TargetSizeRatio > 0 { + return errors.New("invalid pool spec: both erasurecoded and replicated fields cannot be set at the same time") + } + } + + if ps.Replicated.Size == 0 && ps.Replicated.TargetSizeRatio == 0 { + // Check if datachunks is set and has value less than 2. + if ps.ErasureCoded.DataChunks < 2 && ps.ErasureCoded.DataChunks != 0 { + return errors.New("invalid pool spec: erasurecoded.datachunks needs minimum value of 2") + } + + // Check if codingchunks is set and has value less than 1. + if ps.ErasureCoded.CodingChunks < 1 && ps.ErasureCoded.CodingChunks != 0 { + return errors.New("invalid pool spec: erasurecoded.codingchunks needs minimum value of 1") + } + } + return nil +} + +func (p *CephBlockPool) ToNamedPoolSpec() NamedPoolSpec { + // If the name is not overridden in the pool spec.name, set it to the name of the pool CR + name := p.Spec.Name + if name == "" { + // Set the name of the pool CR since a name override wasn't specified in the spec + name = p.Name + } + return NamedPoolSpec{ + Name: name, + PoolSpec: p.Spec.PoolSpec, + } +} + +func (p *CephBlockPool) GetStatusConditions() *[]Condition { + return &p.Status.Conditions +} + +func (p *CephBlockPoolRadosNamespace) GetStatusConditions() *[]Condition { + return &p.Status.Conditions +} + +// SnapshotSchedulesEnabled returns whether snapshot schedules are desired +func (p *MirroringSpec) SnapshotSchedulesEnabled() bool { + return len(p.SnapshotSchedules) > 0 +} diff --git a/external/ceph.rook.io/v1/pool_test.go b/external/ceph.rook.io/v1/pool_test.go new file mode 100644 index 00000000..a72d31a5 --- /dev/null +++ b/external/ceph.rook.io/v1/pool_test.go @@ -0,0 +1,74 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidatePoolSpec(t *testing.T) { + p := &CephBlockPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ec-pool", + }, + Spec: NamedBlockPoolSpec{ + PoolSpec: PoolSpec{ + ErasureCoded: ErasureCodedSpec{ + CodingChunks: 1, + DataChunks: 2, + }, + }, + }, + } + err := validatePoolSpec(p.ToNamedPoolSpec()) + assert.NoError(t, err) + + p.Spec.ErasureCoded.DataChunks = 1 + err = validatePoolSpec(p.ToNamedPoolSpec()) + assert.Error(t, err) +} + +func TestMirroringSpec_SnapshotSchedulesEnabled(t *testing.T) { + type fields struct { + Enabled bool + Mode string + SnapshotSchedules []SnapshotScheduleSpec + } + tests := []struct { + name string + fields fields + want bool + }{ + {"disabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{}}, false}, + {"enabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{{Interval: "2d"}}}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &MirroringSpec{ + Enabled: tt.fields.Enabled, + Mode: tt.fields.Mode, + SnapshotSchedules: tt.fields.SnapshotSchedules, + } + if got := p.SnapshotSchedulesEnabled(); got != tt.want { + t.Errorf("MirroringSpec.SnapshotSchedulesEnabled() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/external/ceph.rook.io/v1/priorityclasses.go 
b/external/ceph.rook.io/v1/priorityclasses.go new file mode 100644 index 00000000..05e56c61 --- /dev/null +++ b/external/ceph.rook.io/v1/priorityclasses.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// All returns the priority class name defined for 'all' daemons in the Ceph cluster CRD. +func (p PriorityClassNamesSpec) All() string { + if val, ok := p[KeyAll]; ok { + return val + } + return "" +} + +// GetMgrPriorityClassName returns the priority class name for the MGR service +func GetMgrPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyMgr]; !ok { + return p.All() + } + return p[KeyMgr] +} + +// GetMonPriorityClassName returns the priority class name for the monitors +func GetMonPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyMon]; !ok { + return p.All() + } + return p[KeyMon] +} + +// GetOSDPriorityClassName returns the priority class name for the OSDs +func GetOSDPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyOSD]; !ok { + return p.All() + } + return p[KeyOSD] +} + +// GetCleanupPriorityClassName returns the priority class name for the cleanup job +func GetCleanupPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyCleanup]; !ok { + return p.All() + } + return p[KeyCleanup] +} + +// GetCrashCollectorPriorityClassName returns the priority class name for the crashcollector +func 
GetCrashCollectorPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyCrashCollector]; !ok { + return p.All() + } + return p[KeyCrashCollector] +} + +// GetCephExporterPriorityClassName returns the priority class name for the ceph-exporter +func GetCephExporterPriorityClassName(p PriorityClassNamesSpec) string { + if _, ok := p[KeyCephExporter]; !ok { + return p.All() + } + return p[KeyCephExporter] +} diff --git a/external/ceph.rook.io/v1/priorityclasses_test.go b/external/ceph.rook.io/v1/priorityclasses_test.go new file mode 100644 index 00000000..cc2f018a --- /dev/null +++ b/external/ceph.rook.io/v1/priorityclasses_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestPriorityClassNamesSpec(t *testing.T) { + specYaml := []byte(` +all: all-class +mgr: mgr-class +mon: mon-class +osd: osd-class +crashcollector: crashcollector-class +`) + + // convert the raw spec yaml into JSON + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + + // unmarshal the JSON into a strongly typed annotations spec object + var priorityClassNames PriorityClassNamesSpec + err = json.Unmarshal(rawJSON, &priorityClassNames) + assert.Nil(t, err) + + // the unmarshalled priority class names spec should equal the expected spec below + expected := PriorityClassNamesSpec{ + "all": "all-class", + "mgr": "mgr-class", + "mon": "mon-class", + "osd": "osd-class", + "crashcollector": "crashcollector-class", + } + assert.Equal(t, expected, priorityClassNames) +} + +func TestPriorityClassNamesDefaultToAll(t *testing.T) { + priorityClassNames := PriorityClassNamesSpec{ + "all": "all-class", + "mon": "mon-class", + } + + assert.Equal(t, "all-class", priorityClassNames.All()) +} diff --git a/external/ceph.rook.io/v1/register.go b/external/ceph.rook.io/v1/register.go new file mode 100644 index 00000000..8f0e895f --- /dev/null +++ b/external/ceph.rook.io/v1/register.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + cephrookio "github.com/codesphere-cloud/oms/external/ceph.rook.io" +) + +const ( + CustomResourceGroup = "ceph.rook.io" + Version = "v1" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: cephrookio.CustomResourceGroupName, Version: Version} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CephClient{}, + &CephClientList{}, + &CephCluster{}, + &CephClusterList{}, + &CephBlockPool{}, + &CephBlockPoolList{}, + &CephFilesystem{}, + &CephFilesystemList{}, + &CephNFS{}, + &CephNFSList{}, + &CephNVMeOFGateway{}, + &CephNVMeOFGatewayList{}, + &CephObjectStore{}, + &CephObjectStoreList{}, + &CephObjectStoreUser{}, + &CephObjectStoreUserList{}, + &CephObjectRealm{}, + &CephObjectRealmList{}, + &CephObjectZoneGroup{}, + &CephObjectZoneGroupList{}, + &CephObjectZone{}, + &CephObjectZoneList{}, + &CephBucketTopic{}, + &CephBucketTopicList{}, + &CephBucketNotification{}, + &CephBucketNotificationList{}, + &CephRBDMirror{}, + &CephRBDMirrorList{}, + &CephFilesystemMirror{}, + &CephFilesystemMirrorList{}, + &CephFilesystemSubVolumeGroup{}, + &CephFilesystemSubVolumeGroupList{}, + &CephBlockPoolRadosNamespace{}, + &CephBlockPoolRadosNamespaceList{}, + &CephCOSIDriver{}, + &CephCOSIDriverList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/external/ceph.rook.io/v1/resources.go b/external/ceph.rook.io/v1/resources.go new file mode 100644 index 00000000..95baec3c --- /dev/null +++ b/external/ceph.rook.io/v1/resources.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +const ( + // ResourcesKeyMon represents the name of resource in the CR for a mon + ResourcesKeyMon = "mon" + // ResourcesKeyMgr represents the name of resource in the CR for a mgr + ResourcesKeyMgr = "mgr" + // ResourcesKeyMgrSidecar represents the name of resource in the CR for a mgr + ResourcesKeyMgrSidecar = "mgr-sidecar" + // ResourcesKeyOSD represents the name of a resource in the CR for all OSDs + ResourcesKeyOSD = "osd" + // ResourcesKeyPrepareOSD represents the name of resource in the CR for the osd prepare job + ResourcesKeyPrepareOSD = "prepareosd" + // ResourcesKeyCmdReporter represents the name of resource in the CR for the detect version and network jobs + ResourcesKeyCmdReporter = "cmd-reporter" + // ResourcesKeyMDS represents the name of resource in the CR for the mds + ResourcesKeyMDS = "mds" + // ResourcesKeyCrashCollector represents the name of resource in the CR for the crash + ResourcesKeyCrashCollector = "crashcollector" + // ResourcesKeyLogCollector represents the name of resource in the CR for the log + ResourcesKeyLogCollector = "logcollector" + // ResourcesKeyRBDMirror represents the name of resource in the CR for the rbd mirror + ResourcesKeyRBDMirror = "rbdmirror" + // ResourcesKeyFilesystemMirror represents the name of resource in the CR for the filesystem mirror + ResourcesKeyFilesystemMirror = "fsmirror" + // ResourcesKeyCleanup represents the name of resource in the CR for the cleanup + ResourcesKeyCleanup = "cleanup" + // ResourcesKeyCephExporter represents the name of resource in the CR for ceph-exporter + ResourcesKeyCephExporter = "exporter" +) + +// GetMgrResources returns the resources for the MGR service +func GetMgrResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyMgr] +} + +// GetMgrSidecarResources returns the resources for the MGR sidecar container +func GetMgrSidecarResources(p ResourceSpec) v1.ResourceRequirements { + return 
p[ResourcesKeyMgrSidecar] +} + +// GetMonResources returns the resources for the monitors +func GetMonResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyMon] +} + +// GetOSDResources returns the resources for all OSDs or for OSDs of specified device class (hdd, nvme, ssd) +func GetOSDResources(p ResourceSpec, deviceClass string) v1.ResourceRequirements { + if deviceClass == "" { + return p[ResourcesKeyOSD] + } + // if device class specified, but not set in requirements return common osd requirements if present + r, ok := p[getOSDResourceKeyForDeviceClass(deviceClass)] + if ok { + return r + } + return p[ResourcesKeyOSD] +} + +// GetOSDResourcesForDeviceClass returns the resources for a device class, if specified +func GetOSDResourcesForDeviceClass(resourceSpec ResourceSpec, deviceClass string) (v1.ResourceRequirements, bool) { + // if the device class requests specific resources, return them here + if resources, ok := resourceSpec[getOSDResourceKeyForDeviceClass(deviceClass)]; ok { + return resources, true + } + // no resources requested specific to the device class + return v1.ResourceRequirements{}, false +} + +// getOSDResourceKeyForDeviceClass returns key name for device class in resources spec +func getOSDResourceKeyForDeviceClass(deviceClass string) string { + return ResourcesKeyOSD + "-" + deviceClass +} + +// GetPrepareOSDResources returns the resources for the OSDs prepare job +func GetPrepareOSDResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyPrepareOSD] +} + +// GetCmdReporterResources returns the resources for the detect version job +func GetCmdReporterResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyCmdReporter] +} + +// GetCrashCollectorResources returns the resources for the crash daemon +func GetCrashCollectorResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyCrashCollector] +} + +// GetLogCollectorResources returns the resources for the logo 
collector +func GetLogCollectorResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyLogCollector] +} + +// GetCleanupResources returns the resources for the cleanup job +func GetCleanupResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyCleanup] +} + +// GetCephExporterResources returns the resources for the cleanup job +func GetCephExporterResources(p ResourceSpec) v1.ResourceRequirements { + return p[ResourcesKeyCephExporter] +} diff --git a/external/ceph.rook.io/v1/scc.go b/external/ceph.rook.io/v1/scc.go new file mode 100644 index 00000000..8a76c156 --- /dev/null +++ b/external/ceph.rook.io/v1/scc.go @@ -0,0 +1,81 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + secv1 "github.com/openshift/api/security/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewSecurityContextConstraints returns a new SecurityContextConstraints for Rook-Ceph to run on +// OpenShift. 
+func NewSecurityContextConstraints(name string, namespaces ...string) *secv1.SecurityContextConstraints { + return &secv1.SecurityContextConstraints{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "security.openshift.io/v1", + Kind: "SecurityContextConstraints", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + AllowPrivilegedContainer: true, + AllowHostDirVolumePlugin: true, + ReadOnlyRootFilesystem: false, + AllowHostIPC: true, + AllowHostNetwork: false, + AllowHostPorts: false, + AllowedCapabilities: []corev1.Capability{"MKNOD"}, + RequiredDropCapabilities: []corev1.Capability{"ALL"}, + DefaultAddCapabilities: []corev1.Capability{}, + RunAsUser: secv1.RunAsUserStrategyOptions{ + Type: secv1.RunAsUserStrategyRunAsAny, + }, + SELinuxContext: secv1.SELinuxContextStrategyOptions{ + Type: secv1.SELinuxStrategyMustRunAs, + }, + FSGroup: secv1.FSGroupStrategyOptions{ + Type: secv1.FSGroupStrategyMustRunAs, + }, + SupplementalGroups: secv1.SupplementalGroupsStrategyOptions{ + Type: secv1.SupplementalGroupsStrategyRunAsAny, + }, + Volumes: []secv1.FSType{ + secv1.FSTypeConfigMap, + secv1.FSTypeDownwardAPI, + secv1.FSTypeEmptyDir, + secv1.FSTypeHostPath, + secv1.FSTypePersistentVolumeClaim, + secv1.FSProjected, + secv1.FSTypeSecret, + }, + Users: func() (users []string) { + for _, ns := range namespaces { + users = append(users, []string{ + fmt.Sprintf("system:serviceaccount:%s:rook-ceph-system", ns), + fmt.Sprintf("system:serviceaccount:%s:rook-ceph-default", ns), + fmt.Sprintf("system:serviceaccount:%s:rook-ceph-mgr", ns), + fmt.Sprintf("system:serviceaccount:%s:rook-ceph-osd", ns), + fmt.Sprintf("system:serviceaccount:%s:rook-ceph-rgw", ns), + }...) + } + return + }(), + } +} diff --git a/external/ceph.rook.io/v1/scc_test.go b/external/ceph.rook.io/v1/scc_test.go new file mode 100644 index 00000000..37553fce --- /dev/null +++ b/external/ceph.rook.io/v1/scc_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewSecurityContextConstraints(t *testing.T) { + name := "rook-ceph" + scc := NewSecurityContextConstraints(name, name) + assert.True(t, scc.AllowPrivilegedContainer) + assert.Equal(t, name, scc.Name) +} diff --git a/external/ceph.rook.io/v1/spec_test.go b/external/ceph.rook.io/v1/spec_test.go new file mode 100644 index 00000000..88e95992 --- /dev/null +++ b/external/ceph.rook.io/v1/spec_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func TestClusterSpecMarshal(t *testing.T) { + specYaml := []byte(` +dataDirHostPath: /var/lib/rook +mon: + count: 5 + allowMultiplePerNode: false +network: + hostNetwork: true +storage: + useAllNodes: false + useAllDevices: false + deviceFilter: "^sd." + devicePathFilter: "^/dev/disk/by-path/pci-.*" + location: "region=us-west,datacenter=delmar" + config: + metadataDevice: "nvme01" + databaseSizeMB: "1024" + nodes: + - name: "node2" + deviceFilter: "^foo*" + devicePathFilter: "^/dev/disk/by-id/.*foo.*"`) + + // convert the raw spec yaml into JSON + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + fmt.Printf("rawJSON: %s\n", string(rawJSON)) + + // unmarshal the JSON into a strongly typed storage spec object + var clusterSpec ClusterSpec + err = json.Unmarshal(rawJSON, &clusterSpec) + assert.Nil(t, err) + + // the unmarshalled storage spec should equal the expected spec below + useAllDevices := false + expectedSpec := ClusterSpec{ + Mon: MonSpec{ + Count: 5, + AllowMultiplePerNode: false, + }, + DataDirHostPath: "/var/lib/rook", + Network: NetworkSpec{ + HostNetwork: true, + }, + Storage: StorageScopeSpec{ + UseAllNodes: false, + Selection: Selection{ + UseAllDevices: &useAllDevices, + DeviceFilter: "^sd.", + DevicePathFilter: "^/dev/disk/by-path/pci-.*", + }, + Config: map[string]string{ + "metadataDevice": "nvme01", + "databaseSizeMB": "1024", + }, + Nodes: []Node{ + { + Name: "node2", + Selection: Selection{ + DeviceFilter: "^foo*", + DevicePathFilter: "^/dev/disk/by-id/.*foo.*", + }, + }, + }, + }, + } + + assert.Equal(t, expectedSpec, clusterSpec) +} + +func newTrue() *bool { + t := true + return &t +} + +func newFalse() *bool { + t := false + return &t +} + +func newInt(val int) *int { + return &val +} + +func newString(val string) *string { + return &val +} + +func 
TestObjectStoreSpecMarshalSwiftAndKeystone(t *testing.T) { + // Assert that the new ObjectStoreSpec fields specified in are correctly parsed + specYaml := []byte(` +auth: + keystone: + url: https://keystone:5000/ + acceptedRoles: ["_member_", "service", "admin"] + implicitTenants: swift + tokenCacheSize: 1000 + revocationInterval: 1200 + serviceUserSecretName: rgw-service-user +protocols: + swift: + accountInUrl: true + urlPrefix: /example + versioningEnabled: false + s3: + enabled: false + authUseKeystone: true +`) + rawJSON, err := yaml.ToJSON(specYaml) + assert.Nil(t, err) + fmt.Printf("rawJSON: %s\n", string(rawJSON)) + + // unmarshal the JSON into a strongly typed storage spec object + var objectStoreSpec ObjectStoreSpec + err = json.Unmarshal(rawJSON, &objectStoreSpec) + assert.Nil(t, err) + + // the unmarshalled storage spec should equal the expected spec below + expectedSpec := ObjectStoreSpec{ + Auth: AuthSpec{ + Keystone: &KeystoneSpec{ + Url: "https://keystone:5000/", + AcceptedRoles: []string{"_member_", "service", "admin"}, + ImplicitTenants: "swift", + TokenCacheSize: newInt(1000), + RevocationInterval: newInt(1200), + ServiceUserSecretName: "rgw-service-user", + }, + }, + Protocols: ProtocolSpec{ + S3: &S3Spec{ + Enabled: newFalse(), + AuthUseKeystone: newTrue(), + }, + Swift: &SwiftSpec{ + AccountInUrl: newTrue(), + UrlPrefix: newString("/example"), + VersioningEnabled: newFalse(), + }, + }, + } + + assert.Equal(t, expectedSpec, objectStoreSpec) +} diff --git a/external/ceph.rook.io/v1/status.go b/external/ceph.rook.io/v1/status.go new file mode 100644 index 00000000..ce91612f --- /dev/null +++ b/external/ceph.rook.io/v1/status.go @@ -0,0 +1,74 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// conditions must be non-nil. +// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to +// newCondition, LastTransitionTime is set to now if the new status differs from the old status) +// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) +func SetStatusCondition(conditions *[]Condition, newCondition Condition) { + if conditions == nil { + return + } + + now := metav1.NewTime(time.Now()) + + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + if newCondition.LastTransitionTime.IsZero() { + newCondition.LastTransitionTime = now + newCondition.LastHeartbeatTime = now + } + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + if !newCondition.LastTransitionTime.IsZero() { + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } else { + existingCondition.LastTransitionTime = now + } + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message + if !newCondition.LastHeartbeatTime.IsZero() { + existingCondition.LastHeartbeatTime = newCondition.LastHeartbeatTime + } else { + existingCondition.LastHeartbeatTime = now + } +} + +// 
FindStatusCondition finds the conditionType in conditions. +func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} diff --git a/external/ceph.rook.io/v1/status_test.go b/external/ceph.rook.io/v1/status_test.go new file mode 100644 index 00000000..cdf9a622 --- /dev/null +++ b/external/ceph.rook.io/v1/status_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "reflect" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Based on code from https://github.com/kubernetes/apimachinery/blob/master/pkg/api/meta/conditions.go + +func TestSetStatusCondition(t *testing.T) { + oneHourBefore := metav1.Time{Time: time.Now().Add(-1 * time.Hour)} + oneHourAfter := metav1.Time{Time: time.Now().Add(1 * time.Hour)} + + tests := []struct { + name string + conditions []Condition + toAdd Condition + expected []Condition + }{ + { + name: "should-add", + conditions: []Condition{ + {Type: "first"}, + {Type: "third"}, + }, + toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + expected: []Condition{ + {Type: "first"}, + {Type: "third"}, + {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + }, + }, + { + name: "use-supplied-transition-time", + conditions: []Condition{ + {Type: "first"}, + {Type: "second", Status: v1.ConditionFalse}, + {Type: "third"}, + }, + toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + expected: []Condition{ + {Type: "first"}, + {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + {Type: "third"}, + }, + }, + { + name: "update-fields", + conditions: []Condition{ + {Type: "first"}, + {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore}, + {Type: "third"}, + }, + toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourAfter, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, + expected: []Condition{ + 
{Type: "first"}, + {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, + {Type: "third"}, + }, + }, + { + name: "empty-conditions", + conditions: []Condition{}, + toAdd: Condition{Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + expected: []Condition{ + {Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + SetStatusCondition(&test.conditions, test.toAdd) + if !reflect.DeepEqual(test.conditions, test.expected) { + t.Error(test.conditions) + } + }) + } +} + +func TestFindStatusCondition(t *testing.T) { + tests := []struct { + name string + conditions []Condition + conditionType string + expected *Condition + }{ + { + name: "not-present", + conditions: []Condition{ + {Type: "first"}, + }, + conditionType: "second", + expected: nil, + }, + { + name: "present", + conditions: []Condition{ + {Type: "first"}, + {Type: "second"}, + }, + conditionType: "second", + expected: &Condition{Type: "second"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := FindStatusCondition(test.conditions, ConditionType(test.conditionType)) + if !reflect.DeepEqual(actual, test.expected) { + t.Error(actual) + } + }) + } +} diff --git a/external/ceph.rook.io/v1/storage.go b/external/ceph.rook.io/v1/storage.go new file mode 100644 index 00000000..36a66ebd --- /dev/null +++ b/external/ceph.rook.io/v1/storage.go @@ -0,0 +1,198 @@ +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import "fmt" + +type StoreType string + +const ( + // StoreTypeBlueStore is the bluestore backend storage for OSDs + StoreTypeBlueStore StoreType = "bluestore" + + // StoreTypeBlueStoreRDR is the bluestore-rdr backed storage for OSDs + StoreTypeBlueStoreRDR StoreType = "bluestore-rdr" +) + +// AnyUseAllDevices gets whether to use all devices +func (s *StorageScopeSpec) AnyUseAllDevices() bool { + if s.Selection.GetUseAllDevices() { + return true + } + + for _, n := range s.Nodes { + if n.Selection.GetUseAllDevices() { + return true + } + } + + return false +} + +// ClearUseAllDevices clears all devices +func (s *StorageScopeSpec) ClearUseAllDevices() { + clear := false + s.Selection.UseAllDevices = &clear + for i := range s.Nodes { + s.Nodes[i].Selection.UseAllDevices = &clear + } +} + +// NodeExists returns true if the node exists in the storage spec. False otherwise. +func (s *StorageScopeSpec) NodeExists(nodeName string) bool { + for i := range s.Nodes { + if s.Nodes[i].Name == nodeName { + return true + } + } + return false +} + +// Fully resolves the config of the given node name, taking into account cluster level and node level specified config. +// In general, the more fine grained the configuration is specified, the more precedence it takes. Fully resolved +// configuration for the node has the following order of precedence. 
+// 1) Node (config defined on the node itself) +// 2) Cluster (config defined on the cluster) +// 3) Default values (if no config exists for the node or cluster) +func (s *StorageScopeSpec) ResolveNode(nodeName string) *Node { + // find the requested storage node first, if it exists + var node *Node + for i := range s.Nodes { + if s.Nodes[i].Name == nodeName { + node = &(s.Nodes[i]) + break + } + } + + if node == nil { + // a node with the given name was not found + return nil + } + if node.Config == nil { + node.Config = map[string]string{} + } + + // now resolve all properties that haven't already been set on the node + s.resolveNodeSelection(node) + s.resolveNodeConfig(node) + + return node +} + +func (s *StorageScopeSpec) resolveNodeSelection(node *Node) { + if node.Selection.UseAllDevices == nil { + if s.Selection.UseAllDevices != nil { + // the node does not have a value specified for use all devices, but the cluster does. Use the cluster's. + node.Selection.UseAllDevices = s.Selection.UseAllDevices + } else { + // neither node nor cluster have a value set for use all devices, use the default value. + node.Selection.UseAllDevices = newBool(false) + } + } + + resolveString(&(node.Selection.DeviceFilter), s.Selection.DeviceFilter, "") + resolveString(&(node.Selection.DevicePathFilter), s.Selection.DevicePathFilter, "") + + if len(node.Selection.Devices) == 0 { + node.Selection.Devices = s.Devices + } + + if len(node.Selection.VolumeClaimTemplates) == 0 { + node.Selection.VolumeClaimTemplates = s.VolumeClaimTemplates + } +} + +func (s *StorageScopeSpec) resolveNodeConfig(node *Node) { + // check for any keys the parent scope has that the node does not + for scopeKey, scopeVal := range s.Config { + if _, ok := node.Config[scopeKey]; !ok { + // the node's config does not have an entry that the parent scope does, add the parent's + // value for that key to the node's config. 
+ node.Config[scopeKey] = scopeVal + } + } +} + +// NodeWithNameExists returns true if the storage spec defines a node with the given name. +func (s *StorageScopeSpec) NodeWithNameExists(name string) bool { + for _, n := range s.Nodes { + if name == n.Name { + return true + } + } + return false +} + +// GetUseAllDevices return if all devices should be used. +func (s *Selection) GetUseAllDevices() bool { + return s.UseAllDevices != nil && *(s.UseAllDevices) +} + +func resolveString(setting *string, parent, defaultVal string) { + if *setting == "" { + if parent != "" { + *setting = parent + } else { + *setting = defaultVal + } + } +} + +func newBool(val bool) *bool { + return &val +} + +// NodesByName implements an interface to sort nodes by name +type NodesByName []Node + +func (s NodesByName) Len() int { + return len(s) +} + +func (s NodesByName) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s NodesByName) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +// IsOnPVCEncrypted returns whether a Ceph Cluster on PVC will be encrypted +func (s *StorageScopeSpec) IsOnPVCEncrypted() bool { + for _, storageClassDeviceSet := range s.StorageClassDeviceSets { + if storageClassDeviceSet.Encrypted { + return true + } + } + + return false +} + +// GetOSDStore returns osd backend store type provided in the cluster spec +func (s *StorageScopeSpec) GetOSDStore() string { + if s.Store.Type == "" { + return string(StoreTypeBlueStore) + } + return s.Store.Type +} + +// GetOSDStoreFlag returns osd backend store type prefixed with "--" +func (s *StorageScopeSpec) GetOSDStoreFlag() string { + if s.Store.Type == "" { + return fmt.Sprintf("--%s", StoreTypeBlueStore) + } + return fmt.Sprintf("--%s", s.Store.Type) +} diff --git a/external/ceph.rook.io/v1/storage_test.go b/external/ceph.rook.io/v1/storage_test.go new file mode 100644 index 00000000..f431c86c --- /dev/null +++ b/external/ceph.rook.io/v1/storage_test.go @@ -0,0 +1,291 @@ +/* +Copyright 2018 The Rook Authors. 
All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNodeExists(t *testing.T) { + t.Run("does not exist - no nodes specified", func(t *testing.T) { + spec := StorageScopeSpec{} + assert.False(t, spec.NodeExists("does-not-exist")) + }) + + t.Run("exists - single node specified", func(t *testing.T) { + spec := StorageScopeSpec{ + Nodes: []Node{ + {Name: "node1"}, // node gets nothing but its name set + }, + } + assert.True(t, spec.NodeExists("node1")) + }) + + t.Run("exists and not exists - multiple nodes specified", func(t *testing.T) { + spec := StorageScopeSpec{ + Nodes: []Node{ + {Name: "node1"}, // node gets nothing but its name set + {Name: "node3"}, + {Name: "node4"}, + }, + } + assert.True(t, spec.NodeExists("node1")) + assert.False(t, spec.NodeExists("node2")) + assert.True(t, spec.NodeExists("node3")) + assert.True(t, spec.NodeExists("node4")) + assert.False(t, spec.NodeExists("node5")) + assert.False(t, spec.NodeExists("does-not-exist")) + }) +} + +func TestResolveNodeNotExist(t *testing.T) { + // a nonexistent node should return nil + storageSpec := StorageScopeSpec{} + node := storageSpec.ResolveNode("fake node") + assert.Nil(t, node) +} + +func TestResolveNodeDefaultValues(t *testing.T) { + // a node with no properties and none defined in the cluster storage spec should get the default values + storageSpec := StorageScopeSpec{ + Nodes: []Node{ + {Name: "node1"}, // 
node gets nothing but its name set + }, + } + + node := storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, "", node.Selection.DeviceFilter) + assert.Equal(t, "", node.Selection.DevicePathFilter) + assert.False(t, node.Selection.GetUseAllDevices()) + assert.Equal(t, storageSpec.Devices, node.Devices) +} + +func TestResolveNodeInherentFromCluster(t *testing.T) { + // a node with no properties defined should inherit them from the cluster storage spec + storageSpec := StorageScopeSpec{ + Selection: Selection{ + DeviceFilter: "^sd.", + DevicePathFilter: "^/dev/disk/by-path/pci-.*", + Devices: []Device{{Name: "sda"}}, + }, + Config: map[string]string{ + "foo": "bar", + }, + Nodes: []Node{ + {Name: "node1"}, // node gets nothing but its name set + }, + } + + node := storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, "^sd.", node.Selection.DeviceFilter) + assert.Equal(t, "^/dev/disk/by-path/pci-.*", node.Selection.DevicePathFilter) + assert.False(t, node.Selection.GetUseAllDevices()) + assert.Equal(t, "bar", node.Config["foo"]) + assert.Equal(t, []Device{{Name: "sda"}}, node.Devices) +} + +func TestResolveNodeSpecificProperties(t *testing.T) { + // a node with its own specific properties defined should keep those values, regardless of what the global cluster config is + storageSpec := StorageScopeSpec{ + Selection: Selection{ + DeviceFilter: "^sd.", + DevicePathFilter: "^/dev/disk/by-path/pci-.*", + }, + Config: map[string]string{ + "foo": "bar", + "baz": "biz", + }, + Nodes: []Node{ + { + Name: "node1", // node has its own config that should override cluster level config + Selection: Selection{ + DeviceFilter: "nvme.*", + DevicePathFilter: "^/dev/disk/by-id/.*foo.*", + Devices: []Device{{Name: "device026"}}, + }, + Config: map[string]string{ + "foo": "node1bar", + }, + }, + }, + } + + node := storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.False(t, node.Selection.GetUseAllDevices()) + assert.Equal(t, 
"nvme.*", node.Selection.DeviceFilter) + assert.Equal(t, "^/dev/disk/by-id/.*foo.*", node.Selection.DevicePathFilter) + assert.Equal(t, []Device{{Name: "device026"}}, node.Devices) + assert.Equal(t, "node1bar", node.Config["foo"]) + assert.Equal(t, "biz", node.Config["baz"]) +} + +func TestResolveNodeUseAllDevices(t *testing.T) { + storageSpec := StorageScopeSpec{ + Selection: Selection{UseAllDevices: newBool(true)}, // UseAllDevices is set to true on the storage spec + Nodes: []Node{ + {Name: "node1"}, // node gets nothing but its name set + }, + } + + node := storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.True(t, node.Selection.GetUseAllDevices()) +} + +func TestUseAllDevices(t *testing.T) { + storageSpec := StorageScopeSpec{} + assert.False(t, storageSpec.AnyUseAllDevices()) + + storageSpec = StorageScopeSpec{ + Selection: Selection{ + UseAllDevices: newBool(true), + }, // UseAllDevices is set to true on the storage spec + } + assert.True(t, storageSpec.AnyUseAllDevices()) + + storageSpec = StorageScopeSpec{ + Selection: Selection{UseAllDevices: newBool(false)}, + Nodes: []Node{ + { + Name: "node1", + Selection: Selection{UseAllDevices: newBool(true)}, + }, + }, + } + assert.True(t, storageSpec.AnyUseAllDevices()) +} + +func TestClearUseAllDevices(t *testing.T) { + // create a storage spec with use all devices set to true for the cluster and for all nodes + storageSpec := StorageScopeSpec{ + Selection: Selection{UseAllDevices: newBool(true)}, + Nodes: []Node{ + { + Name: "node1", + Selection: Selection{UseAllDevices: newBool(true)}, + }, + }, + } + assert.True(t, storageSpec.AnyUseAllDevices()) + + // now clear the use all devices field, it should be cleared from the entire cluster and its nodes + storageSpec.ClearUseAllDevices() + assert.False(t, storageSpec.AnyUseAllDevices()) +} + +func TestClusterDirsDevsInherit(t *testing.T) { + // test for no directories or devices given + storageSpec := StorageScopeSpec{ + Nodes: []Node{ + { + Name: 
"node1", + }, + }, + } + node := storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, storageSpec.Devices, node.Devices) + + // test if cluster wide devices are inherited to no-directories/devices node + storageSpec = StorageScopeSpec{ + Selection: Selection{ + Devices: []Device{{Name: "device1"}}, + }, + Nodes: []Node{ + { + Name: "node1", + }, + }, + } + node = storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, []Device{{Name: "device1"}}, node.Devices) + + // test if node directories and devices are used + storageSpec = StorageScopeSpec{ + Nodes: []Node{ + { + Name: "node1", + Selection: Selection{ + Devices: []Device{{Name: "device2"}}, + }, + }, + }, + } + node = storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, []Device{{Name: "device2"}}, node.Devices) + + // test if cluster wide devices are and aren't inherited to nodes with and without directories/devices + storageSpec = StorageScopeSpec{ + Selection: Selection{ + Devices: []Device{{Name: "device4"}}, + }, + Nodes: []Node{ + { + Name: "node1", + Selection: Selection{ + Devices: []Device{{Name: "device3"}}, + }, + }, + { + Name: "node2", + }, + }, + } + // node1 keeps its specified devices + node = storageSpec.ResolveNode("node1") + assert.NotNil(t, node) + assert.Equal(t, []Device{{Name: "device3"}}, node.Devices) + + // node2 inherits the cluster wide devices since it specified none of its own + node = storageSpec.ResolveNode("node2") + assert.NotNil(t, node) + assert.Equal(t, []Device{{Name: "device4"}}, node.Devices) +} + +func TestStorageScopeSpec_NodeWithNameExists(t *testing.T) { + spec := &StorageScopeSpec{ + Nodes: []Node{}, + } + + assert.False(t, spec.NodeWithNameExists("node0")) + + spec.Nodes = []Node{ + {Name: "node0-hostname"}, + {Name: "node1"}, + {Name: "node2"}, + } + assert.True(t, spec.NodeWithNameExists("node0-hostname")) + assert.False(t, spec.NodeWithNameExists("node0")) + assert.True(t, 
spec.NodeWithNameExists("node1")) + assert.True(t, spec.NodeWithNameExists("node2")) +} + +func TestIsOnPVCEncrypted(t *testing.T) { + s := &StorageScopeSpec{} + assert.False(t, s.IsOnPVCEncrypted()) + + s.StorageClassDeviceSets = []StorageClassDeviceSet{ + {Encrypted: true}, + } + assert.True(t, s.IsOnPVCEncrypted()) +} diff --git a/external/ceph.rook.io/v1/topic.go b/external/ceph.rook.io/v1/topic.go new file mode 100644 index 00000000..40d9d8f9 --- /dev/null +++ b/external/ceph.rook.io/v1/topic.go @@ -0,0 +1,84 @@ +/* +Copyright 2021 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+func validateURI(uri string, expectedSchemas []string) error {
+	parsedURI, err := url.Parse(uri)
+	if err != nil {
+		return err
+	}
+	schema := strings.ToLower(parsedURI.Scheme)
+	for _, s := range expectedSchemas {
+		if s == schema {
+			return nil
+		}
+	}
+	return errors.Errorf("URI schema %q not in %v", schema, expectedSchemas)
+}
+
+func ValidateHTTPSpec(s *HTTPEndpointSpec) error {
+	return validateURI(s.URI, []string{"http", "https"})
+}
+
+func ValidateAMQPSpec(s *AMQPEndpointSpec) error {
+	return validateURI(s.URI, []string{"amqp", "amqps"})
+}
+
+func ValidateKafkaSpec(s *KafkaEndpointSpec) error {
+	return validateURI(s.URI, []string{"kafka"})
+}
+
+// ValidateTopicSpec validates the bucket notification topic arguments
+func (t *CephBucketTopic) ValidateTopicSpec() error {
+	hasEndpoint := false
+	if t.Spec.Endpoint.HTTP != nil {
+		hasEndpoint = true
+		if err := ValidateHTTPSpec(t.Spec.Endpoint.HTTP); err != nil {
+			return err
+		}
+	}
+	if t.Spec.Endpoint.AMQP != nil {
+		if hasEndpoint {
+			return errors.New("multiple endpoint specs")
+		}
+		hasEndpoint = true
+		if err := ValidateAMQPSpec(t.Spec.Endpoint.AMQP); err != nil {
+			return err
+		}
+	}
+	if t.Spec.Endpoint.Kafka != nil {
+		if hasEndpoint {
+			return errors.New("multiple endpoint specs")
+		}
+		hasEndpoint = true
+		if err := ValidateKafkaSpec(t.Spec.Endpoint.Kafka); err != nil {
+			return err
+		}
+	}
+
+	if !hasEndpoint {
+		return errors.New("missing endpoint spec")
+	}
+	return nil
+}
diff --git a/external/ceph.rook.io/v1/topic_test.go b/external/ceph.rook.io/v1/topic_test.go
new file mode 100644
index 00000000..fee59a69
--- /dev/null
+++ b/external/ceph.rook.io/v1/topic_test.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2021 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidateHTTPTopicSpec(t *testing.T) { + topic := &CephBucketTopic{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fish-topic", + }, + Spec: BucketTopicSpec{ + OpaqueData: "me@email.com", + Persistent: true, + Endpoint: TopicEndpointSpec{ + HTTP: &HTTPEndpointSpec{ + URI: "http://myserver:9999", + DisableVerifySSL: false, + SendCloudEvents: false, + }, + }, + }, + } + + t.Run("valid", func(t *testing.T) { + err := topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("invalid endpoint host", func(t *testing.T) { + topic.Spec.Endpoint.HTTP.URI = "http://my server:9999" + err := topic.ValidateTopicSpec() + assert.Error(t, err) + }) + t.Run("https host", func(t *testing.T) { + topic.Spec.Endpoint.HTTP.URI = "https://127.0.0.1:9999" + err := topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("invalid endpoint schema", func(t *testing.T) { + topic.Spec.Endpoint.HTTP.URI = "kaboom://myserver:9999" + err := topic.ValidateTopicSpec() + assert.Error(t, err) + }) +} + +func TestValidateAMQPTopicSpec(t *testing.T) { + topic := &CephBucketTopic{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fish-topic", + }, + Spec: BucketTopicSpec{ + OpaqueData: "me@email.com", + Persistent: true, + Endpoint: TopicEndpointSpec{ + AMQP: &AMQPEndpointSpec{ + URI: "amqp://myserver:9999", + Exchange: "fish-ex", + DisableVerifySSL: true, + AckLevel: "broker", + }, + }, + }, + } + + t.Run("valid", func(t *testing.T) { + err := 
topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("amqps host", func(t *testing.T) { + topic.Spec.Endpoint.AMQP.URI = "amqps://myserver:9999" + err := topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("endpoint schema mismatch", func(t *testing.T) { + topic.Spec.Endpoint.AMQP.URI = "http://myserver:9999" + err := topic.ValidateTopicSpec() + assert.Error(t, err) + }) +} + +func TestValidateKafkaTopicSpec(t *testing.T) { + topic := &CephBucketTopic{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fish-topic", + }, + Spec: BucketTopicSpec{ + OpaqueData: "me@email.com", + Persistent: true, + Endpoint: TopicEndpointSpec{ + Kafka: &KafkaEndpointSpec{ + URI: "kafka://myserver:9999", + UseSSL: true, + DisableVerifySSL: true, + AckLevel: "broker", + Mechanism: "SCRAM-SHA-512", + }, + }, + }, + } + + t.Run("valid", func(t *testing.T) { + err := topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("endpoint schema mismatch", func(t *testing.T) { + topic.Spec.Endpoint.Kafka.URI = "http://myserver:9999" + err := topic.ValidateTopicSpec() + assert.Error(t, err) + }) +} + +func TestInvalidTopicSpec(t *testing.T) { + topic := &CephBucketTopic{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fish-topic", + }, + Spec: BucketTopicSpec{ + OpaqueData: "me@email.com", + Persistent: true, + Endpoint: TopicEndpointSpec{ + Kafka: &KafkaEndpointSpec{ + URI: "kafka://myserver:9999", + UseSSL: true, + DisableVerifySSL: true, + AckLevel: "broker", + }, + AMQP: &AMQPEndpointSpec{ + URI: "amqp://myserver:9999", + Exchange: "fish-ex", + DisableVerifySSL: true, + AckLevel: "broker", + }, + }, + }, + } + + t.Run("too many endpoint specs", func(t *testing.T) { + err := topic.ValidateTopicSpec() + assert.Error(t, err) + }) + t.Run("valid", func(t *testing.T) { + topic.Spec.Endpoint.AMQP = nil + err := topic.ValidateTopicSpec() + assert.NoError(t, err) + }) + t.Run("too few endpoint specs", func(t *testing.T) { + topic.Spec.Endpoint.Kafka = nil + err := 
topic.ValidateTopicSpec() + assert.Error(t, err) + }) +} diff --git a/external/ceph.rook.io/v1/types.go b/external/ceph.rook.io/v1/types.go new file mode 100755 index 00000000..affb98aa --- /dev/null +++ b/external/ceph.rook.io/v1/types.go @@ -0,0 +1,4082 @@ +/* +Copyright 2020 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// *************************************************************************** +// IMPORTANT FOR CODE GENERATION +// If the types in this file are updated, you will need to run +// `make codegen` to generate the new types under the client/clientset folder. 
+// *************************************************************************** + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephCluster is a Ceph storage cluster +// +kubebuilder:printcolumn:name="DataDirHostPath",type=string,JSONPath=`.spec.dataDirHostPath`,description="Directory used on the K8s nodes" +// +kubebuilder:printcolumn:name="MonCount",type=string,JSONPath=`.spec.mon.count`,description="Number of MONs" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Message",type=string,JSONPath=`.status.message`,description="Message" +// +kubebuilder:printcolumn:name="Health",type=string,JSONPath=`.status.ceph.health`,description="Ceph Health" +// +kubebuilder:printcolumn:name="External",type=boolean,JSONPath=`.spec.external.enable` +// +kubebuilder:printcolumn:name="FSID",type=string,JSONPath=`.status.ceph.fsid`,description="Ceph FSID" +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=ceph +type CephCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ClusterSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + // +nullable + Status ClusterStatus `json:"status,omitempty"` +} + +// CephClusterHealthCheckSpec represent the healthcheck for Ceph daemons +type CephClusterHealthCheckSpec struct { + // DaemonHealth is the health check for a given daemon + // +optional + // +nullable + DaemonHealth DaemonHealthSpec `json:"daemonHealth,omitempty"` + // LivenessProbe allows changing the livenessProbe configuration for a given daemon + // +optional + LivenessProbe map[KeyType]*ProbeSpec `json:"livenessProbe,omitempty"` + // StartupProbe allows changing the startupProbe configuration for a given daemon + // +optional + StartupProbe map[KeyType]*ProbeSpec 
`json:"startupProbe,omitempty"` +} + +// DaemonHealthSpec is a daemon health check +type DaemonHealthSpec struct { + // Status represents the health check settings for the Ceph health + // +optional + // +nullable + Status HealthCheckSpec `json:"status,omitempty"` + // Monitor represents the health check settings for the Ceph monitor + // +optional + // +nullable + Monitor HealthCheckSpec `json:"mon,omitempty"` + // ObjectStorageDaemon represents the health check settings for the Ceph OSDs + // +optional + // +nullable + ObjectStorageDaemon HealthCheckSpec `json:"osd,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephClusterList is a list of CephCluster +type CephClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephCluster `json:"items"` +} + +// ClusterSpec represents the specification of Ceph Cluster +type ClusterSpec struct { + // The version information that instructs Rook to orchestrate a particular version of Ceph. + // +optional + // +nullable + CephVersion CephVersionSpec `json:"cephVersion,omitempty"` + + // A spec for available storage in the cluster and how it should be used + // +optional + // +nullable + Storage StorageScopeSpec `json:"storage,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +nullable + // +optional + Annotations AnnotationsSpec `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Labels LabelsSpec `json:"labels,omitempty"` + + // The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations). 
+ // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement PlacementSpec `json:"placement,omitempty"` + + // Network related configuration + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Network NetworkSpec `json:"network,omitempty"` + + // Resources set resource requests and limits + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources ResourceSpec `json:"resources,omitempty"` + + // PriorityClassNames sets priority classes on components + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + PriorityClassNames PriorityClassNamesSpec `json:"priorityClassNames,omitempty"` + + // The path on the host where config and data can be persisted + // +kubebuilder:validation:Pattern=`^/(\S+)` + // +kubebuilder:validation:XValidation:message="DataDirHostPath is immutable",rule="self == oldSelf" + // +optional + DataDirHostPath string `json:"dataDirHostPath,omitempty"` + + // SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails + // +optional + SkipUpgradeChecks bool `json:"skipUpgradeChecks,omitempty"` + + // ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean + // +optional + ContinueUpgradeAfterChecksEvenIfNotHealthy bool `json:"continueUpgradeAfterChecksEvenIfNotHealthy,omitempty"` + + // WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. + // If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + // if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would + // continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. 
+ // The default wait timeout is 10 minutes. + // +optional + WaitTimeoutForHealthyOSDInMinutes time.Duration `json:"waitTimeoutForHealthyOSDInMinutes,omitempty"` + + // UpgradeOSDRequiresHealthyPGs defines if OSD upgrade requires PGs are clean. If set to `true` OSD upgrade process won't start until PGs are healthy. + // This configuration will be ignored if `skipUpgradeChecks` is `true`. + // Default is false. + // +optional + UpgradeOSDRequiresHealthyPGs bool `json:"upgradeOSDRequiresHealthyPGs,omitempty"` + + // A spec for configuring disruption management. + // +nullable + // +optional + DisruptionManagement DisruptionManagementSpec `json:"disruptionManagement,omitempty"` + + // A spec for mon related options + // +optional + // +nullable + Mon MonSpec `json:"mon,omitempty"` + + // A spec for the crash controller + // +optional + // +nullable + CrashCollector CrashCollectorSpec `json:"crashCollector,omitempty"` + + // Dashboard settings + // +optional + // +nullable + Dashboard DashboardSpec `json:"dashboard,omitempty"` + + // Prometheus based Monitoring settings + // +optional + // +nullable + Monitoring MonitoringSpec `json:"monitoring,omitempty"` + + // Whether the Ceph Cluster is running external to this Kubernetes cluster + // mon, mgr, osd, mds, and discover daemons will not be created for external clusters. + // +optional + // +nullable + External ExternalSpec `json:"external,omitempty"` + + // A spec for mgr related options + // +optional + // +nullable + Mgr MgrSpec `json:"mgr,omitempty"` + + // Remove the OSD that is out and safe to remove only if this option is true + // +optional + RemoveOSDsIfOutAndSafeToRemove bool `json:"removeOSDsIfOutAndSafeToRemove,omitempty"` + + // Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster + // deletion is not imminent. 
+ // +optional + // +nullable + CleanupPolicy CleanupPolicySpec `json:"cleanupPolicy,omitempty"` + + // Internal daemon healthchecks and liveness probe + // +optional + // +nullable + HealthCheck CephClusterHealthCheckSpec `json:"healthCheck,omitempty"` + + // Security represents security settings + // +optional + // +nullable + Security ClusterSecuritySpec `json:"security,omitempty"` + + // Logging represents loggings settings + // +optional + // +nullable + LogCollector LogCollectorSpec `json:"logCollector,omitempty"` + + // CSI Driver Options applied per cluster. + // +optional + CSI CSIDriverSpec `json:"csi,omitempty"` + + // Ceph Config options + // +optional + // +nullable + CephConfig map[string]map[string]string `json:"cephConfig,omitempty"` + + // CephConfigFromSecret works exactly like CephConfig but takes config value from Secret Key reference. + // +optional + // +nullable + CephConfigFromSecret map[string]map[string]v1.SecretKeySelector `json:"cephConfigFromSecret,omitempty"` +} + +// CSIDriverSpec defines CSI Driver settings applied per cluster. +type CSIDriverSpec struct { + // ReadAffinity defines the read affinity settings for CSI driver. + // +optional + ReadAffinity ReadAffinitySpec `json:"readAffinity"` + // CephFS defines CSI Driver settings for CephFS driver. + // +optional + CephFS CSICephFSSpec `json:"cephfs,omitempty"` + // SkipUserCreation determines whether CSI users and their associated secrets should be skipped. + // If set to true, the user must manually manage these secrets. + // +optional + SkipUserCreation bool `json:"skipUserCreation,omitempty"` +} + +// CSICephFSSpec defines the settings for CephFS CSI driver. +type CSICephFSSpec struct { + // KernelMountOptions defines the mount options for kernel mounter. + // +optional + KernelMountOptions string `json:"kernelMountOptions,omitempty"` + // FuseMountOptions defines the mount options for ceph fuse mounter. 
+ // +optional + FuseMountOptions string `json:"fuseMountOptions,omitempty"` +} + +// ReadAffinitySpec defines the read affinity settings for CSI driver. +type ReadAffinitySpec struct { + // Enables read affinity for CSI driver. + // +optional + Enabled bool `json:"enabled"` + // CrushLocationLabels defines which node labels to use + // as CRUSH location. This should correspond to the values set in + // the CRUSH map. + // +optional + CrushLocationLabels []string `json:"crushLocationLabels,omitempty"` +} + +// LogCollectorSpec is the logging spec +type LogCollectorSpec struct { + // Enabled represents whether the log collector is enabled + // +optional + Enabled bool `json:"enabled,omitempty"` + // Periodicity is the periodicity of the log rotation. + // +kubebuilder:validation:Pattern=`^$|^(hourly|daily|weekly|monthly|1h|24h|1d)$` + // +optional + Periodicity string `json:"periodicity,omitempty"` + // MaxLogSize is the maximum size of the log per ceph daemons. Must be at least 1M. + // +optional + MaxLogSize *resource.Quantity `json:"maxLogSize,omitempty"` +} + +// SecuritySpec is security spec to include various security items such as kms +type SecuritySpec struct { + // KeyManagementService is the main Key Management option + // +optional + // +nullable + KeyManagementService KeyManagementServiceSpec `json:"kms,omitempty"` + // KeyRotation defines options for Key Rotation. + // +optional + // +nullable + KeyRotation KeyRotationSpec `json:"keyRotation,omitempty"` +} + +// ClusterSecuritySpec is the CephCluster security spec to include various security items such as kms +type ClusterSecuritySpec struct { + // KeyManagementService is the main Key Management option + // +optional + // +nullable + KeyManagementService KeyManagementServiceSpec `json:"kms,omitempty"` + // KeyRotation defines options for rotation of OSD disk encryption keys. 
+ // +optional + // +nullable + KeyRotation KeyRotationSpec `json:"keyRotation,omitempty"` + + // CephX configures CephX key settings. More: https://docs.ceph.com/en/latest/dev/cephx/ + // +optional + CephX ClusterCephxConfig `json:"cephx,omitempty"` +} + +type ClusterCephxConfig struct { + // Daemon configures CephX key settings for local Ceph daemons managed by Rook and part of the + // Ceph cluster. Daemon CephX keys can be rotated without affecting client connections. + Daemon CephxConfig `json:"daemon,omitempty"` + + // RBDMirrorPeer configures CephX key settings of the `rbd-mirror-peer` user that is used for creating + // bootstrap peer token used connect peer clusters. Rotating the `rbd-mirror-peer` user key will update + // the mirror peer token. + // Rotation will affect any existing peers connected to this cluster, so take care when exercising this option. + RBDMirrorPeer CephxConfig `json:"rbdMirrorPeer,omitempty"` + + // CSI configures CephX key rotation settings for the Ceph-CSI daemons in the current Kubernetes cluster. + // CSI key rotation can affect existing PV connections, so take care when exercising this option. + CSI CephXConfigWithPriorCount `json:"csi,omitempty"` +} + +type CephXConfigWithPriorCount struct { + CephxConfig `json:",inline"` // inline core CephxConfig + + // KeepPriorKeyCountMax tells Rook how many prior keys to keep active. + // Generally, this would be set to 1 to allow for a migration period for applications. + // If desired, set this to 0 to delete prior keys after migration. + // This config only applies to prior keys that already exist. + // If PriorKeyCount is set to 2 while only a single key currently exists, only a single prior key will be kept, + // and the reported status will only indicate the actual number of prior keys, + // not necessarily a reflection of PriorKeyCount config here. 
+ // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=10 + KeepPriorKeyCountMax uint8 `json:"keepPriorKeyCountMax,omitempty"` +} + +type CephxConfig struct { + // KeyRotationPolicy controls if and when CephX keys are rotated after initial creation. + // One of Disabled, or KeyGeneration. Default Disabled. + // +optional + // +kubebuilder:validation:Enum="";Disabled;KeyGeneration + KeyRotationPolicy CephxKeyRotationPolicy `json:"keyRotationPolicy,omitempty"` + + // KeyGeneration specifies the desired CephX key generation. This is used when KeyRotationPolicy + // is KeyGeneration and ignored for other policies. If this is set to greater than the current + // key generation, relevant keys will be rotated, and the generation value will be updated to + // this new value (generation values are not necessarily incremental, though that is the + // intended use case). If this is set to less than or equal to the current key generation, keys + // are not rotated. 
+ // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=4294967295 + // +kubebuilder:validation:XValidation:message="keyGeneration cannot be decreased",rule="self >= oldSelf" + KeyGeneration uint32 `json:"keyGeneration,omitempty"` +} + +type CephxKeyRotationPolicy string + +const ( + DisabledCephxKeyRotationPolicy CephxKeyRotationPolicy = "Disabled" + KeyGenerationCephxKeyRotationPolicy CephxKeyRotationPolicy = "KeyGeneration" +) + +// ObjectStoreSecuritySpec is spec to define security features like encryption +type ObjectStoreSecuritySpec struct { + // +optional + // +nullable + SecuritySpec `json:""` + + // The settings for supporting AWS-SSE:S3 with RGW + // +optional + // +nullable + ServerSideEncryptionS3 KeyManagementServiceSpec `json:"s3,omitempty"` +} + +// KeyManagementServiceSpec represent various details of the KMS server +type KeyManagementServiceSpec struct { + // ConnectionDetails contains the KMS connection details (address, port etc) + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ConnectionDetails map[string]string `json:"connectionDetails,omitempty"` + // TokenSecretName is the kubernetes secret containing the KMS token + // +optional + TokenSecretName string `json:"tokenSecretName,omitempty"` +} + +// KeyRotationSpec represents the settings for Key Rotation. +type KeyRotationSpec struct { + // Enabled represents whether the key rotation is enabled. + // +optional + // +kubebuilder:default=false + Enabled bool `json:"enabled,omitempty"` + // Schedule represents the cron schedule for key rotation. + // +optional + Schedule string `json:"schedule,omitempty"` +} + +// CephVersionSpec represents the settings for the Ceph version that Rook is orchestrating. 
+type CephVersionSpec struct { + // Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: + // The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags + // +optional + Image string `json:"image,omitempty"` + + // Whether to allow unsupported versions (do not set to true in production) + // +optional + AllowUnsupported bool `json:"allowUnsupported,omitempty"` + + // ImagePullPolicy describes a policy for if/when to pull a container image + // One of Always, Never, IfNotPresent. + // +kubebuilder:validation:Enum=IfNotPresent;Always;Never;"" + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// DashboardSpec represents the settings for the Ceph dashboard +type DashboardSpec struct { + // Enabled determines whether to enable the dashboard + // +optional + Enabled bool `json:"enabled,omitempty"` + // URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy + // +optional + URLPrefix string `json:"urlPrefix,omitempty"` + // Port is the dashboard webserver port + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port int `json:"port,omitempty"` + // SSL determines whether SSL should be used + // +optional + SSL bool `json:"ssl,omitempty"` + // Endpoint for the Prometheus host + // +optional + PrometheusEndpoint string `json:"prometheusEndpoint,omitempty"` + // Whether to verify the ssl endpoint for prometheus. Set to false for a self-signed cert. + // +optional + PrometheusEndpointSSLVerify bool `json:"prometheusEndpointSSLVerify,omitempty"` +} + +// MonitoringSpec represents the settings for Prometheus based Ceph monitoring +type MonitoringSpec struct { + // Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus + // types must exist or the creation will fail. Default is false. 
+ // +optional + Enabled bool `json:"enabled,omitempty"` + + // Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled. + // If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false. + // +optional + MetricsDisabled bool `json:"metricsDisabled,omitempty"` + + // ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint + // +optional + // +nullable + ExternalMgrEndpoints []v1.EndpointAddress `json:"externalMgrEndpoints,omitempty"` + + // ExternalMgrPrometheusPort Prometheus exporter port + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + ExternalMgrPrometheusPort uint16 `json:"externalMgrPrometheusPort,omitempty"` + + // Port is the prometheus server port + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +optional + Port int `json:"port,omitempty"` + + // Interval determines prometheus scrape interval + // +optional + Interval *metav1.Duration `json:"interval,omitempty"` + + // Ceph exporter configuration + // +optional + Exporter *CephExporterSpec `json:"exporter,omitempty"` +} + +type CephExporterSpec struct { + // Only performance counters greater than or equal to this option are fetched + // +kubebuilder:default=5 + PerfCountersPrioLimit int64 `json:"perfCountersPrioLimit,omitempty"` + + // Time to wait before sending requests again to exporter server (seconds) + // +kubebuilder:default=5 + StatsPeriodSeconds int64 `json:"statsPeriodSeconds,omitempty"` + + // Whether host networking is enabled for CephExporter. If not set, the network settings from CephCluster.spec.networking will be applied. 
+ // +nullable + // +optional + HostNetwork *bool `json:"hostNetwork,omitempty"` +} + +// ClusterStatus represents the status of a Ceph cluster +type ClusterStatus struct { + State ClusterState `json:"state,omitempty"` + Phase ConditionType `json:"phase,omitempty"` + Message string `json:"message,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` + CephStatus *CephStatus `json:"ceph,omitempty"` + Cephx ClusterCephxStatus `json:"cephx,omitempty"` + CephStorage *CephStorage `json:"storage,omitempty"` + CephVersion *ClusterVersion `json:"version,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// CephDaemonsVersions show the current ceph version for different ceph daemons +type CephDaemonsVersions struct { + // Mon shows Mon Ceph version + // +optional + Mon map[string]int `json:"mon,omitempty"` + // Mgr shows Mgr Ceph version + // +optional + Mgr map[string]int `json:"mgr,omitempty"` + // Osd shows Osd Ceph version + // +optional + Osd map[string]int `json:"osd,omitempty"` + // Rgw shows Rgw Ceph version + // +optional + Rgw map[string]int `json:"rgw,omitempty"` + // Mds shows Mds Ceph version + // +optional + Mds map[string]int `json:"mds,omitempty"` + // RbdMirror shows RbdMirror Ceph version + // +optional + RbdMirror map[string]int `json:"rbd-mirror,omitempty"` + // CephFSMirror shows CephFSMirror Ceph version + // +optional + CephFSMirror map[string]int `json:"cephfs-mirror,omitempty"` + // Overall shows overall Ceph version + // +optional + Overall map[string]int `json:"overall,omitempty"` +} + +// CephStatus is the details health of a Ceph Cluster +type CephStatus struct { + Health string `json:"health,omitempty"` + Details map[string]CephHealthMessage `json:"details,omitempty"` + LastChecked string `json:"lastChecked,omitempty"` + LastChanged string `json:"lastChanged,omitempty"` + PreviousHealth string 
`json:"previousHealth,omitempty"` + Capacity Capacity `json:"capacity,omitempty"` + // +optional + Versions *CephDaemonsVersions `json:"versions,omitempty"` + FSID string `json:"fsid,omitempty"` +} + +// Capacity is the capacity information of a Ceph Cluster +type Capacity struct { + TotalBytes uint64 `json:"bytesTotal,omitempty"` + UsedBytes uint64 `json:"bytesUsed,omitempty"` + AvailableBytes uint64 `json:"bytesAvailable,omitempty"` + LastUpdated string `json:"lastUpdated,omitempty"` +} + +// CephStorage represents flavors of Ceph Cluster Storage +type CephStorage struct { + DeviceClasses []DeviceClasses `json:"deviceClasses,omitempty"` + OSD OSDStatus `json:"osd,omitempty"` + DeprecatedOSDs map[string][]int `json:"deprecatedOSDs,omitempty"` +} + +// DeviceClasses represents device classes of a Ceph Cluster +type DeviceClasses struct { + Name string `json:"name,omitempty"` +} + +// OSDStatus represents OSD status of the ceph Cluster +type OSDStatus struct { + // StoreType is a mapping between the OSD backend stores and number of OSDs using these stores + StoreType map[string]int `json:"storeType,omitempty"` + MigrationStatus MigrationStatus `json:"migrationStatus,omitempty"` +} + +// MigrationStatus status represents the current status of any OSD migration. +type MigrationStatus struct { + Pending int `json:"pending,omitempty"` +} + +// ClusterVersion represents the version of a Ceph Cluster +type ClusterVersion struct { + Image string `json:"image,omitempty"` + Version string `json:"version,omitempty"` +} + +// CephHealthMessage represents the health message of a Ceph Cluster +type CephHealthMessage struct { + Severity string `json:"severity"` + Message string `json:"message"` +} + +// Condition represents a status condition on any Rook-Ceph Custom Resource. 
+type Condition struct { + Type ConditionType `json:"type,omitempty"` + Status v1.ConditionStatus `json:"status,omitempty"` + Reason ConditionReason `json:"reason,omitempty"` + Message string `json:"message,omitempty"` + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// ConditionReason is a reason for a condition +type ConditionReason string + +const ( + // ClusterCreatedReason is cluster created reason + ClusterCreatedReason ConditionReason = "ClusterCreated" + // ClusterConnectedReason is cluster connected reason + ClusterConnectedReason ConditionReason = "ClusterConnected" + // ClusterProgressingReason is cluster progressing reason + ClusterProgressingReason ConditionReason = "ClusterProgressing" + // ClusterDeletingReason is cluster deleting reason + ClusterDeletingReason ConditionReason = "ClusterDeleting" + // ClusterConnectingReason is cluster connecting reason + ClusterConnectingReason ConditionReason = "ClusterConnecting" + + // ReconcileSucceeded represents when a resource reconciliation was successful. + ReconcileSucceeded ConditionReason = "ReconcileSucceeded" + // ReconcileFailed represents when a resource reconciliation failed. + ReconcileFailed ConditionReason = "ReconcileFailed" + // ReconcileStarted represents when a resource reconciliation started. + ReconcileStarted ConditionReason = "ReconcileStarted" + // ReconcileRequeuing represents when a resource reconciliation requeue. + ReconcileRequeuing ConditionReason = "ReconcileRequeuing" + // DeletingReason represents when Rook has detected a resource object should be deleted. + DeletingReason ConditionReason = "Deleting" + // ObjectHasDependentsReason represents when a resource object has dependents that are blocking + // deletion. 
+ ObjectHasDependentsReason ConditionReason = "ObjectHasDependents" + // ObjectHasNoDependentsReason represents when a resource object has no dependents that are + // blocking deletion. + ObjectHasNoDependentsReason ConditionReason = "ObjectHasNoDependents" + // PoolNotEmptyReason represents when a pool contains images or snapshots that are blocking + // deletion. + PoolNotEmptyReason ConditionReason = "PoolNotEmpty" + // PoolEmptyReason represents when a pool does not contain images or snapshots that are blocking + // deletion. + PoolEmptyReason ConditionReason = "PoolEmpty" + // RadosNamespaceNotEmptyReason represents when a rados namespace contains images or snapshots that are blocking + // deletion. + RadosNamespaceNotEmptyReason ConditionReason = "RadosNamespaceNotEmpty" + // RadosNamespaceEmptyReason represents when a rados namespace does not contain images or snapshots that are blocking + // deletion. + RadosNamespaceEmptyReason ConditionReason = "RadosNamespaceEmpty" +) + +// ConditionType represent a resource's status +type ConditionType string + +const ( + // ConditionConnecting represents Connecting state of an object + ConditionConnecting ConditionType = "Connecting" + // ConditionConnected represents Connected state of an object + ConditionConnected ConditionType = "Connected" + // ConditionProgressing represents Progressing state of an object + ConditionProgressing ConditionType = "Progressing" + // ConditionReady represents Ready state of an object + ConditionReady ConditionType = "Ready" + // ConditionFailure represents Failure state of an object + ConditionFailure ConditionType = "Failure" + // ConditionDeleting represents Deleting state of an object + ConditionDeleting ConditionType = "Deleting" + + // ConditionDeletionIsBlocked represents when deletion of the object is blocked. + ConditionDeletionIsBlocked ConditionType = "DeletionIsBlocked" + // ConditionPoolDeletionIsBlocked represents when deletion of the object is blocked. 
+ ConditionPoolDeletionIsBlocked ConditionType = "PoolDeletionIsBlocked" + // ConditionRadosNSDeletionIsBlocked represents when deletion of the object is blocked. + ConditionRadosNSDeletionIsBlocked ConditionType = "RadosNamespaceDeletionIsBlocked" +) + +// ClusterState represents the state of a Ceph Cluster +type ClusterState string + +const ( + // ClusterStateCreating represents the Creating state of a Ceph Cluster + ClusterStateCreating ClusterState = "Creating" + // ClusterStateCreated represents the Created state of a Ceph Cluster + ClusterStateCreated ClusterState = "Created" + // ClusterStateUpdating represents the Updating state of a Ceph Cluster + ClusterStateUpdating ClusterState = "Updating" + // ClusterStateConnecting represents the Connecting state of a Ceph Cluster + ClusterStateConnecting ClusterState = "Connecting" + // ClusterStateConnected represents the Connected state of a Ceph Cluster + ClusterStateConnected ClusterState = "Connected" + // ClusterStateError represents the Error state of a Ceph Cluster + ClusterStateError ClusterState = "Error" +) + +type CephxStatus struct { + // KeyGeneration represents the CephX key generation for the last successful reconcile. + // For all newly-created resources, this field is set to `1`. + // When keys are rotated due to any rotation policy, the generation is incremented or updated to + // the configured policy generation. + // Generation `0` indicates that keys existed prior to the implementation of key tracking. + KeyGeneration uint32 `json:"keyGeneration,omitempty"` + + // KeyCephVersion reports the Ceph version that created the current generation's keys. This is + // same string format as reported by `CephCluster.status.version.version` to allow them to be + // compared. E.g., `20.2.0-0`. + // For all newly-created resources, this field set to the version of Ceph that created the key. + // The special value "Uninitialized" indicates that keys are being created for the first time. 
+	// An empty string indicates that the version is unknown, as expected in brownfield deployments.
+	KeyCephVersion string `json:"keyCephVersion,omitempty"`
+}
+
+type CephxStatusWithKeyCount struct {
+	CephxStatus `json:",inline"` // inline core CephxStatus
+
+	// PriorKeyCount reports the number of prior-generation CephX keys that remain active for the related component
+	PriorKeyCount uint8 `json:"priorKeyCount,omitempty"`
+}
+
+// UninitializedCephxKeyCephVersion is a special value for CephxStatus.KeyCephVersion that is
+// applied when a resource status is first initialized. Rook replaces this value with the current
+// Ceph version after keys are first created and the resource is reconciled successfully.
+const UninitializedCephxKeyCephVersion string = "Uninitialized"
+
+type LocalCephxStatus struct {
+	// Daemon shows the CephX key status for local Ceph daemons associated with this resource.
+	Daemon CephxStatus `json:"daemon,omitempty"`
+}
+
+// ClusterCephxStatus defines the cephx key rotation status of various daemons on the cephCluster resource
+type ClusterCephxStatus struct {
+	// Admin shows the CephX key status for the client.admin key
+	Admin CephxStatus `json:"admin,omitempty"`
+	// Mon represents the CephX key status of the Monitor daemons
+	Mon CephxStatus `json:"mon,omitempty"`
+	// Mgr represents the cephx key rotation status of the ceph manager daemon
+	Mgr CephxStatus `json:"mgr,omitempty"`
+	// OSD shows the CephX key status of OSDs
+	OSD CephxStatus `json:"osd,omitempty"`
+	// CSI shows the CephX key status for Ceph-CSI components.
+ CSI CephxStatusWithKeyCount `json:"csi,omitempty"` + // RBDMirrorPeer represents the cephx key rotation status of the `rbd-mirror-peer` user + RBDMirrorPeer CephxStatus `json:"rbdMirrorPeer,omitempty"` + // Crash Collector represents the cephx key rotation status of the crash collector daemon + CrashCollector CephxStatus `json:"crashCollector,omitempty"` + // Ceph Exporter represents the cephx key rotation status of the ceph exporter daemon + CephExporter CephxStatus `json:"cephExporter,omitempty"` +} + +// MonSpec represents the specification of the monitor +// +kubebuilder:validation:XValidation:message="zones must be less than or equal to count",rule="!has(self.zones) || (has(self.zones) && (size(self.zones) <= self.count))" +// +kubebuilder:validation:XValidation:message="stretchCluster zones must be equal to 3",rule="!has(self.stretchCluster) || (has(self.stretchCluster) && (size(self.stretchCluster.zones) > 0) && (size(self.stretchCluster.zones) == 3))" +type MonSpec struct { + // Count is the number of Ceph monitors + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=9 + // +optional + Count int `json:"count,omitempty"` + // AllowMultiplePerNode determines if we can run multiple monitors on the same node (not recommended) + // +optional + AllowMultiplePerNode bool `json:"allowMultiplePerNode,omitempty"` + // +optional + FailureDomainLabel string `json:"failureDomainLabel,omitempty"` + // Zones are specified when we want to provide zonal awareness to mons + // +optional + Zones []MonZoneSpec `json:"zones,omitempty"` + // StretchCluster is the stretch cluster specification + // +optional + StretchCluster *StretchClusterSpec `json:"stretchCluster,omitempty"` + // VolumeClaimTemplate is the PVC definition + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + VolumeClaimTemplate *VolumeClaimTemplate `json:"volumeClaimTemplate,omitempty"` + // ExternalMonIDs - optional list of monitor IDs which are deployed externally and not 
managed by Rook.
+	// If set, Rook will not remove mons with given IDs from quorum.
+	// This parameter is used only for local Rook cluster running in normal mode
+	// and will be ignored if external or stretched mode is used.
+	// +optional
+	ExternalMonIDs []string `json:"externalMonIDs,omitempty"`
+}
+
+// VolumeClaimTemplate is a simplified version of K8s corev1's PVC. It has no type meta or status.
+type VolumeClaimTemplate struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines the desired characteristics of a volume requested by a pod author.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// StretchClusterSpec represents the specification of a stretched Ceph Cluster
+type StretchClusterSpec struct {
+	// FailureDomainLabel the failure domain name (e,g: zone)
+	// +optional
+	FailureDomainLabel string `json:"failureDomainLabel,omitempty"`
+	// SubFailureDomain is the failure domain within a zone
+	// +optional
+	SubFailureDomain string `json:"subFailureDomain,omitempty"`
+	// Zones is the list of zones
+	// +optional
+	// +nullable
+	Zones []MonZoneSpec `json:"zones,omitempty"`
+}
+
+// MonZoneSpec represents the specification of a zone in a Ceph Cluster
+type MonZoneSpec struct {
+	// Name is the name of the zone
+	// +optional
+	Name string `json:"name,omitempty"`
+	// Arbiter determines if the zone contains the arbiter used for stretch cluster mode
+	// +optional
+	Arbiter bool `json:"arbiter,omitempty"`
+	// VolumeClaimTemplate is the PVC template
+	// +kubebuilder:pruning:PreserveUnknownFields
+	// +optional
+	VolumeClaimTemplate *VolumeClaimTemplate
`json:"volumeClaimTemplate,omitempty"` +} + +// MgrSpec represents options to configure a ceph mgr +type MgrSpec struct { + // Count is the number of manager daemons to run + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=5 + // +optional + Count int `json:"count,omitempty"` + // AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) + // +optional + AllowMultiplePerNode bool `json:"allowMultiplePerNode,omitempty"` + // Modules is the list of ceph manager modules to enable/disable + // +optional + // +nullable + Modules []Module `json:"modules,omitempty"` + // Whether host networking is enabled for the Ceph Mgr. If not set, the network settings from CephCluster.spec.networking will be applied. + // +optional + HostNetwork *bool `json:"hostNetwork,omitempty"` +} + +// Module represents mgr modules that the user wants to enable or disable +type Module struct { + // Name is the name of the ceph manager module + // +optional + Name string `json:"name,omitempty"` + // Enabled determines whether a module should be enabled or not + // +optional + Enabled bool `json:"enabled,omitempty"` + // Settings to further configure the module + Settings ModuleSettings `json:"settings,omitempty"` +} + +type ModuleSettings struct { + // BalancerMode sets the `balancer` module with different modes like `upmap`, `crush-compact` etc + // +kubebuilder:validation:Enum="";crush-compat;upmap;read;upmap-read + BalancerMode string `json:"balancerMode,omitempty"` +} + +// ExternalSpec represents the options supported by an external cluster +// +kubebuilder:pruning:PreserveUnknownFields +// +nullable +type ExternalSpec struct { + // Enable determines whether external mode is enabled or not + // +optional + Enable bool `json:"enable,omitempty"` +} + +// CrashCollectorSpec represents options to configure the crash controller +type CrashCollectorSpec struct { + // Disable determines whether we should enable the crash collector + // 
+optional + Disable bool `json:"disable,omitempty"` + + // DaysToRetain represents the number of days to retain crash until they get pruned + // +optional + DaysToRetain uint `json:"daysToRetain,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBlockPool represents a Ceph Storage Pool +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.status.info.type` +// +kubebuilder:printcolumn:name="FailureDomain",type=string,JSONPath=`.status.info.failureDomain` +// +kubebuilder:printcolumn:name="Replication",type=integer,JSONPath=`.spec.replicated.size`,priority=1 +// +kubebuilder:printcolumn:name="EC-CodingChunks",type=integer,JSONPath=`.spec.erasureCoded.codingChunks`,priority=1 +// +kubebuilder:printcolumn:name="EC-DataChunks",type=integer,JSONPath=`.spec.erasureCoded.dataChunks`,priority=1 +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephbp +type CephBlockPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec NamedBlockPoolSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + Status *CephBlockPoolStatus `json:"status,omitempty"` +} + +// CephBlockPoolList is a list of Ceph Storage Pools +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephBlockPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephBlockPool `json:"items"` +} + +const ( + // DefaultFailureDomain for PoolSpec + DefaultFailureDomain = "host" + // DefaultCRUSHRoot is the default name of the CRUSH root bucket + DefaultCRUSHRoot = "default" +) + +// PoolSpec represents the spec of ceph pool +type PoolSpec struct { + // The failure domain: osd/host/(region or zone if available) - technically 
also any type in the crush map + // +optional + FailureDomain string `json:"failureDomain,omitempty"` + + // The root of the crush hierarchy utilized by the pool + // +optional + // +nullable + CrushRoot string `json:"crushRoot,omitempty"` + + // The device class the OSD should set to for use in the pool + // +optional + // +nullable + DeviceClass string `json:"deviceClass,omitempty"` + + // Allow rook operator to change the pool CRUSH tunables once the pool is created + // +nullable + // +optional + EnableCrushUpdates *bool `json:"enableCrushUpdates,omitempty"` + + // DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + // The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + // +kubebuilder:validation:Enum=none;passive;aggressive;force;"" + // Do NOT set a default value for kubebuilder as this will override the Parameters + // +optional + // +nullable + CompressionMode string `json:"compressionMode,omitempty"` + + // The replication settings + // +optional + Replicated ReplicatedSpec `json:"replicated,omitempty"` + + // The erasure code settings + // +optional + ErasureCoded ErasureCodedSpec `json:"erasureCoded,omitempty"` + + // Parameters is a list of properties to enable on a given pool + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + // +nullable + Parameters map[string]string `json:"parameters,omitempty"` + + // EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + EnableRBDStats bool `json:"enableRBDStats,omitempty"` + + // The mirroring settings + Mirroring MirroringSpec `json:"mirroring,omitempty"` + + // The mirroring statusCheck + // +kubebuilder:pruning:PreserveUnknownFields + StatusCheck MirrorHealthCheckSpec `json:"statusCheck,omitempty"` + + // The quota settings + // +optional + // +nullable + Quotas QuotaSpec `json:"quotas,omitempty"` + + // The application name to set on the pool. 
Only expected to be set for rgw pools. + // +optional + Application string `json:"application"` +} + +// NamedBlockPoolSpec allows a block pool to be created with a non-default name. +// This is more specific than the NamedPoolSpec so we get schema validation on the +// allowed pool names that can be specified. +type NamedBlockPoolSpec struct { + // The desired name of the pool if different from the CephBlockPool CR name. + // +kubebuilder:validation:Enum=.rgw.root;.nfs;.mgr + // +optional + Name string `json:"name,omitempty"` + // The core pool configuration + PoolSpec `json:",inline"` +} + +// NamedPoolSpec represents the named ceph pool spec +type NamedPoolSpec struct { + // Name of the pool + Name string `json:"name,omitempty"` + // PoolSpec represents the spec of ceph pool + PoolSpec `json:",inline"` +} + +// MirrorHealthCheckSpec represents the health specification of a Ceph Storage Pool mirror +type MirrorHealthCheckSpec struct { + // +optional + // +nullable + Mirror HealthCheckSpec `json:"mirror,omitempty"` +} + +// CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool +type CephBlockPoolStatus struct { + // +optional + Phase ConditionType `json:"phase,omitempty"` + // +optional + Cephx PeerTokenCephxStatus `json:"cephx,omitempty"` + // +optional + MirroringStatus *MirroringStatusSpec `json:"mirroringStatus,omitempty"` + // +optional + MirroringInfo *MirroringInfoSpec `json:"mirroringInfo,omitempty"` + // optional + PoolID int `json:"poolID,omitempty"` + // +optional + SnapshotScheduleStatus *SnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` + // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` +} + +// PeerTokenCephxStatus represents the cephx key rotation status for peer tokens +type PeerTokenCephxStatus struct { + // PeerToken shows the rotation status of the peer token associated with the `rbd-mirror-peer` user. + PeerToken CephxStatus `json:"peerToken,omitempty"` +} + +// MirroringStatusSpec is the status of the pool/radosNamespace mirroring +type MirroringStatusSpec struct { + // MirroringStatus is the mirroring status of a pool/radosNamespace + // +optional + MirroringStatus `json:",inline"` + // LastChecked is the last time time the status was checked + // +optional + LastChecked string `json:"lastChecked,omitempty"` + // LastChanged is the last time time the status last changed + // +optional + LastChanged string `json:"lastChanged,omitempty"` + // Details contains potential status errors + // +optional + Details string `json:"details,omitempty"` +} + +// MirroringStatus is the pool/radosNamespace mirror status +type MirroringStatus struct { + // Summary is the mirroring status summary + // +optional + Summary *MirroringStatusSummarySpec `json:"summary,omitempty"` +} + +// MirroringStatusSummarySpec is the summary output of the command +type MirroringStatusSummarySpec struct { + // Health is the mirroring health + // +optional + Health string `json:"health,omitempty"` + // DaemonHealth is the health of the mirroring daemon + // +optional + DaemonHealth string `json:"daemon_health,omitempty"` + // ImageHealth is the health of the mirrored image + // +optional + ImageHealth string `json:"image_health,omitempty"` + // States is the various state for all mirrored images + // +optional + // +nullable + States StatesSpec `json:"states,omitempty"` + // ImageStates is the various state for all mirrored images + // +optional + // +nullable + ImageStates *StatesSpec `json:"image_states,omitempty"` + // GroupHealth is the 
health of the mirrored image group + // +optional + // +nullable + GroupHealth string `json:"group_health,omitempty"` + // GroupStates is the various state for all mirrored image groups + // +optional + // +nullable + GroupStates StatesSpec `json:"group_states,omitempty"` +} + +// StatesSpec are rbd images mirroring state +type StatesSpec struct { + // StartingReplay is when the replay of the mirroring journal starts + // +optional + StartingReplay int `json:"starting_replay,omitempty"` + // Replaying is when the replay of the mirroring journal is on-going + // +optional + Replaying int `json:"replaying,omitempty"` + // Syncing is when the image is syncing + // +optional + Syncing int `json:"syncing,omitempty"` + // StopReplaying is when the replay of the mirroring journal stops + // +optional + StopReplaying int `json:"stopping_replay,omitempty"` + // Stopped is when the mirroring state is stopped + // +optional + Stopped int `json:"stopped,omitempty"` + // Unknown is when the mirroring state is unknown + // +optional + Unknown int `json:"unknown,omitempty"` + // Error is when the mirroring state is errored + // +optional + Error int `json:"error,omitempty"` +} + +// MirroringInfoSpec is the status of the pool/radosnamespace mirroring +type MirroringInfoSpec struct { + // +optional + *MirroringInfo `json:",inline"` + // +optional + LastChecked string `json:"lastChecked,omitempty"` + // +optional + LastChanged string `json:"lastChanged,omitempty"` + // +optional + Details string `json:"details,omitempty"` +} + +// MirroringInfo is the mirroring info of a given pool/radosnamespace +type MirroringInfo struct { + // Mode is the mirroring mode + // +optional + Mode string `json:"mode,omitempty"` + // SiteName is the current site name + // +optional + SiteName string `json:"site_name,omitempty"` + // Peers are the list of peer sites connected to that cluster + // +optional + Peers []PeersSpec `json:"peers,omitempty"` +} + +// PeersSpec contains peer details +type 
PeersSpec struct { + // UUID is the peer UUID + // +optional + UUID string `json:"uuid,omitempty"` + // Direction is the peer mirroring direction + // +optional + Direction string `json:"direction,omitempty"` + // SiteName is the current site name + // +optional + SiteName string `json:"site_name,omitempty"` + // MirrorUUID is the mirror UUID + // +optional + MirrorUUID string `json:"mirror_uuid,omitempty"` + // ClientName is the CephX user used to connect to the peer + // +optional + ClientName string `json:"client_name,omitempty"` +} + +// SnapshotScheduleStatusSpec is the status of the snapshot schedule +type SnapshotScheduleStatusSpec struct { + // SnapshotSchedules is the list of snapshots scheduled + // +nullable + // +optional + SnapshotSchedules []SnapshotSchedulesSpec `json:"snapshotSchedules,omitempty"` + // LastChecked is the last time time the status was checked + // +optional + LastChecked string `json:"lastChecked,omitempty"` + // LastChanged is the last time time the status last changed + // +optional + LastChanged string `json:"lastChanged,omitempty"` + // Details contains potential status errors + // +optional + Details string `json:"details,omitempty"` +} + +// SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool +type SnapshotSchedulesSpec struct { + // Pool is the pool name + // +optional + Pool string `json:"pool,omitempty"` + // Namespace is the RADOS namespace the image is part of + // +optional + Namespace string `json:"namespace,omitempty"` + // Image is the mirrored image + // +optional + Image string `json:"image,omitempty"` + // Items is the list schedules times for a given snapshot + // +optional + Items []SnapshotSchedule `json:"items,omitempty"` +} + +// SnapshotSchedule is a schedule +type SnapshotSchedule struct { + // Interval is the interval in which snapshots will be taken + // +optional + Interval string `json:"interval,omitempty"` + // StartTime is the snapshot starting time + // +optional + StartTime 
string `json:"start_time,omitempty"` +} + +// Status represents the status of an object +type Status struct { + // +optional + Phase string `json:"phase,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` +} + +// ReplicatedSpec represents the spec for replication in a pool +type ReplicatedSpec struct { + // Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + // +kubebuilder:validation:Minimum=0 + Size uint `json:"size"` + + // TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + // +kubebuilder:validation:Minimum=0 + // +optional + TargetSizeRatio float64 `json:"targetSizeRatio,omitempty"` + + // RequireSafeReplicaSize if false allows you to set replica 1 + // +optional + RequireSafeReplicaSize bool `json:"requireSafeReplicaSize,omitempty"` + + // ReplicasPerFailureDomain the number of replica in the specified failure domain + // +kubebuilder:validation:Minimum=1 + // +optional + ReplicasPerFailureDomain uint `json:"replicasPerFailureDomain,omitempty"` + + // SubFailureDomain the name of the sub-failure domain + // +optional + SubFailureDomain string `json:"subFailureDomain,omitempty"` + + // HybridStorage represents hybrid storage tier settings + // +optional + // +nullable + HybridStorage *HybridStorageSpec `json:"hybridStorage,omitempty"` +} + +// HybridStorageSpec represents the settings for hybrid storage pool +type HybridStorageSpec struct { + // PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + PrimaryDeviceClass string `json:"primaryDeviceClass"` + // SecondaryDeviceClass represents low performance tier (for example 
HDDs) for remaining OSDs + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + SecondaryDeviceClass string `json:"secondaryDeviceClass"` +} + +// MirroringSpec represents the setting for a mirrored pool +type MirroringSpec struct { + // Enabled whether this pool is mirrored or not + // +optional + Enabled bool `json:"enabled,omitempty"` + + // Mode is the mirroring mode: pool, image or init-only. + // +kubebuilder:validation:Enum=pool;image;init-only + // +optional + Mode string `json:"mode,omitempty"` + + // SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + // +optional + SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` + + // Peers represents the peers spec + // +nullable + // +optional + Peers *MirroringPeerSpec `json:"peers,omitempty"` +} + +// SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool +type SnapshotScheduleSpec struct { + // Path is the path to snapshot, only valid for CephFS + // +optional + Path string `json:"path,omitempty"` + + // Interval represent the periodicity of the snapshot. 
+ // +optional + Interval string `json:"interval,omitempty"` + + // StartTime indicates when to start the snapshot + // +optional + StartTime string `json:"startTime,omitempty"` +} + +// QuotaSpec represents the spec for quotas in a pool +type QuotaSpec struct { + // MaxBytes represents the quota in bytes + // Deprecated in favor of MaxSize + // +optional + MaxBytes *uint64 `json:"maxBytes,omitempty"` + + // MaxSize represents the quota in bytes as a string + // +kubebuilder:validation:Pattern=`^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$` + // +optional + MaxSize *string `json:"maxSize,omitempty"` + + // MaxObjects represents the quota in objects + // +optional + MaxObjects *uint64 `json:"maxObjects,omitempty"` +} + +// ErasureCodedSpec represents the spec for erasure code in a pool +type ErasureCodedSpec struct { + // Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + // This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + // +kubebuilder:validation:Minimum=0 + CodingChunks uint `json:"codingChunks"` + + // Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + // The number of chunks required to recover an object when any single OSD is lost is the same + // as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + // +kubebuilder:validation:Minimum=0 + DataChunks uint `json:"dataChunks"` + + // The algorithm for erasure coding. + // If absent, defaults to the plugin specified in osd_pool_default_erasure_code_profile. 
+ // +kubebuilder:validation:Enum=isa;jerasure + // +optional + Algorithm string `json:"algorithm,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephFilesystem represents a Ceph Filesystem +// +kubebuilder:printcolumn:name="ActiveMDS",type=string,JSONPath=`.spec.metadataServer.activeCount`,description="Number of desired active MDS daemons" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephfs +type CephFilesystem struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec FilesystemSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + Status *CephFilesystemStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephFilesystemList represents a list of Ceph Filesystems +type CephFilesystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephFilesystem `json:"items"` +} + +// FilesystemSpec represents the spec of a file system +type FilesystemSpec struct { + // The metadata pool settings + // +nullable + MetadataPool NamedPoolSpec `json:"metadataPool"` + + // The data pool settings, with optional predefined pool name. + // +nullable + DataPools []NamedPoolSpec `json:"dataPools"` + + // Preserve pool names as specified + // +optional + PreservePoolNames bool `json:"preservePoolNames,omitempty"` + + // Preserve pools on filesystem deletion + // +optional + PreservePoolsOnDelete bool `json:"preservePoolsOnDelete,omitempty"` + + // Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. 
+ // +optional + PreserveFilesystemOnDelete bool `json:"preserveFilesystemOnDelete,omitempty"` + + // The mds pod info + MetadataServer MetadataServerSpec `json:"metadataServer"` + + // The mirroring settings + // +nullable + // +optional + Mirroring *FSMirroringSpec `json:"mirroring,omitempty"` + + // The mirroring statusCheck + // +kubebuilder:pruning:PreserveUnknownFields + StatusCheck MirrorHealthCheckSpec `json:"statusCheck,omitempty"` +} + +// MetadataServerSpec represents the specification of a Ceph Metadata Server +type MetadataServerSpec struct { + // The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=50 + ActiveCount int32 `json:"activeCount"` + + // Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. + // If false, standbys will still be available, but will not have a warm metadata cache. + // +optional + ActiveStandby bool `json:"activeStandby,omitempty"` + + // The affinity to place the mds pods (default is to place on all available node) with a daemonset + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. 
+ // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Labels Labels `json:"labels,omitempty"` + + // The resource requirements for the mds pods + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets priority classes on components + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // +optional + LivenessProbe *ProbeSpec `json:"livenessProbe,omitempty"` + + // +optional + StartupProbe *ProbeSpec `json:"startupProbe,omitempty"` + + // CacheMemoryLimitFactor is the factor applied to the memory limit to determine the MDS cache memory limit. + // MDS cache memory limit should be set to 50-60% of RAM reserved for the MDS container. + // MDS uses approximately 125% of the value of mds_cache_memory_limit in RAM. + // This factor is applied when resources.limits.memory is set. + // +kubebuilder:validation:Minimum=0.0 + // +kubebuilder:validation:Maximum=1.0 + // +optional + CacheMemoryLimitFactor *float64 `json:"cacheMemoryLimitFactor,omitempty"` + + // CacheMemoryRequestFactor is the factor applied to the memory request to determine the MDS cache memory limit. + // This factor is applied when resources.requests.memory is set and resources.limits.memory is not set. 
+ // +kubebuilder:validation:Minimum=0.0 + // +kubebuilder:validation:Maximum=1.0 + // +optional + CacheMemoryRequestFactor *float64 `json:"cacheMemoryRequestFactor,omitempty"` +} + +// FSMirroringSpec represents the setting for a mirrored filesystem +type FSMirroringSpec struct { + // Enabled whether this filesystem is mirrored or not + // +optional + Enabled bool `json:"enabled,omitempty"` + + // Peers represents the peers spec + // +nullable + // +optional + Peers *MirroringPeerSpec `json:"peers,omitempty"` + + // SnapshotSchedules is the scheduling of snapshot for mirrored filesystems + // +optional + SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` + + // Retention is the retention policy for a snapshot schedule + // One path has exactly one retention policy. + // A policy can however contain multiple count-time period pairs in order to specify complex retention policies + // +optional + SnapshotRetention []SnapshotScheduleRetentionSpec `json:"snapshotRetention,omitempty"` +} + +// SnapshotScheduleRetentionSpec is a retention policy +type SnapshotScheduleRetentionSpec struct { + // Path is the path to snapshot + // +optional + Path string `json:"path,omitempty"` + + // Duration represents the retention duration for a snapshot + // +optional + Duration string `json:"duration,omitempty"` +} + +// CephFilesystemStatus represents the status of a Ceph Filesystem +type CephFilesystemStatus struct { + // +optional + Phase ConditionType `json:"phase,omitempty"` + // +optional + SnapshotScheduleStatus *FilesystemSnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` + // Use only info and put mirroringStatus in it? 
+ // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` + Cephx LocalCephxStatus `json:"cephx,omitempty"` + // MirroringStatus is the filesystem mirroring status + // +optional + MirroringStatus *FilesystemMirroringInfoSpec `json:"mirroringStatus,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// FilesystemMirroringInfo is the status of the pool mirroring +type FilesystemMirroringInfoSpec struct { + // PoolMirroringStatus is the mirroring status of a filesystem + // +nullable + // +optional + FilesystemMirroringAllInfo []FilesystemMirroringInfo `json:"daemonsStatus,omitempty"` + // LastChecked is the last time time the status was checked + // +optional + LastChecked string `json:"lastChecked,omitempty"` + // LastChanged is the last time time the status last changed + // +optional + LastChanged string `json:"lastChanged,omitempty"` + // Details contains potential status errors + // +optional + Details string `json:"details,omitempty"` +} + +// FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule +type FilesystemSnapshotScheduleStatusSpec struct { + // SnapshotSchedules is the list of snapshots scheduled + // +nullable + // +optional + SnapshotSchedules []FilesystemSnapshotSchedulesSpec `json:"snapshotSchedules,omitempty"` + // LastChecked is the last time time the status was checked + // +optional + LastChecked string `json:"lastChecked,omitempty"` + // LastChanged is the last time time the status last changed + // +optional + LastChanged string `json:"lastChanged,omitempty"` + // Details contains potential status errors + // +optional + Details string `json:"details,omitempty"` +} + +// FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool +type FilesystemSnapshotSchedulesSpec struct { + // Fs is the name of 
the Ceph Filesystem + // +optional + Fs string `json:"fs,omitempty"` + // Subvol is the name of the sub volume + // +optional + Subvol string `json:"subvol,omitempty"` + // Path is the path on the filesystem + // +optional + Path string `json:"path,omitempty"` + // +optional + RelPath string `json:"rel_path,omitempty"` + // +optional + Schedule string `json:"schedule,omitempty"` + // +optional + Retention FilesystemSnapshotScheduleStatusRetention `json:"retention,omitempty"` +} + +// FilesystemSnapshotScheduleStatusRetention is the retention specification for a filesystem snapshot schedule +type FilesystemSnapshotScheduleStatusRetention struct { + // Start is when the snapshot schedule starts + // +optional + Start string `json:"start,omitempty"` + // Created is when the snapshot schedule was created + // +optional + Created string `json:"created,omitempty"` + // First is when the first snapshot schedule was taken + // +optional + First string `json:"first,omitempty"` + // Last is when the last snapshot schedule was taken + // +optional + Last string `json:"last,omitempty"` + // LastPruned is when the last snapshot schedule was pruned + // +optional + LastPruned string `json:"last_pruned,omitempty"` + // CreatedCount is total amount of snapshots + // +optional + CreatedCount int `json:"created_count,omitempty"` + // PrunedCount is total amount of pruned snapshots + // +optional + PrunedCount int `json:"pruned_count,omitempty"` + // Active is whether the scheduled is active or not + // +optional + Active bool `json:"active,omitempty"` +} + +// FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem +type FilesystemMirroringInfo struct { + // DaemonID is the cephfs-mirror name + // +optional + DaemonID int `json:"daemon_id,omitempty"` + // Filesystems is the list of filesystems managed by a given cephfs-mirror daemon + // +optional + Filesystems []FilesystemsSpec `json:"filesystems,omitempty"` +} + +// FilesystemsSpec is spec for the mirrored 
filesystem +type FilesystemsSpec struct { + // FilesystemID is the filesystem identifier + // +optional + FilesystemID int `json:"filesystem_id,omitempty"` + // Name is name of the filesystem + // +optional + Name string `json:"name,omitempty"` + // DirectoryCount is the number of directories in the filesystem + // +optional + DirectoryCount int `json:"directory_count,omitempty"` + // Peers represents the mirroring peers + // +optional + Peers []FilesystemMirrorInfoPeerSpec `json:"peers,omitempty"` +} + +// FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror +type FilesystemMirrorInfoPeerSpec struct { + // UUID is the peer unique identifier + // +optional + UUID string `json:"uuid,omitempty"` + // Remote are the remote cluster information + // +optional + Remote *PeerRemoteSpec `json:"remote,omitempty"` + // Stats are the stat a peer mirror + // +optional + Stats *PeerStatSpec `json:"stats,omitempty"` +} + +type PeerRemoteSpec struct { + // ClientName is cephx name + // +optional + ClientName string `json:"client_name,omitempty"` + // ClusterName is the name of the cluster + // +optional + ClusterName string `json:"cluster_name,omitempty"` + // FsName is the filesystem name + // +optional + FsName string `json:"fs_name,omitempty"` +} + +// PeerStatSpec are the mirror stat with a given peer +type PeerStatSpec struct { + // FailureCount is the number of mirroring failure + // +optional + FailureCount int `json:"failure_count,omitempty"` + // RecoveryCount is the number of recovery attempted after failures + // +optional + RecoveryCount int `json:"recovery_count,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectStore represents a Ceph Object Store Gateway +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.status.info.endpoint` +// 
+kubebuilder:printcolumn:name="SecureEndpoint",type=string,JSONPath=`.status.info.secureEndpoint` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephos +// +kubebuilder:subresource:scale:specpath=.spec.gateway.instances,statuspath=.status.replicas,selectorpath=.status.selector +type CephObjectStore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ObjectStoreSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + Status *ObjectStoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectStoreList represents a Ceph Object Store Gateways +type CephObjectStoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephObjectStore `json:"items"` +} + +// ObjectStoreSpec represent the spec of a pool +// +kubebuilder:validation:XValidation:rule="!(has(self.defaultRealm) && self.defaultRealm == true && has(self.zone) && size(self.zone.name) > 0)",message="defaultRealm must not be true when zone.name is set (multisite configuration)" +type ObjectStoreSpec struct { + // The metadata pool settings + // +optional + // +nullable + MetadataPool PoolSpec `json:"metadataPool,omitempty"` + + // The data pool settings + // +optional + // +nullable + DataPool PoolSpec `json:"dataPool,omitempty"` + + // The pool information when configuring RADOS namespaces in existing pools. 
+ // +optional + // +nullable + SharedPools ObjectSharedPoolsSpec `json:"sharedPools"` + + // Preserve pools on object store deletion + // +optional + PreservePoolsOnDelete bool `json:"preservePoolsOnDelete,omitempty"` + + // The rgw pod info + // +optional + // +nullable + Gateway GatewaySpec `json:"gateway"` + + // The protocol specification + // +optional + Protocols ProtocolSpec `json:"protocols,omitempty"` + + // The authentication configuration + // +optional + Auth AuthSpec `json:"auth,omitempty"` + + // The multisite info + // +optional + // +nullable + Zone ZoneSpec `json:"zone,omitempty"` + + // The RGW health probes + // +optional + // +nullable + HealthCheck ObjectHealthCheckSpec `json:"healthCheck,omitempty"` + + // Security represents security settings + // +optional + // +nullable + Security *ObjectStoreSecuritySpec `json:"security,omitempty"` + + // The list of allowed namespaces in addition to the object store namespace + // where ceph object store users may be created. Specify "*" to allow all + // namespaces, otherwise list individual namespaces that are to be allowed. + // This is useful for applications that need object store credentials + // to be created in their own namespace, where neither OBCs nor COSI + // is being used to create buckets. The default is empty. + // +optional + AllowUsersInNamespaces []string `json:"allowUsersInNamespaces,omitempty"` + + // Hosting settings for the object store. + // A common use case for hosting configuration is to inform Rook of endpoints that support DNS + // wildcards, which in turn allows virtual host-style bucket addressing. + // +nullable + // +optional + Hosting *ObjectStoreHostingSpec `json:"hosting,omitempty"` + + // Set this realm as the default in Ceph. Only one realm should be default. + // Do not set this true on more than one CephObjectStore. 
+	// This may not be set when zone is also specified; in this case, the realm
+	// referenced by the zone's zonegroup should configure defaulting behavior.
+	// +optional
+	DefaultRealm bool `json:"defaultRealm,omitempty"`
+}
+
+// ObjectSharedPoolsSpec represents object store pool info when configuring RADOS namespaces in existing pools.
+type ObjectSharedPoolsSpec struct {
+	// The metadata pool used for creating RADOS namespaces in the object store
+	// +kubebuilder:validation:XValidation:message="object store shared metadata pool is immutable",rule="self == oldSelf"
+	// +optional
+	MetadataPoolName string `json:"metadataPoolName,omitempty"`
+
+	// The data pool used for creating RADOS namespaces in the object store
+	// +kubebuilder:validation:XValidation:message="object store shared data pool is immutable",rule="self == oldSelf"
+	// +optional
+	DataPoolName string `json:"dataPoolName,omitempty"`
+
+	// Whether the RADOS namespaces should be preserved on deletion of the object store
+	// +optional
+	PreserveRadosNamespaceDataOnDelete bool `json:"preserveRadosNamespaceDataOnDelete"`
+
+	// PoolPlacements control which Pools are associated with a particular RGW bucket.
+	// Once PoolPlacements are defined, RGW client will be able to associate pool
+	// with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+	// or "X-Storage-Policy" header during swift container creation.
+	// See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+	// PoolPlacement with name: "default" will be used as a default pool if no option
+	// is provided during bucket creation.
+	// If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+	// If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults. 
+ // +optional + PoolPlacements []PoolPlacementSpec `json:"poolPlacements,omitempty"` +} + +type PoolPlacementSpec struct { + // Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$` + Name string `json:"name"` + + // Sets given placement as default. Only one placement in the list can be marked as default. + // Default is false. + // +optional + Default bool `json:"default"` + + // The metadata pool used to store ObjectStore bucket index. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + MetadataPoolName string `json:"metadataPoolName"` + + // The data pool used to store ObjectStore objects data. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + DataPoolName string `json:"dataPoolName"` + + // The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads). + // If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName. + // +optional + DataNonECPoolName string `json:"dataNonECPoolName,omitempty"` + + // StorageClasses can be selected by user to override dataPoolName during object creation. + // Each placement has default STANDARD StorageClass pointing to dataPoolName. + // This list allows defining additional StorageClasses on top of default STANDARD storage class. + // +optional + StorageClasses []PlacementStorageClassSpec `json:"storageClasses,omitempty"` +} + +type PlacementStorageClassSpec struct { + // Name is the StorageClass name. 
Ceph allows arbitrary name for StorageClasses,
+	// however most clients/libs insist on AWS names so it is recommended to use
+	// one of the valid x-amz-storage-class values for better compatibility:
+	// REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+	// See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$`
+	Name string `json:"name"`
+
+	// DataPoolName is the data pool used to store ObjectStore objects data.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	DataPoolName string `json:"dataPoolName"`
+}
+
+// ObjectHealthCheckSpec represents the health check of an object store
+type ObjectHealthCheckSpec struct {
+	// livenessProbe field is no longer used
+	// +kubebuilder:pruning:PreserveUnknownFields
+
+	// +optional
+	ReadinessProbe *ProbeSpec `json:"readinessProbe,omitempty"`
+	// +optional
+	StartupProbe *ProbeSpec `json:"startupProbe,omitempty"`
+}
+
+// HealthCheckSpec represents the health check of an object store bucket
+type HealthCheckSpec struct {
+	// +optional
+	Disabled bool `json:"disabled,omitempty"`
+	// Interval is the interval in seconds or minutes for the health check to run, like 60s for 60 seconds
+	// +optional
+	Interval *metav1.Duration `json:"interval,omitempty"`
+	// +optional
+	Timeout string `json:"timeout,omitempty"`
+}
+
+// GatewaySpec represents the specification of Ceph Object Store Gateway
+type GatewaySpec struct {
+	// The port the rgw service will be listening on (http)
+	// +optional
+	Port int32 `json:"port,omitempty"`
+
+	// The port the rgw service will be listening on (https)
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=65535
+	// +nullable
+	// +optional
+	SecurePort int32 `json:"securePort,omitempty"`
+
+	// The number of 
pods in the rgw replicaset. + // +nullable + // +optional + Instances int32 `json:"instances,omitempty"` + + // The name of the secret that stores the ssl certificate for secure rgw connections + // +nullable + // +optional + SSLCertificateRef string `json:"sslCertificateRef,omitempty"` + + // The name of the secret that stores custom ca-bundle with root and intermediate certificates. + // +nullable + // +optional + CaBundleRef string `json:"caBundleRef,omitempty"` + + // The affinity to place the rgw pods (default is to place on any available node) + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` + + // DisableMultisiteSyncTraffic, when true, prevents this object store's gateways from + // transmitting multisite replication data. Note that this value does not affect whether + // gateways receive multisite replication traffic: see ObjectZone.spec.customEndpoints for that. + // If false or unset, this object store's gateways will be able to transmit multisite + // replication data. + // +optional + DisableMultisiteSyncTraffic bool `json:"disableMultisiteSyncTraffic,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Labels Labels `json:"labels,omitempty"` + + // The resource requirements for the rgw pods + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets priority classes on the rgw pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // ExternalRgwEndpoints points to external RGW endpoint(s). 
Multiple endpoints can be given, but + // for stability of ObjectBucketClaims, we highly recommend that users give only a single + // external RGW endpoint that is a load balancer that sends requests to the multiple RGWs. + // +nullable + // +optional + ExternalRgwEndpoints []EndpointAddress `json:"externalRgwEndpoints,omitempty"` + + // The configuration related to add/set on each rgw service. + // +optional + // +nullable + Service *RGWServiceSpec `json:"service,omitempty"` + + // Enable enhanced operation Logs for S3 in a sidecar named ops-log + // +optional + // +nullable + OpsLogSidecar *OpsLogSidecar `json:"opsLogSidecar,omitempty"` + + // Whether host networking is enabled for the rgw daemon. If not set, the network settings from the cluster CR will be applied. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + HostNetwork *bool `json:"hostNetwork,omitempty"` + + // Whether rgw dashboard is enabled for the rgw daemon. If not set, the rgw dashboard will be enabled. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + DashboardEnabled *bool `json:"dashboardEnabled,omitempty"` + + // AdditionalVolumeMounts allows additional volumes to be mounted to the RGW pod. + // The root directory for each additional volume mount is `/var/rgw`. + // Example: for an additional mount at subPath `ldap`, mounted from a secret that has key + // `bindpass.secret`, the file would reside at `/var/rgw/ldap/bindpass.secret`. + AdditionalVolumeMounts AdditionalVolumeMounts `json:"additionalVolumeMounts,omitempty"` + + // RgwConfig sets Ceph RGW config values for the gateway clients that serve this object store. + // Values are modified at runtime without RGW restart. + // This feature is intended for advanced users. It allows breaking configurations to be easily + // applied. Use with caution. 
+	// +nullable
+	// +optional
+	RgwConfig map[string]string `json:"rgwConfig,omitempty"`
+
+	// RgwConfigFromSecret works exactly like RgwConfig but takes config value from Secret Key reference.
+	// Values are modified at runtime without RGW restart.
+	// This feature is intended for advanced users. It allows breaking configurations to be easily
+	// applied. Use with caution.
+	// +nullable
+	// +optional
+	RgwConfigFromSecret map[string]v1.SecretKeySelector `json:"rgwConfigFromSecret,omitempty"`
+
+	// RgwCommandFlags sets Ceph RGW config values for the gateway clients that serve this object
+	// store. Values are modified at RGW startup, resulting in RGW pod restarts.
+	// This feature is intended for advanced users. It allows breaking configurations to be easily
+	// applied. Use with caution.
+	// +nullable
+	// +optional
+	RgwCommandFlags map[string]string `json:"rgwCommandFlags,omitempty"`
+
+	// ReadAffinity defines the RGW read affinity policy to optimize the read requests for the RGW clients
+	// Note: Only supported from Ceph Tentacle (v20)
+	// +optional
+	ReadAffinity *RgwReadAffinity `json:"readAffinity,omitempty"`
+}
+
+type RgwReadAffinity struct {
+	// Type defines the RGW ReadAffinity type
+	// localize: read from the nearest OSD based on crush location of the RGW client
+	// balance: picks a random OSD from the PG's active set
+	// default: read from the primary OSD
+	// +kubebuilder:validation:Enum=localize;balance;default
+	// +required
+	Type string `json:"type"`
+}
+
+// OpsLogSidecar is intended to extend the s3/swift logging for client operations
+type OpsLogSidecar struct {
+	// Resources represents the way to specify resource requirements for the ops-log sidecar
+	// +optional
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// EndpointAddress is a tuple that describes a single IP address or host name. This is a subset of
+// Kubernetes's v1.EndpointAddress. 
+// +structType=atomic +type EndpointAddress struct { + // The IP of this endpoint. As a legacy behavior, this supports being given a DNS-addressable hostname as well. + // +optional + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + + // The DNS-addressable Hostname of this endpoint. This field will be preferred over IP if both are given. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` +} + +// ProtocolSpec represents a Ceph Object Store protocol specification +type ProtocolSpec struct { + // Represents RGW 'rgw_enable_apis' config option. See: https://docs.ceph.com/en/reef/radosgw/config-ref/#confval-rgw_enable_apis + // If no value provided then all APIs will be enabled: s3, s3website, swift, swift_auth, admin, sts, iam, notifications + // If enabled APIs are set, all remaining APIs will be disabled. + // This option overrides S3.Enabled value. + // +optional + // +nullable + EnableAPIs []ObjectStoreAPI `json:"enableAPIs,omitempty"` + + // The spec for S3 + // +optional + // +nullable + S3 *S3Spec `json:"s3,omitempty"` + + // The spec for Swift + // +optional + // +nullable + Swift *SwiftSpec `json:"swift"` +} + +// +kubebuilder:validation:Enum=s3;s3website;swift;swift_auth;admin;sts;iam;notifications +type ObjectStoreAPI string + +// S3Spec represents Ceph Object Store specification for the S3 API +type S3Spec struct { + // Deprecated: use protocol.enableAPIs instead. + // Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled. + // +nullable + // +optional + Enabled *bool `json:"enabled,omitempty"` + // Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply. 
+	// +nullable
+	// +optional
+	AuthUseKeystone *bool `json:"authUseKeystone,omitempty"`
+}
+
+// SwiftSpec represents Ceph Object Store specification for the Swift API
+type SwiftSpec struct {
+	// Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port/<rgw_swift_url_prefix>/v1. If set to true, the Swift API URL will be http://host:port/<rgw_swift_url_prefix>/v1/AUTH_<account_id>. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs.
+	// +nullable
+	// +optional
+	AccountInUrl *bool `json:"accountInUrl,omitempty"`
+	// The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled). 
+	// +nullable
+	// +optional
+	UrlPrefix *string `json:"urlPrefix,omitempty"`
+	// Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+	// +nullable
+	// +optional
+	VersioningEnabled *bool `json:"versioningEnabled,omitempty"`
+}
+
+// AuthSpec represents the authentication protocol configuration of a Ceph Object Store Gateway
+type AuthSpec struct {
+	// The spec for Keystone
+	// +optional
+	// +nullable
+	Keystone *KeystoneSpec `json:"keystone,omitempty"`
+}
+
+// KeystoneSpec represents the Keystone authentication configuration of a Ceph Object Store Gateway
+type KeystoneSpec struct {
+	// The URL for the Keystone server.
+	Url string `json:"url"`
+	// The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource. 
+	ServiceUserSecretName string `json:"serviceUserSecretName"`
+	// The roles required to serve requests.
+	AcceptedRoles []string `json:"acceptedRoles"`
+	// Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants.
+	// +optional
+	ImplicitTenants ImplicitTenantSetting `json:"implicitTenants,omitempty"`
+	// The maximum number of entries in each Keystone token cache.
+	// +optional
+	// +nullable
+	TokenCacheSize *int `json:"tokenCacheSize,omitempty"`
+	// The number of seconds between token revocation checks.
+	// +optional
+	// +nullable
+	RevocationInterval *int `json:"revocationInterval,omitempty"`
+}
+
+type ImplicitTenantSetting string
+
+const (
+	ImplicitTenantSwift ImplicitTenantSetting = "swift"
+	ImplicitTenantS3 ImplicitTenantSetting = "s3"
+	ImplicitTenantTrue ImplicitTenantSetting = "true"
+	ImplicitTenantFalse ImplicitTenantSetting = "false"
+	ImplicitTenantDefault ImplicitTenantSetting = ""
+)
+
+// ZoneSpec represents a Ceph Object Store Gateway Zone specification
+type ZoneSpec struct {
+	// CephObjectStoreZone name this CephObjectStore is part of
+	Name string `json:"name"`
+}
+
+// ObjectStoreStatus represents the status of a Ceph Object Store resource
+type ObjectStoreStatus struct {
+	// +optional
+	Replicas int32 `json:"replicas"`
+	// +optional
+	Selector string `json:"selector"`
+	// +optional
+	Phase ConditionType `json:"phase,omitempty"`
+	// +optional
+	Message string `json:"message,omitempty"`
+	// +optional
+	Endpoints ObjectEndpoints `json:"endpoints"`
+	// +optional
+	// +nullable
+	Info map[string]string `json:"info,omitempty"`
+	Cephx LocalCephxStatus `json:"cephx,omitempty"`
+	Conditions []Condition `json:"conditions,omitempty"`
+	// ObservedGeneration is the latest generation observed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +type ObjectEndpoints struct { + // +optional + // +nullable + Insecure []string `json:"insecure"` + // +optional + // +nullable + Secure []string `json:"secure"` +} + +// ObjectStoreHostingSpec represents the hosting settings for the object store +type ObjectStoreHostingSpec struct { + // AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this + // object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims, + // and COSI Buckets/Accesses. + // By default, Rook returns the endpoint for the object store's Kubernetes service using HTTPS + // with `gateway.securePort` if it is defined (otherwise, HTTP with `gateway.port`). + // +nullable + // +optional + AdvertiseEndpoint *ObjectEndpointSpec `json:"advertiseEndpoint,omitempty"` + // A list of DNS host names on which object store gateways will accept client S3 connections. + // When specified, object store gateways will reject client S3 connections to hostnames that are + // not present in this list, so include all endpoints. + // The object store's advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone + // `customEndpoints` are automatically added to the list but may be set here again if desired. + // Each DNS name must be valid according RFC-1123. + // If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the + // wildcard itself in the list of hostnames. + // E.g., use "mystore.example.com" instead of "*.mystore.example.com". + // +optional + DNSNames []string `json:"dnsNames,omitempty"` +} + +// ObjectEndpointSpec represents an object store endpoint +type ObjectEndpointSpec struct { + // DnsName is the DNS name (in RFC-1123 format) of the endpoint. + // If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the + // wildcard itself in the list of hostnames. 
+ // E.g., use "mystore.example.com" instead of "*.mystore.example.com". + // +kubebuilder:validation:MinLength=1 + // +required + DnsName string `json:"dnsName"` + // Port is the port on which S3 connections can be made for this endpoint. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +required + Port int32 `json:"port"` + // UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP). + // +required + UseTls bool `json:"useTls"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectStoreUser represents a Ceph Object Store Gateway User +// +kubebuilder:resource:shortName=rcou;objectuser;cephosu +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +type CephObjectStoreUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ObjectStoreUserSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *ObjectStoreUserStatus `json:"status,omitempty"` +} + +// ObjectStoreUserStatus represents the status Ceph Object Store Gateway User +type ObjectStoreUserStatus struct { + // +optional + Phase string `json:"phase,omitempty"` + // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // +optional + // +nullable + Keys []SecretReference `json:"keys,omitempty"` +} + +type SecretReference struct { + v1.SecretReference `json:",secretReference"` + UID types.UID `json:"uid,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectStoreUserList represents a list Ceph Object Store Gateway Users +type CephObjectStoreUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephObjectStoreUser `json:"items"` +} + +// ObjectStoreUserSpec represent the spec of an Objectstoreuser +type ObjectStoreUserSpec struct { + // The store the user will be created in + // +optional + Store string `json:"store,omitempty"` + // The display name for the ceph user. + // +optional + DisplayName string `json:"displayName,omitempty"` + // +optional + // +nullable + Capabilities *ObjectUserCapSpec `json:"capabilities,omitempty"` + // +optional + // +nullable + Quotas *ObjectUserQuotaSpec `json:"quotas,omitempty"` + // Allows specifying credentials for the user. If not provided, the operator + // will generate them. + // +optional + Keys []ObjectUserKey `json:"keys,omitempty"` + // The namespace where the parent CephCluster and CephObjectStore are found + // +optional + ClusterNamespace string `json:"clusterNamespace,omitempty"` + // The op-mask of the user. + // +optional + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=3 + // +listType=set + OpMask *[]ObjectUserOpMask `json:"opMask,omitempty"` +} + +// Internally, RGW labels "operations" on persistent state as `RGW_OP_TYPE_READ` (`read`), `RGW_OP_TYPE_WRITE` (`write`), or `RGW_OP_TYPE_DELETE` (`delete`). 
All RGW users have an "operation mask", which does not function as mask or filter as is typically implied by the word "mask", but as a set of allowed or permissible "operation" types the user is able to perform. The "operation mask" is applied regardless of the bucket or IAM policy. For example, in order for an RGW user to be able to read an object from a bucket, that user must have **both** the `read` "op mask" bit and an IAM/bucket policy that allows `s3:GetObject`. The default operations allowed are `read`, `write`, and `delete`. Setting the value to `[]` (an empty YAML sequence) causes all "operations" in the mask to be removed, meaning that the user will not be able to perform any operations. These operation masks are supported: `read`, `write`, `delete` +// +enum +// +kubebuilder:validation:Enum=read;write;delete +type ObjectUserOpMask string + +// Additional admin-level capabilities for the Ceph object store user +type ObjectUserCapSpec struct { + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + User string `json:"user,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store users. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Users string `json:"users,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store buckets. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Bucket string `json:"bucket,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store buckets. 
Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Buckets string `json:"buckets,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store metadata. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + MetaData string `json:"metadata,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store usage. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Usage string `json:"usage,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write Ceph object store zones. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Zone string `json:"zone,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write roles for user. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Roles string `json:"roles,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Admin capabilities to read/write information about the user. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + Info string `json:"info,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to send request to RGW Cache API header. Documented in https://docs.ceph.com/en/latest/radosgw/rgw-cache/#cache-api + AMZCache string `json:"amz-cache,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to change bucket index logging. 
Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + BiLog string `json:"bilog,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to change metadata logging. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + MdLog string `json:"mdlog,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to change data logging. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + DataLog string `json:"datalog,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to change user policies. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + UserPolicy string `json:"user-policy,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to change oidc provider. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + OidcProvider string `json:"oidc-provider,omitempty"` + // +optional + // +kubebuilder:validation:Enum={"*","read","write","read, write"} + // Add capabilities for user to set rate limiter for user and bucket. Documented in https://docs.ceph.com/en/latest/radosgw/admin/?#add-remove-admin-capabilities + RateLimit string `json:"ratelimit,omitempty"` +} + +// ObjectUserQuotaSpec can be used to set quotas for the object store user to limit their usage. 
See the [Ceph docs](https://docs.ceph.com/en/latest/radosgw/admin/?#quota-management) for more +type ObjectUserQuotaSpec struct { + // Maximum bucket limit for the ceph user + // +optional + // +nullable + MaxBuckets *int `json:"maxBuckets,omitempty"` + // Maximum size limit of all objects across all the user's buckets + // See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity for more info. + // +optional + // +nullable + MaxSize *resource.Quantity `json:"maxSize,omitempty"` + // Maximum number of objects across all the user's buckets + // +optional + // +nullable + MaxObjects *int64 `json:"maxObjects,omitempty"` +} + +// ObjectUserKey defines a set of rgw user access credentials to be retrieved +// from secret resources. +type ObjectUserKey struct { + // Secret key selector for the access_key (commonly referred to as AWS_ACCESS_KEY_ID). + AccessKeyRef *v1.SecretKeySelector `json:"accessKeyRef,omitempty"` + // Secret key selector for the secret_key (commonly referred to as AWS_SECRET_ACCESS_KEY). 
+ SecretKeyRef *v1.SecretKeySelector `json:"secretKeyRef,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectRealm represents a Ceph Object Store Gateway Realm +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephor +type CephObjectRealm struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // +nullable + // +optional + Spec ObjectRealmSpec `json:"spec,omitempty"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *Status `json:"status,omitempty"` +} + +// CephObjectRealmList represents a list Ceph Object Store Gateway Realms +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephObjectRealmList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephObjectRealm `json:"items"` +} + +// ObjectRealmSpec represent the spec of an ObjectRealm +type ObjectRealmSpec struct { + Pull PullSpec `json:"pull,omitempty"` + + // Set this realm as the default in Ceph. Only one realm should be default. 
+ // +optional + DefaultRealm bool `json:"defaultRealm,omitempty"` +} + +// PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm +type PullSpec struct { + // +kubebuilder:validation:Pattern=`^https*://` + Endpoint string `json:"endpoint,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephozg +type CephObjectZoneGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ObjectZoneGroupSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *Status `json:"status,omitempty"` +} + +// CephObjectZoneGroupList represents a list Ceph Object Store Gateway Zone Groups +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephObjectZoneGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephObjectZoneGroup `json:"items"` +} + +// ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup +type ObjectZoneGroupSpec struct { + // The name of the realm the zone group is a member of. 
+ Realm string `json:"realm"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephObjectZone represents a Ceph Object Store Gateway Zone +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephoz +type CephObjectZone struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec ObjectZoneSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *Status `json:"status,omitempty"` +} + +// CephObjectZoneList represents a list Ceph Object Store Gateway Zones +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephObjectZoneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephObjectZone `json:"items"` +} + +// ObjectZoneSpec represent the spec of an ObjectZone +type ObjectZoneSpec struct { + // The name of the zone group the zone is a member of. + ZoneGroup string `json:"zoneGroup"` + + // The metadata pool settings + // +optional + // +nullable + MetadataPool PoolSpec `json:"metadataPool"` + + // The data pool settings + // +optional + // +nullable + DataPool PoolSpec `json:"dataPool"` + + // The pool information when configuring RADOS namespaces in existing pools. + // +optional + // +nullable + SharedPools ObjectSharedPoolsSpec `json:"sharedPools"` + + // If this zone cannot be accessed from other peer Ceph clusters via the ClusterIP Service + // endpoint created by Rook, you must set this to the externally reachable endpoint(s). You may + // include the port in the definition. For example: "https://my-object-store.my-domain.net:443". 
+ // In many cases, you should set this to the endpoint of the ingress resource that makes the + // CephObjectStore associated with this CephObjectStoreZone reachable to peer clusters. + // The list can have one or more endpoints pointing to different RGW servers in the zone. + // + // If a CephObjectStore endpoint is omitted from this list, that object store's gateways will + // not receive multisite replication data + // (see CephObjectStore.spec.gateway.disableMultisiteSyncTraffic). + // +nullable + // +optional + CustomEndpoints []string `json:"customEndpoints,omitempty"` + + // Preserve pools on object zone deletion + // +optional + // +kubebuilder:default=true + PreservePoolsOnDelete bool `json:"preservePoolsOnDelete"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBucketTopic represents a Ceph Object Topic for Bucket Notifications +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephbt +type CephBucketTopic struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec BucketTopicSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *BucketTopicStatus `json:"status,omitempty"` +} + +// BucketTopicStatus represents the Status of a CephBucketTopic +type BucketTopicStatus struct { + // +optional + Phase string `json:"phase,omitempty"` + // The ARN of the topic generated by the RGW + // +optional + // +nullable + ARN *string `json:"ARN,omitempty"` + // ObservedGeneration is the latest generation observed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // +optional + Secrets []SecretReference `json:"secrets,omitempty"` +} + +// CephBucketTopicList represents a list Ceph Object Store Bucket Notification Topics +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephBucketTopicList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephBucketTopic `json:"items"` +} + +// BucketTopicSpec represent the spec of a Bucket Topic +type BucketTopicSpec struct { + // The name of the object store on which to define the topic + // +kubebuilder:validation:MinLength=1 + ObjectStoreName string `json:"objectStoreName"` + // The namespace of the object store on which to define the topic + // +kubebuilder:validation:MinLength=1 + ObjectStoreNamespace string `json:"objectStoreNamespace"` + // Data which is sent in each event + // +optional + OpaqueData string `json:"opaqueData,omitempty"` + // Indication whether notifications to this endpoint are persistent or not + // +optional + Persistent bool `json:"persistent,omitempty"` + // Contains the endpoint spec of the topic + Endpoint TopicEndpointSpec `json:"endpoint"` +} + +// TopicEndpointSpec contains exactly one of the endpoint specs of a Bucket Topic +type TopicEndpointSpec struct { + // Spec of HTTP endpoint + // +optional + HTTP *HTTPEndpointSpec `json:"http,omitempty"` + // Spec of AMQP endpoint + // +optional + AMQP *AMQPEndpointSpec `json:"amqp,omitempty"` + // Spec of Kafka endpoint + // +optional + Kafka *KafkaEndpointSpec `json:"kafka,omitempty"` +} + +// HTTPEndpointSpec represent the spec of an HTTP endpoint of a Bucket Topic +type HTTPEndpointSpec struct { + // The URI of the HTTP endpoint to push notification to + // +kubebuilder:validation:MinLength=1 + URI string `json:"uri"` + // Indicate whether the server certificate is validated by the client or not + // +optional + DisableVerifySSL bool 
`json:"disableVerifySSL,omitempty"` + // Send the notifications with the CloudEvents header: https://github.com/cloudevents/spec/blob/main/cloudevents/adapters/aws-s3.md + // +optional + SendCloudEvents bool `json:"sendCloudEvents,omitempty"` +} + +// AMQPEndpointSpec represent the spec of an AMQP endpoint of a Bucket Topic +type AMQPEndpointSpec struct { + // The URI of the AMQP endpoint to push notification to + // +kubebuilder:validation:MinLength=1 + URI string `json:"uri"` + // Name of the exchange that is used to route messages based on topics + // +kubebuilder:validation:MinLength=1 + Exchange string `json:"exchange"` + // Indicate whether the server certificate is validated by the client or not + // +optional + DisableVerifySSL bool `json:"disableVerifySSL,omitempty"` + // The ack level required for this topic (none/broker/routeable) + // +kubebuilder:validation:Enum=none;broker;routeable + // +kubebuilder:default=broker + // +optional + AckLevel string `json:"ackLevel,omitempty"` +} + +// KafkaEndpointSpec represent the spec of a Kafka endpoint of a Bucket Topic +type KafkaEndpointSpec struct { + // The URI of the Kafka endpoint to push notification to + // +kubebuilder:validation:MinLength=1 + URI string `json:"uri"` + // Indicate whether to use SSL when communicating with the broker + // +optional + UseSSL bool `json:"useSSL,omitempty"` + // Indicate whether the server certificate is validated by the client or not + // +optional + DisableVerifySSL bool `json:"disableVerifySSL,omitempty"` + // The ack level required for this topic (none/broker) + // +kubebuilder:validation:Enum=none;broker + // +kubebuilder:default=broker + // +optional + AckLevel string `json:"ackLevel,omitempty"` + // The authentication mechanism for this topic (PLAIN/SCRAM-SHA-512/SCRAM-SHA-256/GSSAPI/OAUTHBEARER) + // +kubebuilder:validation:Enum=PLAIN;SCRAM-SHA-512;SCRAM-SHA-256;GSSAPI;OAUTHBEARER + // +kubebuilder:default=PLAIN + // +optional + Mechanism string 
`json:"mechanism,omitempty"` + // The kafka user name to use for authentication + // +optional + UserSecretRef *corev1.SecretKeySelector `json:"userSecretRef,omitempty"` + // The kafka password to use for authentication + // +optional + PasswordSecretRef *corev1.SecretKeySelector `json:"passwordSecretRef,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBucketNotification represents a Bucket Notifications +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephbn +type CephBucketNotification struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec BucketNotificationSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *Status `json:"status,omitempty"` +} + +// CephBucketNotificationList represents a list Ceph Object Store Bucket Notification Topics +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephBucketNotificationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephBucketNotification `json:"items"` +} + +// BucketNotificationSpec represent the event type of the bucket notification +// See: https://docs.ceph.com/en/latest/radosgw/s3-notification-compatibility/#event-types +// 
+kubebuilder:validation:Enum="s3:ObjectCreated:*";"s3:ObjectCreated:Put";"s3:ObjectCreated:Post";"s3:ObjectCreated:Copy";"s3:ObjectCreated:CompleteMultipartUpload";"s3:ObjectRemoved:*";"s3:ObjectRemoved:Delete";"s3:ObjectRemoved:DeleteMarkerCreated";"s3:ObjectLifecycle:Expiration:Current";"s3:ObjectLifecycle:Expiration:NonCurrent";"s3:ObjectLifecycle:Expiration:DeleteMarker";"s3:ObjectLifecycle:Expiration:AbortMultipartUpload";"s3:ObjectLifecycle:Transition:Current";"s3:ObjectLifecycle:Transition:NonCurrent";"s3:LifecycleExpiration:*";"s3:LifecycleExpiration:Delete";"s3:LifecycleExpiration:DeleteMarkerCreated";"s3:LifecycleTransition";"s3:ObjectSynced:*";"s3:ObjectSynced:Create";"s3:ObjectSynced:Delete";"s3:ObjectSynced:DeletionMarkerCreated";"s3:Replication:*";"s3:Replication:Create";"s3:Replication:Delete";"s3:Replication:DeletionMarkerCreated";"s3:ObjectRestore:*";"s3:ObjectRestore:Post";"s3:ObjectRestore:Completed";"s3:ObjectRestore:Delete" +type BucketNotificationEvent string + +// BucketNotificationSpec represent the spec of a Bucket Notification +type BucketNotificationSpec struct { + // The name of the topic associated with this notification + // +kubebuilder:validation:MinLength=1 + Topic string `json:"topic"` + // List of events that should trigger the notification + // +optional + Events []BucketNotificationEvent `json:"events,omitempty"` + // Spec of notification filter + // +optional + Filter *NotificationFilterSpec `json:"filter,omitempty"` +} + +// NotificationFilterRule represent a single rule in the Notification Filter spec +type NotificationFilterRule struct { + // Name of the metadata or tag + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + // Value to filter on + Value string `json:"value"` +} + +// NotificationKeyFilterRule represent a single key rule in the Notification Filter spec +type NotificationKeyFilterRule struct { + // Name of the filter - prefix/suffix/regex + // +kubebuilder:validation:Enum=prefix;suffix;regex + 
Name string `json:"name"` + // Value to filter on + Value string `json:"value"` +} + +// NotificationFilterSpec represent the spec of a Bucket Notification filter +type NotificationFilterSpec struct { + // Filters based on the object's key + // +optional + KeyFilters []NotificationKeyFilterRule `json:"keyFilters,omitempty"` + // Filters based on the object's metadata + // +optional + MetadataFilters []NotificationFilterRule `json:"metadataFilters,omitempty"` + // Filters based on the object's tags + // +optional + TagFilters []NotificationFilterRule `json:"tagFilters,omitempty"` +} + +// RGWServiceSpec represent the spec for RGW service +type RGWServiceSpec struct { + // The annotations-related configuration to add/set on each rgw service. + // nullable + // optional + Annotations Annotations `json:"annotations,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=nfs,path=cephnfses + +// CephNFS represents a Ceph NFS +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +type CephNFS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec NFSGaneshaSpec `json:"spec"` + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *NFSStatus `json:"status,omitempty"` +} + +// NFSStatus represents the status of Ceph NFS +type NFSStatus struct { + Status `json:",inline"` + Cephx LocalCephxStatus `json:"cephx,omitempty"` +} + +// CephNFSList represents a list Ceph NFSes +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephNFSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephNFS `json:"items"` +} + +// NFSGaneshaSpec represents the spec of an nfs ganesha server +type NFSGaneshaSpec struct { + // 
RADOS is the Ganesha RADOS specification + // +nullable + // +optional + RADOS GaneshaRADOSSpec `json:"rados,omitempty"` + + // Server is the Ganesha Server specification + Server GaneshaServerSpec `json:"server"` + + // Security allows specifying security configurations for the NFS cluster + // +nullable + // +optional + Security *NFSSecuritySpec `json:"security"` +} + +// GaneshaRADOSSpec represents the specification of a Ganesha RADOS object +type GaneshaRADOSSpec struct { + // The Ceph pool used store the shared configuration for NFS-Ganesha daemons. + // This setting is deprecated, as it is internally required to be ".nfs". + // +optional + Pool string `json:"pool,omitempty"` + + // The namespace inside the Ceph pool (set by 'pool') where shared NFS-Ganesha config is stored. + // This setting is deprecated as it is internally set to the name of the CephNFS. + // +optional + Namespace string `json:"namespace,omitempty"` +} + +// GaneshaServerSpec represents the specification of a Ganesha Server +type GaneshaServerSpec struct { + // The number of active Ganesha servers + Active int `json:"active"` + + // The affinity to place the ganesha pods + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. 
+ // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Labels Labels `json:"labels,omitempty"` + + // Resources set resource requests and limits + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets the priority class on the pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // LogLevel set logging level + // +optional + LogLevel string `json:"logLevel,omitempty"` + + // Whether host networking is enabled for the Ganesha server. If not set, the network settings from the cluster CR will be applied. + // +nullable + // +optional + HostNetwork *bool `json:"hostNetwork,omitempty"` + + // A liveness-probe to verify that Ganesha server has valid run-time state. + // If LivenessProbe.Disabled is false and LivenessProbe.Probe is nil uses default probe. + // +optional + LivenessProbe *ProbeSpec `json:"livenessProbe,omitempty"` + + // Image is the container image used to launch the Ceph NFS (Ganesha) daemon(s). + // The image must include the NFS Ganesha binaries, such as are included with the official Ceph releases. E.g.: quay.io/ceph/ceph: + // If not specified, the Ceph image defined in the CephCluster is used. + // Overriding the CephCluster defined image is not normally necessary when using the official Ceph images. + // The image must contain the NFS Ganesha and dbus packages. + // If the SSSD sidecar is enabled, the image must also contain the sssd-client package. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1572864 + Image string `json:"image,omitempty"` + + // ImagePullPolicy describes a policy for if/when to pull a container image + // One of Always, Never, IfNotPresent. + // This field only has effect if an image is specified. 
+ // +optional + // +kubebuilder:validation:Enum=IfNotPresent;Always;Never;"" + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// NFSSecuritySpec represents security configurations for an NFS server pod +type NFSSecuritySpec struct { + // SSSD enables integration with System Security Services Daemon (SSSD). SSSD can be used to + // provide user ID mapping from a number of sources. See https://sssd.io for more information + // about the SSSD project. + // +optional + // +nullable + SSSD *SSSDSpec `json:"sssd,omitempty"` + + // Kerberos configures NFS-Ganesha to secure NFS client connections with Kerberos. + // +optional + // +nullable + Kerberos *KerberosSpec `json:"kerberos,omitempty"` +} + +// KerberosSpec represents configuration for Kerberos. +type KerberosSpec struct { + // PrincipalName corresponds directly to NFS-Ganesha's NFS_KRB5:PrincipalName config. In + // practice, this is the service prefix of the principal name. The default is "nfs". + // This value is combined with (a) the namespace and name of the CephNFS (with a hyphen between) + // and (b) the Realm configured in the user-provided krb5.conf to determine the full principal + // name: /-@. e.g., nfs/rook-ceph-my-nfs@example.net. + // See https://github.com/nfs-ganesha/nfs-ganesha/wiki/RPCSEC_GSS for more detail. + // +optional + // +kubebuilder:default="nfs" + PrincipalName string `json:"principalName"` + + // DomainName should be set to the Kerberos Realm. + // +optional + DomainName string `json:"domainName"` + + // ConfigFiles defines where the Kerberos configuration should be sourced from. Config files + // will be placed into the `/etc/krb5.conf.rook/` directory. + // + // If this is left empty, Rook will not add any files. This allows you to manage the files + // yourself however you wish. 
For example, you may build them into your custom Ceph container + // image or use the Vault agent injector to securely add the files via annotations on the + // CephNFS spec (passed to the NFS server pods). + // + // Rook configures Kerberos to log to stderr. We suggest removing logging sections from config + // files to avoid consuming unnecessary disk space from logging to files. + // +optional + ConfigFiles KerberosConfigFiles `json:"configFiles"` + + // KeytabFile defines where the Kerberos keytab should be sourced from. The keytab file will be + // placed into `/etc/krb5.keytab`. If this is left empty, Rook will not add the file. + // This allows you to manage the `krb5.keytab` file yourself however you wish. For example, you + // may build it into your custom Ceph container image or use the Vault agent injector to + // securely add the file via annotations on the CephNFS spec (passed to the NFS server pods). + // +optional + KeytabFile KerberosKeytabFile `json:"keytabFile"` +} + +// KerberosConfigFiles represents the source(s) from which Kerberos configuration should come. +type KerberosConfigFiles struct { + // VolumeSource accepts a pared down version of the standard Kubernetes VolumeSource for + // Kerberos configuration files like what is normally used to configure Volumes for a Pod. For + // example, a ConfigMap, Secret, or HostPath. The volume may contain multiple files, all of + // which will be loaded. + VolumeSource *ConfigFileVolumeSource `json:"volumeSource,omitempty"` +} + +// KerberosKeytabFile represents the source(s) from which the Kerberos keytab file should come. +type KerberosKeytabFile struct { + // VolumeSource accepts a pared down version of the standard Kubernetes VolumeSource for the + // Kerberos keytab file like what is normally used to configure Volumes for a Pod. For example, + // a Secret or HostPath. + // There are two requirements for the source's content: + // 1. The config file must be mountable via `subPath: krb5.keytab`. 
For example, in a + // Secret, the data item must be named `krb5.keytab`, or `items` must be defined to + // select the key and give it path `krb5.keytab`. A HostPath directory must have the + // `krb5.keytab` file. + // 2. The volume or config file must have mode 0600. + VolumeSource *ConfigFileVolumeSource `json:"volumeSource,omitempty"` +} + +// SSSDSpec represents configuration for System Security Services Daemon (SSSD). +type SSSDSpec struct { + // Sidecar tells Rook to run SSSD in a sidecar alongside the NFS-Ganesha server in each NFS pod. + // +optional + Sidecar *SSSDSidecar `json:"sidecar,omitempty"` +} + +// SSSDSidecar represents configuration when SSSD is run in a sidecar. +type SSSDSidecar struct { + // Image defines the container image that should be used for the SSSD sidecar. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + Image string `json:"image"` + + // SSSDConfigFile defines where the SSSD configuration should be sourced from. The config file + // will be placed into `/etc/sssd/sssd.conf`. If this is left empty, Rook will not add the file. + // This allows you to manage the `sssd.conf` file yourself however you wish. For example, you + // may build it into your custom Ceph container image or use the Vault agent injector to + // securely add the file via annotations on the CephNFS spec (passed to the NFS server pods). + // +optional + SSSDConfigFile SSSDSidecarConfigFile `json:"sssdConfigFile"` + + // AdditionalFiles defines any number of additional files that should be mounted into the SSSD + // sidecar with a directory root of `/etc/sssd/rook-additional/`. + // These files may be referenced by the sssd.conf config file. + // +optional + AdditionalFiles AdditionalVolumeMounts `json:"additionalFiles,omitempty"` + + // Resources allow specifying resource requests/limits on the SSSD sidecar container. 
+ // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // DebugLevel sets the debug level for SSSD. If unset or set to 0, Rook does nothing. Otherwise, + // this may be a value between 1 and 10. See SSSD docs for more info: + // https://sssd.io/troubleshooting/basics.html#sssd-debug-logs + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=10 + DebugLevel int `json:"debugLevel,omitempty"` +} + +// SSSDSidecarConfigFile represents the source(s) from which the SSSD configuration should come. +type SSSDSidecarConfigFile struct { + // VolumeSource accepts a pared down version of the standard Kubernetes VolumeSource for the + // SSSD configuration file like what is normally used to configure Volumes for a Pod. For + // example, a ConfigMap, Secret, or HostPath. There are two requirements for the source's + // content: + // 1. The config file must be mountable via `subPath: sssd.conf`. For example, in a ConfigMap, + // the data item must be named `sssd.conf`, or `items` must be defined to select the key + // and give it path `sssd.conf`. A HostPath directory must have the `sssd.conf` file. + // 2. The volume or config file must have mode 0600. + VolumeSource *ConfigFileVolumeSource `json:"volumeSource,omitempty"` +} + +// AdditionalVolumeMount represents the source from where additional files in pod containers +// should come from and what subdirectory they are made available in. +type AdditionalVolumeMount struct { + // SubPath defines the sub-path (subdirectory) of the directory root where the volumeSource will + // be mounted. All files/keys in the volume source's volume will be mounted to the subdirectory. + // This is not the same as the Kubernetes `subPath` volume mount option. + // Each subPath definition must be unique and must not contain ':'. 
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[^:]+$` + SubPath string `json:"subPath"` + + // VolumeSource accepts a pared down version of the standard Kubernetes VolumeSource for the + // additional file(s) like what is normally used to configure Volumes for a Pod. For example, a + // ConfigMap, Secret, or HostPath. Each VolumeSource adds one or more additional files to the + // container `/` directory. + // Be aware that some files may need to have a specific file mode like 0600 due to application + // requirements. For example, CA or TLS certificates. + VolumeSource *ConfigFileVolumeSource `json:"volumeSource"` +} + +type AdditionalVolumeMounts []AdditionalVolumeMount + +// NetworkSpec for Ceph includes backward compatibility code +// +kubebuilder:validation:XValidation:message="at least one network selector must be specified when using multus",rule="!has(self.provider) || (self.provider != 'multus' || (self.provider == 'multus' && size(self.selectors) > 0))" +// +kubebuilder:validation:XValidation:message=`the legacy hostNetwork setting can only be set if the network.provider is set to the empty string`,rule=`!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == ""` +type NetworkSpec struct { + // Provider is what provides network connectivity to the cluster e.g. "host" or "multus". + // If the Provider is updated from being empty to "host" on a running cluster, then the operator will automatically fail over all the mons to apply the "host" network settings. 
+ // +kubebuilder:validation:XValidation:message="network provider must be disabled (reverted to empty string) before a new provider is enabled",rule="self == '' || oldSelf == '' || self == oldSelf" + // +nullable + // +optional + Provider NetworkProviderType `json:"provider,omitempty"` + + // Selectors define NetworkAttachmentDefinitions to be used for Ceph public and/or cluster + // networks when the "multus" network provider is used. This config section is not used for + // other network providers. + // + // Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more: + // https://docs.ceph.com/en/latest/rados/configuration/network-config-ref/ + // + // Refer to Multus network annotation documentation for help selecting values: + // https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation + // + // Rook will make a best-effort attempt to automatically detect CIDR address ranges for given + // network attachment definitions. Rook's methods are robust but may be imprecise for + // sufficiently complicated networks. Rook's auto-detection process obtains a new IP address + // lease for each CephCluster reconcile. If Rook fails to detect, incorrectly detects, only + // partially detects, or if underlying networks do not support reusing old IP addresses, it is + // best to use the 'addressRanges' config section to specify CIDR ranges for the Ceph cluster. + // + // As a contrived example, one can use a theoretical Kubernetes-wide network for Ceph client + // traffic and a theoretical Rook-only network for Ceph replication traffic as shown: + // selectors: + // public: "default/cluster-fast-net" + // cluster: "rook-ceph/ceph-backend-net" + // + // +nullable + // +optional + Selectors map[CephNetworkType]string `json:"selectors,omitempty"` + + // AddressRanges specify a list of CIDRs that Rook will apply to Ceph's 'public_network' and/or + // 'cluster_network' configurations. 
This config section may be used for the "host" or "multus" + // network providers. + // +nullable + // +optional + AddressRanges *AddressRangesSpec `json:"addressRanges,omitempty"` + + // Settings for network connections such as compression and encryption across the + // wire. + // +nullable + // +optional + Connections *ConnectionsSpec `json:"connections,omitempty"` + + // HostNetwork to enable host network. + // If host networking is enabled or disabled on a running cluster, then the operator will automatically fail over all the mons to + // apply the new network settings. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty"` + + // IPFamily is the single stack IPv6 or IPv4 protocol + // +kubebuilder:validation:Enum=IPv4;IPv6 + // +nullable + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` + + // DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 + // +optional + DualStack bool `json:"dualStack,omitempty"` + + // Enable multiClusterService to export the Services between peer clusters + // +optional + MultiClusterService MultiClusterServiceSpec `json:"multiClusterService,omitempty"` +} + +// NetworkProviderType defines valid network providers for Rook. +// +kubebuilder:validation:Enum="";host;multus +type NetworkProviderType string + +const ( + NetworkProviderDefault = NetworkProviderType("") + NetworkProviderHost = NetworkProviderType("host") + NetworkProviderMultus = NetworkProviderType("multus") +) + +// CephNetworkType should be "public" or "cluster". +// Allow any string so that over-specified legacy clusters do not break on CRD update. +type CephNetworkType string + +const ( + CephNetworkPublic = CephNetworkType("public") + CephNetworkCluster = CephNetworkType("cluster") +) + +type AddressRangesSpec struct { + // Public defines a list of CIDRs to use for Ceph public network communication. 
+ // +optional + Public CIDRList `json:"public"` + + // Cluster defines a list of CIDRs to use for Ceph cluster network communication. + // +optional + Cluster CIDRList `json:"cluster"` +} + +// An IPv4 or IPv6 network CIDR. +// +// This naive kubebuilder regex provides immediate feedback for some typos and for a common problem +// case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code. +// +kubebuilder:validation:Pattern=`^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$` +type CIDR string + +// A list of CIDRs. +type CIDRList []CIDR + +type MultiClusterServiceSpec struct { + // Enable multiClusterService to export the mon and OSD services to peer cluster. + // Ensure that peer clusters are connected using an MCS API compatible application, + // like Globalnet Submariner. + // +optional + Enabled bool `json:"enabled,omitempty"` + + // ClusterID uniquely identifies a cluster. It is used as a prefix to nslookup exported + // services. For example: ...svc.clusterset.local + ClusterID string `json:"clusterID,omitempty"` +} +type ConnectionsSpec struct { + // Encryption settings for the network connections. + // +nullable + // +optional + Encryption *EncryptionSpec `json:"encryption,omitempty"` + + // Compression settings for the network connections. + // +nullable + // +optional + Compression *CompressionSpec `json:"compression,omitempty"` + + // Whether to require msgr2 (port 3300) even if compression or encryption are not enabled. + // If true, the msgr1 port (6789) will be disabled. + // Requires a kernel that supports msgr2 (kernel 5.11 or CentOS 8.4 or newer). + // +optional + RequireMsgr2 bool `json:"requireMsgr2,omitempty"` +} + +type EncryptionSpec struct { + // Whether to encrypt the data in transit across the wire to prevent eavesdropping + // the data on the network. The default is not set. 
Even if encryption is not enabled, + // clients still establish a strong initial authentication for the connection + // and data integrity is still validated with a crc check. When encryption is enabled, + // all communication between clients and Ceph daemons, or between Ceph daemons will + // be encrypted. + // +optional + Enabled bool `json:"enabled,omitempty"` +} + +type CompressionSpec struct { + // Whether to compress the data in transit across the wire. + // The default is not set. + // +optional + Enabled bool `json:"enabled,omitempty"` +} + +// DisruptionManagementSpec configures management of daemon disruptions +type DisruptionManagementSpec struct { + // This enables management of poddisruptionbudgets + // +optional + ManagePodBudgets bool `json:"managePodBudgets,omitempty"` + + // OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains + // it only works if managePodBudgets is true. + // the default is 30 minutes + // +optional + OSDMaintenanceTimeout time.Duration `json:"osdMaintenanceTimeout,omitempty"` + + // DEPRECATED: PGHealthCheckTimeout is no longer implemented + // +optional + PGHealthCheckTimeout time.Duration `json:"pgHealthCheckTimeout,omitempty"` + + // PgHealthyRegex is the regular expression that is used to determine which PG states should be considered healthy. + // The default is `^(active\+clean|active\+clean\+scrubbing|active\+clean\+scrubbing\+deep)$` + // +optional + PGHealthyRegex string `json:"pgHealthyRegex,omitempty"` + + // Deprecated. This enables management of machinedisruptionbudgets. + // +optional + ManageMachineDisruptionBudgets bool `json:"manageMachineDisruptionBudgets,omitempty"` + + // Deprecated. 
Namespace to look for MDBs by the machineDisruptionBudgetController + // +optional + MachineDisruptionBudgetNamespace string `json:"machineDisruptionBudgetNamespace,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephClient represents a Ceph Client +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephcl +type CephClient struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // Spec represents the specification of a Ceph Client + Spec ClientSpec `json:"spec"` + // Status represents the status of a Ceph Client + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *CephClientStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephClientList represents a list of Ceph Clients +type CephClientList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephClient `json:"items"` +} + +// ClientSpec represents the specification of a Ceph Client +type ClientSpec struct { + // +optional + Name string `json:"name,omitempty"` + // SecretName is the name of the secret created for this ceph client. + // If not specified, the default name is "rook-ceph-client-" as a prefix to the CR name. + // +kubebuilder:validation:XValidation:message="SecretName is immutable and cannot be changed",rule="self == oldSelf" + // +optional + SecretName string `json:"secretName,omitempty"` + + // RemoveSecret indicates whether the current secret for this ceph client should be removed or not. + // If true, the K8s secret will be deleted, but the cephx keyring will remain until the CR is deleted. 
+ // +optional
+ RemoveSecret bool `json:"removeSecret,omitempty"`
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Caps map[string]string `json:"caps"`
+ // Security represents security settings
+ // +optional
+ Security ClientSecuritySpec `json:"security,omitempty"`
+}
+
+// ClientSecuritySpec represents security settings for a Ceph Client
+type ClientSecuritySpec struct {
+ // CephX configures CephX key settings. More: https://docs.ceph.com/en/latest/dev/cephx/
+ // +optional
+ CephX CephxConfig `json:"cephx,omitempty"`
+}
+
+// CephClientStatus represents the Status of Ceph Client
+type CephClientStatus struct {
+ // +optional
+ Phase ConditionType `json:"phase,omitempty"`
+ // +optional
+ // +nullable
+ Info map[string]string `json:"info,omitempty"`
+ // ObservedGeneration is the latest generation observed by the controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // +optional
+ Cephx CephxStatus `json:"cephx,omitempty"`
+}
+
+// CleanupPolicySpec represents a Ceph Cluster cleanup policy
+type CleanupPolicySpec struct {
+ // Confirmation represents the cleanup confirmation
+ // +optional
+ // +nullable
+ Confirmation CleanupConfirmationProperty `json:"confirmation,omitempty"`
+ // SanitizeDisks represents the way we sanitize disks
+ // +optional
+ // +nullable
+ SanitizeDisks SanitizeDisksSpec `json:"sanitizeDisks,omitempty"`
+ // AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present
+ // +optional
+ AllowUninstallWithVolumes bool `json:"allowUninstallWithVolumes,omitempty"`
+
+ // WipeDevicesFromOtherClusters wipes the OSD disks belonging to other clusters. This is useful in scenarios where ceph cluster
+ // was reinstalled but OSD disk still contains the metadata from previous ceph cluster. 
+ // +optional
+ WipeDevicesFromOtherClusters bool `json:"wipeDevicesFromOtherClusters"`
+}
+
+// CleanupConfirmationProperty represents the cleanup confirmation
+// +kubebuilder:validation:Pattern=`^$|^yes-really-destroy-data$`
+type CleanupConfirmationProperty string
+
+// SanitizeDataSourceProperty represents a sanitizing data source
+type SanitizeDataSourceProperty string
+
+// SanitizeMethodProperty represents a disk sanitizing method
+type SanitizeMethodProperty string
+
+// SanitizeDisksSpec represents a disk sanitizing specification
+type SanitizeDisksSpec struct {
+ // Method is the method we use to sanitize disks
+ // +optional
+ // +kubebuilder:validation:Enum=complete;quick
+ Method SanitizeMethodProperty `json:"method,omitempty"`
+ // DataSource is the data source to use to sanitize the disk with
+ // +optional
+ // +kubebuilder:validation:Enum=zero;random
+ DataSource SanitizeDataSourceProperty `json:"dataSource,omitempty"`
+ // Iteration is the number of passes to apply the sanitizing
+ // +optional
+ Iteration int32 `json:"iteration,omitempty"`
+}
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CephRBDMirror represents a Ceph RBD Mirror
+// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:shortName=cephrbdm
+type CephRBDMirror struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ Spec RBDMirroringSpec `json:"spec"`
+ // +kubebuilder:pruning:PreserveUnknownFields
+ // +optional
+ Status *RBDMirrorStatus `json:"status,omitempty"`
+}
+
+// RBDMirrorStatus represents the status of the RBD mirror resource
+type RBDMirrorStatus struct {
+ Status `json:",inline"`
+ Cephx LocalCephxStatus `json:"cephx,omitempty"`
+}
+
+// CephRBDMirrorList represents a list of Ceph RBD Mirrors
+// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephRBDMirrorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephRBDMirror `json:"items"` +} + +// RBDMirroringSpec represents the specification of an RBD mirror daemon +type RBDMirroringSpec struct { + // Count represents the number of rbd mirror instance to run + // +kubebuilder:validation:Minimum=1 + Count int `json:"count"` + + // Peers represents the peers spec + // +nullable + // +optional + Peers MirroringPeerSpec `json:"peers,omitempty"` + + // The affinity to place the rgw pods (default is to place on any available node) + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. 
+ // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Labels Labels `json:"labels,omitempty"` + + // The resource requirements for the rbd mirror pods + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets priority class on the rbd mirror pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` +} + +// MirroringPeerSpec represents the specification of a mirror peer +type MirroringPeerSpec struct { + // SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + // +optional + SecretNames []string `json:"secretNames,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephFilesystemMirror is the Ceph Filesystem Mirror object definition +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephfsm +type CephFilesystemMirror struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec FilesystemMirroringSpec `json:"spec"` + // +optional + Status *FileMirrorStatus `json:"status,omitempty"` +} + +// FileMirrorStatus represents the status of the FileSystem mirror resource +type FileMirrorStatus struct { + Status `json:",inline"` + Cephx LocalCephxStatus `json:"cephx,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephFilesystemMirrorList is a list of CephFilesystemMirror +type CephFilesystemMirrorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephFilesystemMirror `json:"items"` +} + +// FilesystemMirroringSpec is the filesystem mirroring specification +type FilesystemMirroringSpec 
struct { + // The affinity to place the rgw pods (default is to place on any available node) + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` + + // The annotations-related configuration to add/set on each Pod related object. + // +nullable + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. + // +nullable + // +optional + Labels Labels `json:"labels,omitempty"` + + // The resource requirements for the cephfs-mirror pods + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets priority class on the cephfs-mirror pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` +} + +// IPFamilyType represents the single stack Ipv4 or Ipv6 protocol. +type IPFamilyType string + +const ( + // IPv6 internet protocol version + IPv6 IPFamilyType = "IPv6" + // IPv4 internet protocol version + IPv4 IPFamilyType = "IPv4" +) + +type StorageScopeSpec struct { + // +nullable + // +optional + Nodes []Node `json:"nodes,omitempty"` + // +optional + UseAllNodes bool `json:"useAllNodes,omitempty"` + // +optional + // Whether to always schedule OSDs on a node even if the node is not currently scheduleable or ready + ScheduleAlways bool `json:"scheduleAlways,omitempty"` + // +optional + OnlyApplyOSDPlacement bool `json:"onlyApplyOSDPlacement,omitempty"` + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Config map[string]string `json:"config,omitempty"` + Selection `json:",inline"` + // +nullable + // +optional + StorageClassDeviceSets []StorageClassDeviceSet `json:"storageClassDeviceSets,omitempty"` + // Migration handles the OSD migration + // +optional + Migration Migration `json:"migration,omitempty"` + // +optional + Store OSDStore `json:"store,omitempty"` + // +optional + // FlappingRestartIntervalHours defines the time for which the 
OSD pods, that failed with zero exit code, will sleep before restarting. + // This is needed for OSD flapping where OSD daemons are marked down more than 5 times in 600 seconds by Ceph. + // Preventing the OSD pods to restart immediately in such scenarios will prevent Rook from marking OSD as `up` and thus + // peering of the PGs mapped to the OSD. + // User needs to manually restart the OSD pod if they manage to fix the underlying OSD flapping issue before the restart interval. + // The sleep will be disabled if this interval is set to 0. + FlappingRestartIntervalHours int `json:"flappingRestartIntervalHours"` + // FullRatio is the ratio at which the cluster is considered full and ceph will stop accepting writes. Default is 0.95. + // +kubebuilder:validation:Minimum=0.0 + // +kubebuilder:validation:Maximum=1.0 + // +optional + // +nullable + FullRatio *float64 `json:"fullRatio,omitempty"` + // NearFullRatio is the ratio at which the cluster is considered nearly full and will raise a ceph health warning. Default is 0.85. + // +kubebuilder:validation:Minimum=0.0 + // +kubebuilder:validation:Maximum=1.0 + // +optional + // +nullable + NearFullRatio *float64 `json:"nearFullRatio,omitempty"` + // BackfillFullRatio is the ratio at which the cluster is too full for backfill. Backfill will be disabled if above this threshold. Default is 0.90. + // +kubebuilder:validation:Minimum=0.0 + // +kubebuilder:validation:Maximum=1.0 + // +optional + // +nullable + BackfillFullRatio *float64 `json:"backfillFullRatio,omitempty"` + // Whether to allow updating the device class after the OSD is initially provisioned + // +optional + AllowDeviceClassUpdate bool `json:"allowDeviceClassUpdate,omitempty"` + // Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased. + // This allows cluster data to be rebalanced to make most effective use of new OSD space. + // The default is false since data rebalancing can cause temporary cluster slowdown. 
+ // +optional + AllowOsdCrushWeightUpdate bool `json:"allowOsdCrushWeightUpdate,omitempty"` + // The maximum number of OSDs to update in parallel. + // +kubebuilder:validation:Minimum=1 + // +optional + OSDMaxUpdatesInParallel uint32 `json:"osdMaxUpdatesInParallel,omitempty"` +} + +// Migration handles the OSD migration +type Migration struct { + // A user confirmation to migrate the OSDs. It destroys each OSD one at a time, cleans up the backing disk + // and prepares OSD with same ID on that disk + // +optional + // +kubebuilder:validation:Pattern=`^$|^yes-really-migrate-osds$` + Confirmation string `json:"confirmation,omitempty"` +} + +// OSDStore is the backend storage type used for creating the OSDs +type OSDStore struct { + // Type of backend storage to be used while creating OSDs. If empty, then bluestore will be used + // +optional + // +kubebuilder:validation:Enum=bluestore;bluestore-rdr; + Type string `json:"type,omitempty"` + // UpdateStore updates the backend store for existing OSDs. 
It destroys each OSD one at a time, cleans up the backing disk + // and prepares same OSD on that disk + // +optional + // +kubebuilder:validation:Pattern=`^$|^yes-really-update-store$` + UpdateStore string `json:"updateStore,omitempty"` +} + +// Node is a storage nodes +// +nullable +type Node struct { + // +optional + Name string `json:"name,omitempty"` + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Config map[string]string `json:"config,omitempty"` + Selection `json:",inline"` +} + +// Device represents a disk to use in the cluster +type Device struct { + // +optional + Name string `json:"name,omitempty"` + // +optional + FullPath string `json:"fullpath,omitempty"` + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Config map[string]string `json:"config,omitempty"` +} + +type Selection struct { + // Whether to consume all the storage devices found on a machine + // +optional + UseAllDevices *bool `json:"useAllDevices,omitempty"` + // A regular expression to allow more fine-grained selection of devices on nodes across the cluster + // +optional + DeviceFilter string `json:"deviceFilter,omitempty"` + // A regular expression to allow more fine-grained selection of devices with path names + // +optional + DevicePathFilter string `json:"devicePathFilter,omitempty"` + // List of devices to use as storage devices + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Devices []Device `json:"devices,omitempty"` + // PersistentVolumeClaims to use as storage + // +optional + VolumeClaimTemplates []VolumeClaimTemplate `json:"volumeClaimTemplates,omitempty"` +} + +// PlacementSpec is the placement for core ceph daemons part of the CephCluster CRD +type PlacementSpec map[KeyType]Placement + +// Placement is the placement for an object +type 
Placement struct { + // NodeAffinity is a group of node affinity scheduling rules + // +optional + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` + // PodAffinity is a group of inter pod affinity scheduling rules + // +optional + PodAffinity *v1.PodAffinity `json:"podAffinity,omitempty"` + // PodAntiAffinity is a group of inter pod anti affinity scheduling rules + // +optional + PodAntiAffinity *v1.PodAntiAffinity `json:"podAntiAffinity,omitempty"` + // The pod this Toleration is attached to tolerates any taint that matches + // the triple using the matching operator + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // TopologySpreadConstraints specifies how to spread matching pods among the given topology + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// ResourceSpec is a collection of ResourceRequirements that describes the compute resource requirements +type ResourceSpec map[string]v1.ResourceRequirements + +// ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon +type ProbeSpec struct { + // Disabled determines whether probe is disable or not + // +optional + Disabled bool `json:"disabled,omitempty"` + // Probe describes a health check to be performed against a container to determine whether it is + // alive or ready to receive traffic. 
+ // +optional + Probe *v1.Probe `json:"probe,omitempty"` +} + +// PriorityClassNamesSpec is a map of priority class names to be assigned to components +type PriorityClassNamesSpec map[KeyType]string + +// StorageClassDeviceSet is a storage class device set +// +nullable +type StorageClassDeviceSet struct { + // Name is a unique identifier for the set + Name string `json:"name"` + // Count is the number of devices in this set + // +kubebuilder:validation:Minimum=1 + Count int `json:"count"` + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` // Requests/limits for the devices + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Placement Placement `json:"placement,omitempty"` // Placement constraints for the device daemons + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + PreparePlacement *Placement `json:"preparePlacement,omitempty"` // Placement constraints for the device preparation + // Provider-specific device configuration + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + // +optional + Config map[string]string `json:"config,omitempty"` + // VolumeClaimTemplates is a list of PVC templates for the underlying storage devices + VolumeClaimTemplates []VolumeClaimTemplate `json:"volumeClaimTemplates"` + // Portable represents OSD portability across the hosts + // +optional + Portable bool `json:"portable,omitempty"` + // TuneSlowDeviceClass Tune the OSD when running on a slow Device Class + // +optional + TuneSlowDeviceClass bool `json:"tuneDeviceClass,omitempty"` + // TuneFastDeviceClass Tune the OSD when running on a fast Device Class + // +optional + TuneFastDeviceClass bool `json:"tuneFastDeviceClass,omitempty"` + // Scheduler name for OSD pod placement + // +optional + SchedulerName string `json:"schedulerName,omitempty"` + // Whether to encrypt the deviceSet + // +optional + Encrypted bool 
`json:"encrypted,omitempty"`
+}
+
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CephFilesystemSubVolumeGroup represents a Ceph Filesystem SubVolumeGroup
+// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
+// +kubebuilder:printcolumn:name="Filesystem",type=string,JSONPath=`.spec.filesystemName`,description="Name of the CephFileSystem"
+// +kubebuilder:printcolumn:name="Quota",type=string,JSONPath=`.spec.quota`
+// +kubebuilder:printcolumn:name="Pinning",type=string,JSONPath=`.status.info.pinning`,priority=1
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:shortName=cephfssvg;cephsvg
+type CephFilesystemSubVolumeGroup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ // Spec represents the specification of a Ceph Filesystem SubVolumeGroup
+ Spec CephFilesystemSubVolumeGroupSpec `json:"spec"`
+ // Status represents the status of a CephFilesystem SubvolumeGroup
+ // +kubebuilder:pruning:PreserveUnknownFields
+ // +optional
+ Status *CephFilesystemSubVolumeGroupStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CephFilesystemSubVolumeGroupList represents a list of Ceph Filesystem SubVolumeGroups
+type CephFilesystemSubVolumeGroupList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []CephFilesystemSubVolumeGroup `json:"items"`
+}
+
+// CephFilesystemSubVolumeGroupSpec represents the specification of a Ceph Filesystem SubVolumeGroup
+type CephFilesystemSubVolumeGroupSpec struct {
+ // The name of the subvolume group. If not set, the default is the name of the subvolumeGroup CR. 
+ // +kubebuilder:validation:XValidation:message="name is immutable",rule="self == oldSelf" + // +optional + Name string `json:"name,omitempty"` + // FilesystemName is the name of Ceph Filesystem SubVolumeGroup volume name. Typically it's the name of + // the CephFilesystem CR. If not coming from the CephFilesystem CR, it can be retrieved from the + // list of Ceph Filesystem volumes with `ceph fs volume ls`. To learn more about Ceph Filesystem + // abstractions see https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-volumes-and-subvolumes + // +kubebuilder:validation:XValidation:message="filesystemName is immutable",rule="self == oldSelf" + FilesystemName string `json:"filesystemName"` + // Pinning configuration of CephFilesystemSubVolumeGroup, + // reference https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups + // only one out of (export, distributed, random) can be set at a time + // +optional + Pinning CephFilesystemSubVolumeGroupSpecPinning `json:"pinning,omitempty"` + // Quota size of the Ceph Filesystem subvolume group. + // +optional + Quota *resource.Quantity `json:"quota,omitempty"` + // The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired. + // +optional + DataPoolName string `json:"dataPoolName"` + // ClusterID to be used for this subvolume group in the CSI configuration. + // It must be unique among all Ceph clusters managed by Rook. + // If not specified, the clusterID will be generated and can be found in the CR status. 
+ // +optional
+ // +kubebuilder:validation:XValidation:message="ClusterID is immutable",rule="self == oldSelf"
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=36
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_-]+$`
+ ClusterID string `json:"clusterID,omitempty"`
+}
+
+// CephFilesystemSubVolumeGroupSpecPinning represents the pinning configuration of SubVolumeGroup
+// +kubebuilder:validation:XValidation:message="only one pinning type should be set",rule="(has(self.export) && !has(self.distributed) && !has(self.random)) || (!has(self.export) && has(self.distributed) && !has(self.random)) || (!has(self.export) && !has(self.distributed) && has(self.random)) || (!has(self.export) && !has(self.distributed) && !has(self.random))"
+type CephFilesystemSubVolumeGroupSpecPinning struct {
+ // +kubebuilder:validation:Minimum=-1
+ // +kubebuilder:validation:Maximum=256
+ // +optional
+ // +nullable
+ Export *int `json:"export,omitempty"`
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=1
+ // +optional
+ // +nullable
+ Distributed *int `json:"distributed,omitempty"`
+ // +kubebuilder:validation:Minimum=0.0
+ // +kubebuilder:validation:Maximum=1.0
+ // +optional
+ // +nullable
+ Random *float64 `json:"random,omitempty"`
+}
+
+// CephFilesystemSubVolumeGroupStatus represents the Status of Ceph Filesystem SubVolumeGroup
+type CephFilesystemSubVolumeGroupStatus struct {
+ // +optional
+ Phase ConditionType `json:"phase,omitempty"`
+ // +optional
+ // +nullable
+ Info map[string]string `json:"info,omitempty"`
+ // ObservedGeneration is the latest generation observed by the controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="BlockPool",type=string,JSONPath=`.spec.blockPoolName`,description="Name of the Ceph BlockPool" +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cephbprns;cephrns +type CephBlockPoolRadosNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // Spec represents the specification of a Ceph BlockPool Rados Namespace + Spec CephBlockPoolRadosNamespaceSpec `json:"spec"` + // Status represents the status of a CephBlockPool Rados Namespace + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *CephBlockPoolRadosNamespaceStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBlockPoolRadosNamespaceList represents a list of Ceph BlockPool Rados Namespace +type CephBlockPoolRadosNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephBlockPoolRadosNamespace `json:"items"` +} + +// RadosNamespaceMirroring represents the mirroring configuration of CephBlockPoolRadosNamespace +type RadosNamespaceMirroring struct { + // RemoteNamespace is the name of the CephBlockPoolRadosNamespace on the secondary cluster CephBlockPool + // +optional + RemoteNamespace *string `json:"remoteNamespace"` + // Mode is the mirroring mode; either pool or image. 
+ // +kubebuilder:validation:Enum="";pool;image + Mode RadosNamespaceMirroringMode `json:"mode"` + // SnapshotSchedules is the scheduling of snapshot for mirrored images + // +optional + SnapshotSchedules []SnapshotScheduleSpec `json:"snapshotSchedules,omitempty"` +} + +// RadosNamespaceMirroringMode represents the mode of the RadosNamespace +type RadosNamespaceMirroringMode string + +const ( + // RadosNamespaceMirroringModePool represents the pool mode + RadosNamespaceMirroringModePool RadosNamespaceMirroringMode = "pool" + // RadosNamespaceMirroringModeImage represents the image mode + RadosNamespaceMirroringModeImage RadosNamespaceMirroringMode = "image" +) + +// CephBlockPoolRadosNamespaceSpec represents the specification of a CephBlockPool Rados Namespace +type CephBlockPoolRadosNamespaceSpec struct { + // The name of the CephBlockPoolRadosNamespaceSpec namespace. If not set, the default is the name of the CR. + // +kubebuilder:validation:XValidation:message="name is immutable",rule="self == oldSelf" + // +optional + Name string `json:"name,omitempty"` + // BlockPoolName is the name of Ceph BlockPool. Typically it's the name of + // the CephBlockPool CR. + // +kubebuilder:validation:XValidation:message="blockPoolName is immutable",rule="self == oldSelf" + BlockPoolName string `json:"blockPoolName"` + // Mirroring configuration of CephBlockPoolRadosNamespace + // +optional + Mirroring *RadosNamespaceMirroring `json:"mirroring,omitempty"` + + // ClusterID to be used for this RadosNamespace in the CSI configuration. + // It must be unique among all Ceph clusters managed by Rook. + // If not specified, the clusterID will be generated and can be found in the CR status. 
+ // +optional + // +kubebuilder:validation:XValidation:message="ClusterID is immutable",rule="self == oldSelf" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=36 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_-]+$` + ClusterID string `json:"clusterID,omitempty"` +} + +// CephBlockPoolRadosNamespaceStatus represents the Status of Ceph BlockPool +// Rados Namespace +type CephBlockPoolRadosNamespaceStatus struct { + // +optional + Phase ConditionType `json:"phase,omitempty"` + // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` + // +optional + MirroringStatus *MirroringStatusSpec `json:"mirroringStatus,omitempty"` + // +optional + MirroringInfo *MirroringInfoSpec `json:"mirroringInfo,omitempty"` + // +optional + SnapshotScheduleStatus *SnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +// This is a subset of the full Kubernetes API's VolumeSource that is reduced to what is most likely +// to be useful for mounting config files/dirs into Rook pods. +type ConfigFileVolumeSource struct { + // hostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // +optional + HostPath *v1.HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // emptyDir represents a temporary directory that shares a pod's lifetime. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // secret represents a secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + Secret *v1.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // persistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // configMap represents a configMap that should populate this volume + // +optional + ConfigMap *v1.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // projected items for all in one resources secrets, configmaps, and downward API + Projected *v1.ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` +} + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephCOSIDriver represents the CRD for the Ceph COSI Driver Deployment +// +kubebuilder:resource:shortName=cephcosi +type CephCOSIDriver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // Spec represents the specification of a Ceph COSI Driver + Spec CephCOSIDriverSpec `json:"spec"` +} + +// CephCOSIDriverList represents a list of Ceph COSI Driver +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephCOSIDriverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephCOSIDriver `json:"items"` +} + +// CephCOSIDriverSpec represents the specification of a 
Ceph COSI Driver +type CephCOSIDriverSpec struct { + // Image is the container image to run the Ceph COSI driver + // +optional + Image string `json:"image,omitempty"` + // ObjectProvisionerImage is the container image to run the COSI driver sidecar + // +optional + ObjectProvisionerImage string `json:"objectProvisionerImage,omitempty"` + // DeploymentStrategy is the strategy to use to deploy the COSI driver. + // +optional + // +kubebuilder:validation:Enum=Never;Auto;Always + DeploymentStrategy COSIDeploymentStrategy `json:"deploymentStrategy,omitempty"` + // Placement is the placement strategy to use for the COSI driver + // +optional + Placement Placement `json:"placement,omitempty"` + // Resources is the resource requirements for the COSI driver + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` +} + +// COSIDeploymentStrategy represents the strategy to use to deploy the Ceph COSI driver +type COSIDeploymentStrategy string + +const ( + // Never means the Ceph COSI driver will never deployed + COSIDeploymentStrategyNever COSIDeploymentStrategy = "Never" + // Auto means the Ceph COSI driver will be deployed automatically if object store is present + COSIDeploymentStrategyAuto COSIDeploymentStrategy = "Auto" + // Always means the Ceph COSI driver will be deployed even if the object store is not present + COSIDeploymentStrategyAlways COSIDeploymentStrategy = "Always" +) + +// +genclient +// +genclient:noStatus +// +kubebuilder:resource:shortName=nvmeof,path=cephnvmeofgateways +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +kubebuilder:subresource:status +// +// CephNVMeOFGateway represents a Ceph NVMe-oF Gateway +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephNVMeOFGateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + Spec NVMeOFGatewaySpec 
`json:"spec"` + // +optional + Status *NVMeOFGatewayStatus `json:"status,omitempty"` +} + +// NVMeOFGatewayStatus represents the status of Ceph NVMe-oF Gateway +type NVMeOFGatewayStatus struct { + Status `json:",inline"` + Cephx LocalCephxStatus `json:"cephx,omitempty"` +} + +// CephNVMeOFGatewayList represents a list of Ceph NVMe-oF Gateways +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CephNVMeOFGatewayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephNVMeOFGateway `json:"items"` +} + +// NVMeOFGatewaySpec represents the spec of an NVMe-oF gateway +type NVMeOFGatewaySpec struct { + // Image is the container image to use for the NVMe-oF gateway daemon. + // For example, quay.io/ceph/nvmeof:1.5 + // +kubebuilder:validation:MinLength=1 + Image string `json:"image"` + + // The number of active gateway instances + // +kubebuilder:validation:Minimum=1 + Instances int `json:"instances"` + + // Pool is the RADOS pool where NVMe-oF configuration is stored + // +kubebuilder:validation:MinLength=1 + Pool string `json:"pool"` + + // Group is the gateway group name for high availability (ANA group) + // +kubebuilder:validation:MinLength=1 + Group string `json:"group"` + + // ConfigMapRef is the name of the ConfigMap containing nvmeof.conf configuration + // If not specified, a default configuration will be generated + // +optional + // +kubebuilder:validation:MinLength=1 + ConfigMapRef string `json:"configMapRef,omitempty"` + + // NVMeOFConfig is a map of section names to key-value pairs for nvmeof.conf configuration + // This allows users to override or add configuration options without needing to manage a ConfigMap + // +optional + NVMeOFConfig map[string]map[string]string `json:"nvmeofConfig,omitempty"` + + // The affinity to place the gateway pods + // +optional + Placement Placement `json:"placement,omitempty"` + + // The annotations-related configuration to add/set on each Pod related 
object. + // +optional + Annotations Annotations `json:"annotations,omitempty"` + + // The labels-related configuration to add/set on each Pod related object. + // +optional + Labels Labels `json:"labels,omitempty"` + + // Resources set resource requests and limits + // +optional + Resources v1.ResourceRequirements `json:"resources,omitempty"` + + // PriorityClassName sets the priority class on the pods + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` + + // Whether host networking is enabled for the gateway. If not set, the network settings from the cluster CR will be applied. + // +optional + HostNetwork *bool `json:"hostNetwork,omitempty"` + + // Ports configuration for the NVMe-oF gateway + // +optional + Ports *NVMeOFGatewayPorts `json:"ports,omitempty"` + + // A liveness-probe to verify that gateway has valid run-time state. + // If LivenessProbe.Disabled is false and LivenessProbe.Probe is nil uses default probe. + // +optional + LivenessProbe *ProbeSpec `json:"livenessProbe,omitempty"` +} + +// NVMeOFGatewayPorts represents the port configuration for NVMe-oF gateway +type NVMeOFGatewayPorts struct { + // IOPort is the port for NVMe-oF IO traffic (default: 4420) + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + IOPort int32 `json:"ioPort,omitempty"` + + // GatewayPort is the port for the gateway service (default: 5500) + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + GatewayPort int32 `json:"gatewayPort,omitempty"` + + // MonitorPort is the port for the monitor service (default: 5499) + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + MonitorPort int32 `json:"monitorPort,omitempty"` + + // DiscoveryPort is the port for discovery service (default: 8009) + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +optional + DiscoveryPort int32 
`json:"discoveryPort,omitempty"` +} diff --git a/external/ceph.rook.io/v1/volume.go b/external/ceph.rook.io/v1/volume.go new file mode 100644 index 00000000..d713d976 --- /dev/null +++ b/external/ceph.rook.io/v1/volume.go @@ -0,0 +1,84 @@ +/* +Copyright 2023 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "path/filepath" + "reflect" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" +) + +func (src *ConfigFileVolumeSource) ToKubernetesVolumeSource() *corev1.VolumeSource { + if src == nil { + return nil + } + + dst := &corev1.VolumeSource{} + vDst := reflect.ValueOf(dst).Elem() + + tSrc := reflect.TypeOf(*src) + vSrc := reflect.ValueOf(*src) + for _, srcField := range reflect.VisibleFields(tSrc) { + if !srcField.IsExported() { + continue + } + + srcVal := vSrc.FieldByName(srcField.Name) + if srcVal.IsNil() { + continue // don't do anything if the src field is a nil ptr + } + + dstVal := vDst.FieldByName(srcField.Name) + dstVal.Set(srcVal) + } + + return dst +} + +// GenerateVolumesAndMounts converts Rook's AdditionalVolumeMounts type to a list of volumes and +// corresponding mounts that can be added to Kubernetes pod specs. 
+func (v *AdditionalVolumeMounts) GenerateVolumesAndMounts(rootDir string) ([]v1.Volume, []v1.VolumeMount) { + vols := []v1.Volume{} + mounts := []v1.VolumeMount{} + + for _, addVolMnt := range *v { + mountPath := filepath.Join(rootDir, addVolMnt.SubPath) + volName := ToValidDNSLabel(mountPath) + vols = append(vols, v1.Volume{ + Name: volName, + VolumeSource: *addVolMnt.VolumeSource.ToKubernetesVolumeSource(), + }) + mounts = append(mounts, v1.VolumeMount{ + Name: volName, + MountPath: mountPath, + }) + } + + return vols, mounts +} + +func (t *VolumeClaimTemplate) ToPVC() *corev1.PersistentVolumeClaim { + if t == nil { + return nil + } + return &corev1.PersistentVolumeClaim{ + ObjectMeta: *t.ObjectMeta.DeepCopy(), + Spec: *t.Spec.DeepCopy(), + } +} diff --git a/external/ceph.rook.io/v1/volume_test.go b/external/ceph.rook.io/v1/volume_test.go new file mode 100644 index 00000000..78133d65 --- /dev/null +++ b/external/ceph.rook.io/v1/volume_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2023 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +func validateToVolumeSource( + t *testing.T, + fieldUnderTest string, fieldValue reflect.Value, + in *ConfigFileVolumeSource, +) { + got := in.ToKubernetesVolumeSource() + + // validate got + vGot := reflect.ValueOf(got).Elem() + for _, gField := range reflect.VisibleFields(vGot.Type()) { + gFieldVal := vGot.FieldByName(gField.Name) + + if gField.Name != fieldUnderTest { + assert.Nilf(t, gFieldVal.Interface(), "fields NOT under test should be nil") + continue + } + + assert.Equalf(t, fieldValue.Interface(), gFieldVal.Interface(), + "fields under test should be deeply equal to what was created") + } +} + +func TestConfigFileVolumeSource_ToVolumeSource(t *testing.T) { + t.Run("nil receiver", func(t *testing.T) { + var in *ConfigFileVolumeSource = nil + got := in.ToKubernetesVolumeSource() + assert.Nil(t, got) + }) + + t.Run("zero-value receiver", func(t *testing.T) { + in := &ConfigFileVolumeSource{} + got := in.ToKubernetesVolumeSource() + assert.Equal(t, v1.VolumeSource{}, *got) + }) + + for _, field := range reflect.VisibleFields(reflect.TypeOf(ConfigFileVolumeSource{})) { + // for each struct field of ConfigFileVolumeSource, create a new CFVS with that field filled + // in with some non-nil value to test ToVolumeSource() with. 
Then ensure that every + // possible volume type of the CFVS converts to k8s' corev1.VolumeSource successfully + in := &ConfigFileVolumeSource{} + + // use reflection to set the field under test with a non-nil created object + vIn := reflect.ValueOf(in).Elem() + fIn := vIn.FieldByName(field.Name) + baseType := field.Type.Elem() + fVal := reflect.New(baseType) + fIn.Set(fVal) + + t.Run(fmt.Sprintf("%s: %s{}", field.Name, field.Type), func(t *testing.T) { + // test with zero object + validateToVolumeSource(t, field.Name, fVal, in) + }) + + t.Run(fmt.Sprintf("%s: %s{}", field.Name, field.Type), func(t *testing.T) { + // set some data set on the object + setSomeFields(field.Type.Elem(), fVal.Elem()) + fIn.Set(fVal) + + validateToVolumeSource(t, field.Name, fVal, in) + }) + } +} + +func setSomeFields(t reflect.Type, v reflect.Value) { + for _, f := range reflect.VisibleFields(t) { + fVal := v.FieldByName(f.Name) + setSomeData(fVal) + } +} + +func setSomeData(v reflect.Value) { + switch v.Kind() { + case reflect.Pointer: + v.Set(reflect.New(v.Type().Elem())) + setSomeData(v.Elem()) + case reflect.String: + v.SetString("string-data") + case reflect.Bool: + v.SetBool(true) + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + v.SetInt(0o755) + } +} diff --git a/external/ceph.rook.io/v1/zz_generated.deepcopy.go b/external/ceph.rook.io/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..e615f19a --- /dev/null +++ b/external/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,5592 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2018 The Rook Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AMQPEndpointSpec) DeepCopyInto(out *AMQPEndpointSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMQPEndpointSpec. +func (in *AMQPEndpointSpec) DeepCopy() *AMQPEndpointSpec { + if in == nil { + return nil + } + out := new(AMQPEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalVolumeMount) DeepCopyInto(out *AdditionalVolumeMount) { + *out = *in + if in.VolumeSource != nil { + in, out := &in.VolumeSource, &out.VolumeSource + *out = new(ConfigFileVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolumeMount. +func (in *AdditionalVolumeMount) DeepCopy() *AdditionalVolumeMount { + if in == nil { + return nil + } + out := new(AdditionalVolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in AdditionalVolumeMounts) DeepCopyInto(out *AdditionalVolumeMounts) { + { + in := &in + *out = make(AdditionalVolumeMounts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolumeMounts. +func (in AdditionalVolumeMounts) DeepCopy() AdditionalVolumeMounts { + if in == nil { + return nil + } + out := new(AdditionalVolumeMounts) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressRangesSpec) DeepCopyInto(out *AddressRangesSpec) { + *out = *in + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = make(CIDRList, len(*in)) + copy(*out, *in) + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = make(CIDRList, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressRangesSpec. +func (in *AddressRangesSpec) DeepCopy() *AddressRangesSpec { + if in == nil { + return nil + } + out := new(AddressRangesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Annotations) DeepCopyInto(out *Annotations) { + { + in := &in + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Annotations. +func (in Annotations) DeepCopy() Annotations { + if in == nil { + return nil + } + out := new(Annotations) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in AnnotationsSpec) DeepCopyInto(out *AnnotationsSpec) { + { + in := &in + *out = make(AnnotationsSpec, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationsSpec. +func (in AnnotationsSpec) DeepCopy() AnnotationsSpec { + if in == nil { + return nil + } + out := new(AnnotationsSpec) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSpec) DeepCopyInto(out *AuthSpec) { + *out = *in + if in.Keystone != nil { + in, out := &in.Keystone, &out.Keystone + *out = new(KeystoneSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSpec. +func (in *AuthSpec) DeepCopy() *AuthSpec { + if in == nil { + return nil + } + out := new(AuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketNotificationSpec) DeepCopyInto(out *BucketNotificationSpec) { + *out = *in + if in.Events != nil { + in, out := &in.Events, &out.Events + *out = make([]BucketNotificationEvent, len(*in)) + copy(*out, *in) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(NotificationFilterSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketNotificationSpec. 
+func (in *BucketNotificationSpec) DeepCopy() *BucketNotificationSpec { + if in == nil { + return nil + } + out := new(BucketNotificationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketTopicSpec) DeepCopyInto(out *BucketTopicSpec) { + *out = *in + in.Endpoint.DeepCopyInto(&out.Endpoint) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketTopicSpec. +func (in *BucketTopicSpec) DeepCopy() *BucketTopicSpec { + if in == nil { + return nil + } + out := new(BucketTopicSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketTopicStatus) DeepCopyInto(out *BucketTopicStatus) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketTopicStatus. +func (in *BucketTopicStatus) DeepCopy() *BucketTopicStatus { + if in == nil { + return nil + } + out := new(BucketTopicStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in CIDRList) DeepCopyInto(out *CIDRList) { + { + in := &in + *out = make(CIDRList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRList. 
+func (in CIDRList) DeepCopy() CIDRList { + if in == nil { + return nil + } + out := new(CIDRList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSICephFSSpec) DeepCopyInto(out *CSICephFSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSICephFSSpec. +func (in *CSICephFSSpec) DeepCopy() *CSICephFSSpec { + if in == nil { + return nil + } + out := new(CSICephFSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { + *out = *in + in.ReadAffinity.DeepCopyInto(&out.ReadAffinity) + out.CephFS = in.CephFS + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. +func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { + if in == nil { + return nil + } + out := new(CSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capacity) DeepCopyInto(out *Capacity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capacity. +func (in *Capacity) DeepCopy() *Capacity { + if in == nil { + return nil + } + out := new(Capacity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephBlockPool) DeepCopyInto(out *CephBlockPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephBlockPoolStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPool. +func (in *CephBlockPool) DeepCopy() *CephBlockPool { + if in == nil { + return nil + } + out := new(CephBlockPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBlockPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolList) DeepCopyInto(out *CephBlockPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephBlockPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolList. +func (in *CephBlockPoolList) DeepCopy() *CephBlockPoolList { + if in == nil { + return nil + } + out := new(CephBlockPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBlockPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephBlockPoolRadosNamespace) DeepCopyInto(out *CephBlockPoolRadosNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephBlockPoolRadosNamespaceStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespace. +func (in *CephBlockPoolRadosNamespace) DeepCopy() *CephBlockPoolRadosNamespace { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBlockPoolRadosNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespaceList) DeepCopyInto(out *CephBlockPoolRadosNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephBlockPoolRadosNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespaceList. +func (in *CephBlockPoolRadosNamespaceList) DeepCopy() *CephBlockPoolRadosNamespaceList { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephBlockPoolRadosNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespaceSpec) DeepCopyInto(out *CephBlockPoolRadosNamespaceSpec) { + *out = *in + if in.Mirroring != nil { + in, out := &in.Mirroring, &out.Mirroring + *out = new(RadosNamespaceMirroring) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespaceSpec. +func (in *CephBlockPoolRadosNamespaceSpec) DeepCopy() *CephBlockPoolRadosNamespaceSpec { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespaceStatus) DeepCopyInto(out *CephBlockPoolRadosNamespaceStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MirroringStatus != nil { + in, out := &in.MirroringStatus, &out.MirroringStatus + *out = new(MirroringStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.MirroringInfo != nil { + in, out := &in.MirroringInfo, &out.MirroringInfo + *out = new(MirroringInfoSpec) + (*in).DeepCopyInto(*out) + } + if in.SnapshotScheduleStatus != nil { + in, out := &in.SnapshotScheduleStatus, &out.SnapshotScheduleStatus + *out = new(SnapshotScheduleStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new CephBlockPoolRadosNamespaceStatus. +func (in *CephBlockPoolRadosNamespaceStatus) DeepCopy() *CephBlockPoolRadosNamespaceStatus { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolStatus) DeepCopyInto(out *CephBlockPoolStatus) { + *out = *in + out.Cephx = in.Cephx + if in.MirroringStatus != nil { + in, out := &in.MirroringStatus, &out.MirroringStatus + *out = new(MirroringStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.MirroringInfo != nil { + in, out := &in.MirroringInfo, &out.MirroringInfo + *out = new(MirroringInfoSpec) + (*in).DeepCopyInto(*out) + } + if in.SnapshotScheduleStatus != nil { + in, out := &in.SnapshotScheduleStatus, &out.SnapshotScheduleStatus + *out = new(SnapshotScheduleStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolStatus. +func (in *CephBlockPoolStatus) DeepCopy() *CephBlockPoolStatus { + if in == nil { + return nil + } + out := new(CephBlockPoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephBucketNotification) DeepCopyInto(out *CephBucketNotification) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBucketNotification. +func (in *CephBucketNotification) DeepCopy() *CephBucketNotification { + if in == nil { + return nil + } + out := new(CephBucketNotification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBucketNotification) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBucketNotificationList) DeepCopyInto(out *CephBucketNotificationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephBucketNotification, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBucketNotificationList. +func (in *CephBucketNotificationList) DeepCopy() *CephBucketNotificationList { + if in == nil { + return nil + } + out := new(CephBucketNotificationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephBucketNotificationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBucketTopic) DeepCopyInto(out *CephBucketTopic) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(BucketTopicStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBucketTopic. +func (in *CephBucketTopic) DeepCopy() *CephBucketTopic { + if in == nil { + return nil + } + out := new(CephBucketTopic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBucketTopic) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBucketTopicList) DeepCopyInto(out *CephBucketTopicList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephBucketTopic, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBucketTopicList. +func (in *CephBucketTopicList) DeepCopy() *CephBucketTopicList { + if in == nil { + return nil + } + out := new(CephBucketTopicList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephBucketTopicList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephCOSIDriver) DeepCopyInto(out *CephCOSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCOSIDriver. +func (in *CephCOSIDriver) DeepCopy() *CephCOSIDriver { + if in == nil { + return nil + } + out := new(CephCOSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephCOSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephCOSIDriverList) DeepCopyInto(out *CephCOSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephCOSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCOSIDriverList. +func (in *CephCOSIDriverList) DeepCopy() *CephCOSIDriverList { + if in == nil { + return nil + } + out := new(CephCOSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephCOSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephCOSIDriverSpec) DeepCopyInto(out *CephCOSIDriverSpec) { + *out = *in + in.Placement.DeepCopyInto(&out.Placement) + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCOSIDriverSpec. +func (in *CephCOSIDriverSpec) DeepCopy() *CephCOSIDriverSpec { + if in == nil { + return nil + } + out := new(CephCOSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephClient) DeepCopyInto(out *CephClient) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephClientStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClient. +func (in *CephClient) DeepCopy() *CephClient { + if in == nil { + return nil + } + out := new(CephClient) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClient) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephClientList) DeepCopyInto(out *CephClientList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephClient, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClientList. +func (in *CephClientList) DeepCopy() *CephClientList { + if in == nil { + return nil + } + out := new(CephClientList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClientList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephClientStatus) DeepCopyInto(out *CephClientStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Cephx = in.Cephx + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClientStatus. +func (in *CephClientStatus) DeepCopy() *CephClientStatus { + if in == nil { + return nil + } + out := new(CephClientStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephCluster) DeepCopyInto(out *CephCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCluster. 
+func (in *CephCluster) DeepCopy() *CephCluster { + if in == nil { + return nil + } + out := new(CephCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephClusterHealthCheckSpec) DeepCopyInto(out *CephClusterHealthCheckSpec) { + *out = *in + in.DaemonHealth.DeepCopyInto(&out.DaemonHealth) + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = make(map[KeyType]*ProbeSpec, len(*in)) + for key, val := range *in { + var outVal *ProbeSpec + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = make(map[KeyType]*ProbeSpec, len(*in)) + for key, val := range *in { + var outVal *ProbeSpec + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterHealthCheckSpec. +func (in *CephClusterHealthCheckSpec) DeepCopy() *CephClusterHealthCheckSpec { + if in == nil { + return nil + } + out := new(CephClusterHealthCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterList. +func (in *CephClusterList) DeepCopy() *CephClusterList { + if in == nil { + return nil + } + out := new(CephClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephDaemonsVersions) DeepCopyInto(out *CephDaemonsVersions) { + *out = *in + if in.Mon != nil { + in, out := &in.Mon, &out.Mon + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Mgr != nil { + in, out := &in.Mgr, &out.Mgr + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Osd != nil { + in, out := &in.Osd, &out.Osd + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Rgw != nil { + in, out := &in.Rgw, &out.Rgw + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Mds != nil { + in, out := &in.Mds, &out.Mds + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RbdMirror != nil { + in, out := &in.RbdMirror, &out.RbdMirror + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CephFSMirror != nil { + in, out 
:= &in.CephFSMirror, &out.CephFSMirror + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Overall != nil { + in, out := &in.Overall, &out.Overall + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephDaemonsVersions. +func (in *CephDaemonsVersions) DeepCopy() *CephDaemonsVersions { + if in == nil { + return nil + } + out := new(CephDaemonsVersions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephExporterSpec) DeepCopyInto(out *CephExporterSpec) { + *out = *in + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephExporterSpec. +func (in *CephExporterSpec) DeepCopy() *CephExporterSpec { + if in == nil { + return nil + } + out := new(CephExporterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystem) DeepCopyInto(out *CephFilesystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephFilesystemStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystem. 
+func (in *CephFilesystem) DeepCopy() *CephFilesystem { + if in == nil { + return nil + } + out := new(CephFilesystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemList) DeepCopyInto(out *CephFilesystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephFilesystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemList. +func (in *CephFilesystemList) DeepCopy() *CephFilesystemList { + if in == nil { + return nil + } + out := new(CephFilesystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemMirror) DeepCopyInto(out *CephFilesystemMirror) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(FileMirrorStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemMirror. 
+func (in *CephFilesystemMirror) DeepCopy() *CephFilesystemMirror { + if in == nil { + return nil + } + out := new(CephFilesystemMirror) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemMirror) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemMirrorList) DeepCopyInto(out *CephFilesystemMirrorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephFilesystemMirror, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemMirrorList. +func (in *CephFilesystemMirrorList) DeepCopy() *CephFilesystemMirrorList { + if in == nil { + return nil + } + out := new(CephFilesystemMirrorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemMirrorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFilesystemStatus) DeepCopyInto(out *CephFilesystemStatus) { + *out = *in + if in.SnapshotScheduleStatus != nil { + in, out := &in.SnapshotScheduleStatus, &out.SnapshotScheduleStatus + *out = new(FilesystemSnapshotScheduleStatusSpec) + (*in).DeepCopyInto(*out) + } + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Cephx = in.Cephx + if in.MirroringStatus != nil { + in, out := &in.MirroringStatus, &out.MirroringStatus + *out = new(FilesystemMirroringInfoSpec) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemStatus. +func (in *CephFilesystemStatus) DeepCopy() *CephFilesystemStatus { + if in == nil { + return nil + } + out := new(CephFilesystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroup) DeepCopyInto(out *CephFilesystemSubVolumeGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephFilesystemSubVolumeGroupStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroup. 
+func (in *CephFilesystemSubVolumeGroup) DeepCopy() *CephFilesystemSubVolumeGroup { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemSubVolumeGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroupList) DeepCopyInto(out *CephFilesystemSubVolumeGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephFilesystemSubVolumeGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupList. +func (in *CephFilesystemSubVolumeGroupList) DeepCopy() *CephFilesystemSubVolumeGroupList { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephFilesystemSubVolumeGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFilesystemSubVolumeGroupSpec) DeepCopyInto(out *CephFilesystemSubVolumeGroupSpec) { + *out = *in + in.Pinning.DeepCopyInto(&out.Pinning) + if in.Quota != nil { + in, out := &in.Quota, &out.Quota + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupSpec. +func (in *CephFilesystemSubVolumeGroupSpec) DeepCopy() *CephFilesystemSubVolumeGroupSpec { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFilesystemSubVolumeGroupSpecPinning) DeepCopyInto(out *CephFilesystemSubVolumeGroupSpecPinning) { + *out = *in + if in.Export != nil { + in, out := &in.Export, &out.Export + *out = new(int) + **out = **in + } + if in.Distributed != nil { + in, out := &in.Distributed, &out.Distributed + *out = new(int) + **out = **in + } + if in.Random != nil { + in, out := &in.Random, &out.Random + *out = new(float64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupSpecPinning. +func (in *CephFilesystemSubVolumeGroupSpecPinning) DeepCopy() *CephFilesystemSubVolumeGroupSpecPinning { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupSpecPinning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFilesystemSubVolumeGroupStatus) DeepCopyInto(out *CephFilesystemSubVolumeGroupStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFilesystemSubVolumeGroupStatus. +func (in *CephFilesystemSubVolumeGroupStatus) DeepCopy() *CephFilesystemSubVolumeGroupStatus { + if in == nil { + return nil + } + out := new(CephFilesystemSubVolumeGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephHealthMessage) DeepCopyInto(out *CephHealthMessage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephHealthMessage. +func (in *CephHealthMessage) DeepCopy() *CephHealthMessage { + if in == nil { + return nil + } + out := new(CephHealthMessage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephNFS) DeepCopyInto(out *CephNFS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NFSStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNFS. +func (in *CephNFS) DeepCopy() *CephNFS { + if in == nil { + return nil + } + out := new(CephNFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephNFS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephNFSList) DeepCopyInto(out *CephNFSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephNFS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNFSList. +func (in *CephNFSList) DeepCopy() *CephNFSList { + if in == nil { + return nil + } + out := new(CephNFSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephNFSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephNVMeOFGateway) DeepCopyInto(out *CephNVMeOFGateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NVMeOFGatewayStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNVMeOFGateway. +func (in *CephNVMeOFGateway) DeepCopy() *CephNVMeOFGateway { + if in == nil { + return nil + } + out := new(CephNVMeOFGateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephNVMeOFGateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephNVMeOFGatewayList) DeepCopyInto(out *CephNVMeOFGatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephNVMeOFGateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephNVMeOFGatewayList. +func (in *CephNVMeOFGatewayList) DeepCopy() *CephNVMeOFGatewayList { + if in == nil { + return nil + } + out := new(CephNVMeOFGatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephNVMeOFGatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectRealm) DeepCopyInto(out *CephObjectRealm) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectRealm. +func (in *CephObjectRealm) DeepCopy() *CephObjectRealm { + if in == nil { + return nil + } + out := new(CephObjectRealm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephObjectRealm) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectRealmList) DeepCopyInto(out *CephObjectRealmList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephObjectRealm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectRealmList. +func (in *CephObjectRealmList) DeepCopy() *CephObjectRealmList { + if in == nil { + return nil + } + out := new(CephObjectRealmList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephObjectRealmList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectStore) DeepCopyInto(out *CephObjectStore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ObjectStoreStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStore. +func (in *CephObjectStore) DeepCopy() *CephObjectStore { + if in == nil { + return nil + } + out := new(CephObjectStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephObjectStore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectStoreList) DeepCopyInto(out *CephObjectStoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephObjectStore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreList. +func (in *CephObjectStoreList) DeepCopy() *CephObjectStoreList { + if in == nil { + return nil + } + out := new(CephObjectStoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephObjectStoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectStoreUser) DeepCopyInto(out *CephObjectStoreUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ObjectStoreUserStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreUser. +func (in *CephObjectStoreUser) DeepCopy() *CephObjectStoreUser { + if in == nil { + return nil + } + out := new(CephObjectStoreUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephObjectStoreUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectStoreUserList) DeepCopyInto(out *CephObjectStoreUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephObjectStoreUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectStoreUserList. +func (in *CephObjectStoreUserList) DeepCopy() *CephObjectStoreUserList { + if in == nil { + return nil + } + out := new(CephObjectStoreUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephObjectStoreUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectZone) DeepCopyInto(out *CephObjectZone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZone. +func (in *CephObjectZone) DeepCopy() *CephObjectZone { + if in == nil { + return nil + } + out := new(CephObjectZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephObjectZone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectZoneGroup) DeepCopyInto(out *CephObjectZoneGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneGroup. +func (in *CephObjectZoneGroup) DeepCopy() *CephObjectZoneGroup { + if in == nil { + return nil + } + out := new(CephObjectZoneGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephObjectZoneGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectZoneGroupList) DeepCopyInto(out *CephObjectZoneGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephObjectZoneGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneGroupList. +func (in *CephObjectZoneGroupList) DeepCopy() *CephObjectZoneGroupList { + if in == nil { + return nil + } + out := new(CephObjectZoneGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephObjectZoneGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephObjectZoneList) DeepCopyInto(out *CephObjectZoneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephObjectZone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephObjectZoneList. +func (in *CephObjectZoneList) DeepCopy() *CephObjectZoneList { + if in == nil { + return nil + } + out := new(CephObjectZoneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephObjectZoneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephRBDMirror) DeepCopyInto(out *CephRBDMirror) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(RBDMirrorStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephRBDMirror. +func (in *CephRBDMirror) DeepCopy() *CephRBDMirror { + if in == nil { + return nil + } + out := new(CephRBDMirror) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CephRBDMirror) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephRBDMirrorList) DeepCopyInto(out *CephRBDMirrorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephRBDMirror, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephRBDMirrorList. +func (in *CephRBDMirrorList) DeepCopy() *CephRBDMirrorList { + if in == nil { + return nil + } + out := new(CephRBDMirrorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephRBDMirrorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephStatus) DeepCopyInto(out *CephStatus) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make(map[string]CephHealthMessage, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Capacity = in.Capacity + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = new(CephDaemonsVersions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephStatus. +func (in *CephStatus) DeepCopy() *CephStatus { + if in == nil { + return nil + } + out := new(CephStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CephStorage) DeepCopyInto(out *CephStorage) {
	*out = *in
	if in.DeviceClasses != nil {
		in, out := &in.DeviceClasses, &out.DeviceClasses
		*out = make([]DeviceClasses, len(*in))
		copy(*out, *in)
	}
	in.OSD.DeepCopyInto(&out.OSD)
	if in.DeprecatedOSDs != nil {
		// map[string][]int: each slice value gets its own backing array.
		in, out := &in.DeprecatedOSDs, &out.DeprecatedOSDs
		*out = make(map[string][]int, len(*in))
		for key, val := range *in {
			var outVal []int
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make([]int, len(*in))
				copy(*out, *in)
			}
			(*out)[key] = outVal
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephStorage.
func (in *CephStorage) DeepCopy() *CephStorage {
	if in == nil {
		return nil
	}
	out := new(CephStorage)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephVersionSpec) DeepCopyInto(out *CephVersionSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephVersionSpec.
func (in *CephVersionSpec) DeepCopy() *CephVersionSpec {
	if in == nil {
		return nil
	}
	out := new(CephVersionSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephXConfigWithPriorCount) DeepCopyInto(out *CephXConfigWithPriorCount) {
	*out = *in
	out.CephxConfig = in.CephxConfig
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephXConfigWithPriorCount.
func (in *CephXConfigWithPriorCount) DeepCopy() *CephXConfigWithPriorCount {
	if in == nil {
		return nil
	}
	out := new(CephXConfigWithPriorCount)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephxConfig) DeepCopyInto(out *CephxConfig) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephxConfig.
func (in *CephxConfig) DeepCopy() *CephxConfig {
	if in == nil {
		return nil
	}
	out := new(CephxConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephxStatus) DeepCopyInto(out *CephxStatus) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephxStatus.
func (in *CephxStatus) DeepCopy() *CephxStatus {
	if in == nil {
		return nil
	}
	out := new(CephxStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CephxStatusWithKeyCount) DeepCopyInto(out *CephxStatusWithKeyCount) {
	*out = *in
	out.CephxStatus = in.CephxStatus
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephxStatusWithKeyCount.
func (in *CephxStatusWithKeyCount) DeepCopy() *CephxStatusWithKeyCount {
	if in == nil {
		return nil
	}
	out := new(CephxStatusWithKeyCount)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CleanupPolicySpec) DeepCopyInto(out *CleanupPolicySpec) {
	*out = *in
	out.SanitizeDisks = in.SanitizeDisks
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupPolicySpec.
func (in *CleanupPolicySpec) DeepCopy() *CleanupPolicySpec {
	if in == nil {
		return nil
	}
	out := new(CleanupPolicySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientSecuritySpec) DeepCopyInto(out *ClientSecuritySpec) {
	*out = *in
	out.CephX = in.CephX
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSecuritySpec.
func (in *ClientSecuritySpec) DeepCopy() *ClientSecuritySpec {
	if in == nil {
		return nil
	}
	out := new(ClientSecuritySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientSpec) DeepCopyInto(out *ClientSpec) {
	*out = *in
	if in.Caps != nil {
		in, out := &in.Caps, &out.Caps
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	out.Security = in.Security
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientSpec.
func (in *ClientSpec) DeepCopy() *ClientSpec {
	if in == nil {
		return nil
	}
	out := new(ClientSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCephxConfig) DeepCopyInto(out *ClusterCephxConfig) {
	*out = *in
	out.Daemon = in.Daemon
	out.RBDMirrorPeer = in.RBDMirrorPeer
	out.CSI = in.CSI
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCephxConfig.
func (in *ClusterCephxConfig) DeepCopy() *ClusterCephxConfig {
	if in == nil {
		return nil
	}
	out := new(ClusterCephxConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCephxStatus) DeepCopyInto(out *ClusterCephxStatus) {
	*out = *in
	out.Admin = in.Admin
	out.Mon = in.Mon
	out.Mgr = in.Mgr
	out.OSD = in.OSD
	out.CSI = in.CSI
	out.RBDMirrorPeer = in.RBDMirrorPeer
	out.CrashCollector = in.CrashCollector
	out.CephExporter = in.CephExporter
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCephxStatus.
func (in *ClusterCephxStatus) DeepCopy() *ClusterCephxStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterCephxStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSecuritySpec) DeepCopyInto(out *ClusterSecuritySpec) {
	*out = *in
	in.KeyManagementService.DeepCopyInto(&out.KeyManagementService)
	out.KeyRotation = in.KeyRotation
	out.CephX = in.CephX
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecuritySpec.
func (in *ClusterSecuritySpec) DeepCopy() *ClusterSecuritySpec {
	if in == nil {
		return nil
	}
	out := new(ClusterSecuritySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
	*out = *in
	out.CephVersion = in.CephVersion
	in.Storage.DeepCopyInto(&out.Storage)
	if in.Annotations != nil {
		// AnnotationsSpec is a map of maps; both levels are reallocated.
		in, out := &in.Annotations, &out.Annotations
		*out = make(AnnotationsSpec, len(*in))
		for key, val := range *in {
			var outVal map[string]string
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(Annotations, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
			(*out)[key] = outVal
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(LabelsSpec, len(*in))
		for key, val := range *in {
			var outVal map[string]string
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(Labels, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
			(*out)[key] = outVal
		}
	}
	if in.Placement != nil {
		in, out := &in.Placement, &out.Placement
		*out = make(PlacementSpec, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	in.Network.DeepCopyInto(&out.Network)
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make(ResourceSpec, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.PriorityClassNames != nil {
		in, out := &in.PriorityClassNames, &out.PriorityClassNames
		*out = make(PriorityClassNamesSpec, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	out.DisruptionManagement = in.DisruptionManagement
	in.Mon.DeepCopyInto(&out.Mon)
	out.CrashCollector = in.CrashCollector
	out.Dashboard = in.Dashboard
	in.Monitoring.DeepCopyInto(&out.Monitoring)
	out.External = in.External
	in.Mgr.DeepCopyInto(&out.Mgr)
	out.CleanupPolicy = in.CleanupPolicy
	in.HealthCheck.DeepCopyInto(&out.HealthCheck)
	in.Security.DeepCopyInto(&out.Security)
	in.LogCollector.DeepCopyInto(&out.LogCollector)
	in.CSI.DeepCopyInto(&out.CSI)
	if in.CephConfig != nil {
		in, out := &in.CephConfig, &out.CephConfig
		*out = make(map[string]map[string]string, len(*in))
		for key, val := range *in {
			var outVal map[string]string
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(map[string]string, len(*in))
				for key, val := range *in {
					(*out)[key] = val
				}
			}
			(*out)[key] = outVal
		}
	}
	if in.CephConfigFromSecret != nil {
		in, out := &in.CephConfigFromSecret, &out.CephConfigFromSecret
		*out = make(map[string]map[string]corev1.SecretKeySelector, len(*in))
		for key, val := range *in {
			var outVal map[string]corev1.SecretKeySelector
			if val == nil {
				(*out)[key] = nil
			} else {
				in, out := &val, &outVal
				*out = make(map[string]corev1.SecretKeySelector, len(*in))
				for key, val := range *in {
					(*out)[key] = *val.DeepCopy()
				}
			}
			(*out)[key] = outVal
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
func (in *ClusterSpec) DeepCopy() *ClusterSpec {
	if in == nil {
		return nil
	}
	out := new(ClusterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.CephStatus != nil {
		in, out := &in.CephStatus, &out.CephStatus
		*out = new(CephStatus)
		(*in).DeepCopyInto(*out)
	}
	out.Cephx = in.Cephx
	if in.CephStorage != nil {
		in, out := &in.CephStorage, &out.CephStorage
		*out = new(CephStorage)
		(*in).DeepCopyInto(*out)
	}
	if in.CephVersion != nil {
		in, out := &in.CephVersion, &out.CephVersion
		*out = new(ClusterVersion)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
func (in *ClusterVersion) DeepCopy() *ClusterVersion {
	if in == nil {
		return nil
	}
	out := new(ClusterVersion)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CompressionSpec) DeepCopyInto(out *CompressionSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompressionSpec.
func (in *CompressionSpec) DeepCopy() *CompressionSpec {
	if in == nil {
		return nil
	}
	out := new(CompressionSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigFileVolumeSource) DeepCopyInto(out *ConfigFileVolumeSource) {
	*out = *in
	// Each optional volume-source pointer is reallocated when set.
	if in.HostPath != nil {
		in, out := &in.HostPath, &out.HostPath
		*out = new(corev1.HostPathVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.EmptyDir != nil {
		in, out := &in.EmptyDir, &out.EmptyDir
		*out = new(corev1.EmptyDirVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.Secret != nil {
		in, out := &in.Secret, &out.Secret
		*out = new(corev1.SecretVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.PersistentVolumeClaim != nil {
		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
		*out = new(corev1.PersistentVolumeClaimVolumeSource)
		**out = **in
	}
	if in.ConfigMap != nil {
		in, out := &in.ConfigMap, &out.ConfigMap
		*out = new(corev1.ConfigMapVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	if in.Projected != nil {
		in, out := &in.Projected, &out.Projected
		*out = new(corev1.ProjectedVolumeSource)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFileVolumeSource.
func (in *ConfigFileVolumeSource) DeepCopy() *ConfigFileVolumeSource {
	if in == nil {
		return nil
	}
	out := new(ConfigFileVolumeSource)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionsSpec) DeepCopyInto(out *ConnectionsSpec) {
	*out = *in
	if in.Encryption != nil {
		in, out := &in.Encryption, &out.Encryption
		*out = new(EncryptionSpec)
		**out = **in
	}
	if in.Compression != nil {
		in, out := &in.Compression, &out.Compression
		*out = new(CompressionSpec)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionsSpec.
func (in *ConnectionsSpec) DeepCopy() *ConnectionsSpec {
	if in == nil {
		return nil
	}
	out := new(ConnectionsSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrashCollectorSpec) DeepCopyInto(out *CrashCollectorSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrashCollectorSpec.
func (in *CrashCollectorSpec) DeepCopy() *CrashCollectorSpec {
	if in == nil {
		return nil
	}
	out := new(CrashCollectorSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonHealthSpec) DeepCopyInto(out *DaemonHealthSpec) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.Monitor.DeepCopyInto(&out.Monitor)
	in.ObjectStorageDaemon.DeepCopyInto(&out.ObjectStorageDaemon)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonHealthSpec.
func (in *DaemonHealthSpec) DeepCopy() *DaemonHealthSpec {
	if in == nil {
		return nil
	}
	out := new(DaemonHealthSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSpec.
func (in *DashboardSpec) DeepCopy() *DashboardSpec {
	if in == nil {
		return nil
	}
	out := new(DashboardSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Device) DeepCopyInto(out *Device) {
	*out = *in
	if in.Config != nil {
		in, out := &in.Config, &out.Config
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
func (in *Device) DeepCopy() *Device {
	if in == nil {
		return nil
	}
	out := new(Device)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceClasses) DeepCopyInto(out *DeviceClasses) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClasses.
func (in *DeviceClasses) DeepCopy() *DeviceClasses {
	if in == nil {
		return nil
	}
	out := new(DeviceClasses)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DisruptionManagementSpec) DeepCopyInto(out *DisruptionManagementSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisruptionManagementSpec.
func (in *DisruptionManagementSpec) DeepCopy() *DisruptionManagementSpec {
	if in == nil {
		return nil
	}
	out := new(DisruptionManagementSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionSpec) DeepCopyInto(out *EncryptionSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpec.
func (in *EncryptionSpec) DeepCopy() *EncryptionSpec {
	if in == nil {
		return nil
	}
	out := new(EncryptionSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
func (in *EndpointAddress) DeepCopy() *EndpointAddress {
	if in == nil {
		return nil
	}
	out := new(EndpointAddress)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ErasureCodedSpec) DeepCopyInto(out *ErasureCodedSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ErasureCodedSpec.
func (in *ErasureCodedSpec) DeepCopy() *ErasureCodedSpec {
	if in == nil {
		return nil
	}
	out := new(ErasureCodedSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalSpec) DeepCopyInto(out *ExternalSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSpec.
func (in *ExternalSpec) DeepCopy() *ExternalSpec {
	if in == nil {
		return nil
	}
	out := new(ExternalSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSMirroringSpec) DeepCopyInto(out *FSMirroringSpec) {
	*out = *in
	if in.Peers != nil {
		in, out := &in.Peers, &out.Peers
		*out = new(MirroringPeerSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.SnapshotSchedules != nil {
		in, out := &in.SnapshotSchedules, &out.SnapshotSchedules
		*out = make([]SnapshotScheduleSpec, len(*in))
		copy(*out, *in)
	}
	if in.SnapshotRetention != nil {
		in, out := &in.SnapshotRetention, &out.SnapshotRetention
		*out = make([]SnapshotScheduleRetentionSpec, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSMirroringSpec.
func (in *FSMirroringSpec) DeepCopy() *FSMirroringSpec {
	if in == nil {
		return nil
	}
	out := new(FSMirroringSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileMirrorStatus) DeepCopyInto(out *FileMirrorStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.Cephx = in.Cephx
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileMirrorStatus.
func (in *FileMirrorStatus) DeepCopy() *FileMirrorStatus {
	if in == nil {
		return nil
	}
	out := new(FileMirrorStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemMirrorInfoPeerSpec) DeepCopyInto(out *FilesystemMirrorInfoPeerSpec) {
	*out = *in
	if in.Remote != nil {
		in, out := &in.Remote, &out.Remote
		*out = new(PeerRemoteSpec)
		**out = **in
	}
	if in.Stats != nil {
		in, out := &in.Stats, &out.Stats
		*out = new(PeerStatSpec)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirrorInfoPeerSpec.
func (in *FilesystemMirrorInfoPeerSpec) DeepCopy() *FilesystemMirrorInfoPeerSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemMirrorInfoPeerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemMirroringInfo) DeepCopyInto(out *FilesystemMirroringInfo) {
	*out = *in
	if in.Filesystems != nil {
		in, out := &in.Filesystems, &out.Filesystems
		*out = make([]FilesystemsSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringInfo.
func (in *FilesystemMirroringInfo) DeepCopy() *FilesystemMirroringInfo {
	if in == nil {
		return nil
	}
	out := new(FilesystemMirroringInfo)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemMirroringInfoSpec) DeepCopyInto(out *FilesystemMirroringInfoSpec) {
	*out = *in
	if in.FilesystemMirroringAllInfo != nil {
		in, out := &in.FilesystemMirroringAllInfo, &out.FilesystemMirroringAllInfo
		*out = make([]FilesystemMirroringInfo, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringInfoSpec.
func (in *FilesystemMirroringInfoSpec) DeepCopy() *FilesystemMirroringInfoSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemMirroringInfoSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemMirroringSpec) DeepCopyInto(out *FilesystemMirroringSpec) {
	*out = *in
	in.Placement.DeepCopyInto(&out.Placement)
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(Annotations, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(Labels, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Resources.DeepCopyInto(&out.Resources)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemMirroringSpec.
func (in *FilesystemMirroringSpec) DeepCopy() *FilesystemMirroringSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemMirroringSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemSnapshotScheduleStatusRetention) DeepCopyInto(out *FilesystemSnapshotScheduleStatusRetention) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotScheduleStatusRetention.
func (in *FilesystemSnapshotScheduleStatusRetention) DeepCopy() *FilesystemSnapshotScheduleStatusRetention {
	if in == nil {
		return nil
	}
	out := new(FilesystemSnapshotScheduleStatusRetention)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemSnapshotScheduleStatusSpec) DeepCopyInto(out *FilesystemSnapshotScheduleStatusSpec) {
	*out = *in
	if in.SnapshotSchedules != nil {
		in, out := &in.SnapshotSchedules, &out.SnapshotSchedules
		*out = make([]FilesystemSnapshotSchedulesSpec, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotScheduleStatusSpec.
func (in *FilesystemSnapshotScheduleStatusSpec) DeepCopy() *FilesystemSnapshotScheduleStatusSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemSnapshotScheduleStatusSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemSnapshotSchedulesSpec) DeepCopyInto(out *FilesystemSnapshotSchedulesSpec) {
	*out = *in
	out.Retention = in.Retention
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSnapshotSchedulesSpec.
func (in *FilesystemSnapshotSchedulesSpec) DeepCopy() *FilesystemSnapshotSchedulesSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemSnapshotSchedulesSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemSpec) DeepCopyInto(out *FilesystemSpec) {
	*out = *in
	in.MetadataPool.DeepCopyInto(&out.MetadataPool)
	if in.DataPools != nil {
		in, out := &in.DataPools, &out.DataPools
		*out = make([]NamedPoolSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.MetadataServer.DeepCopyInto(&out.MetadataServer)
	if in.Mirroring != nil {
		in, out := &in.Mirroring, &out.Mirroring
		*out = new(FSMirroringSpec)
		(*in).DeepCopyInto(*out)
	}
	in.StatusCheck.DeepCopyInto(&out.StatusCheck)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSpec.
func (in *FilesystemSpec) DeepCopy() *FilesystemSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemsSpec) DeepCopyInto(out *FilesystemsSpec) {
	*out = *in
	if in.Peers != nil {
		in, out := &in.Peers, &out.Peers
		*out = make([]FilesystemMirrorInfoPeerSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemsSpec.
func (in *FilesystemsSpec) DeepCopy() *FilesystemsSpec {
	if in == nil {
		return nil
	}
	out := new(FilesystemsSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GaneshaRADOSSpec) DeepCopyInto(out *GaneshaRADOSSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GaneshaRADOSSpec.
func (in *GaneshaRADOSSpec) DeepCopy() *GaneshaRADOSSpec {
	if in == nil {
		return nil
	}
	out := new(GaneshaRADOSSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GaneshaServerSpec) DeepCopyInto(out *GaneshaServerSpec) {
	*out = *in
	in.Placement.DeepCopyInto(&out.Placement)
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(Annotations, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(Labels, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.HostNetwork != nil {
		in, out := &in.HostNetwork, &out.HostNetwork
		*out = new(bool)
		**out = **in
	}
	if in.LivenessProbe != nil {
		in, out := &in.LivenessProbe, &out.LivenessProbe
		*out = new(ProbeSpec)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GaneshaServerSpec.
func (in *GaneshaServerSpec) DeepCopy() *GaneshaServerSpec {
	if in == nil {
		return nil
	}
	out := new(GaneshaServerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) {
	*out = *in
	in.Placement.DeepCopyInto(&out.Placement)
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(Annotations, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(Labels, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Resources.DeepCopyInto(&out.Resources)
	if in.ExternalRgwEndpoints != nil {
		in, out := &in.ExternalRgwEndpoints, &out.ExternalRgwEndpoints
		*out = make([]EndpointAddress, len(*in))
		copy(*out, *in)
	}
	if in.Service != nil {
		in, out := &in.Service, &out.Service
		*out = new(RGWServiceSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.OpsLogSidecar != nil {
		in, out := &in.OpsLogSidecar, &out.OpsLogSidecar
		*out = new(OpsLogSidecar)
		(*in).DeepCopyInto(*out)
	}
	if in.HostNetwork != nil {
		in, out := &in.HostNetwork, &out.HostNetwork
		*out = new(bool)
		**out = **in
	}
	if in.DashboardEnabled != nil {
		in, out := &in.DashboardEnabled, &out.DashboardEnabled
		*out = new(bool)
		**out = **in
	}
	if in.AdditionalVolumeMounts != nil {
		in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts
		*out = make(AdditionalVolumeMounts, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.RgwConfig != nil {
		in, out := &in.RgwConfig, &out.RgwConfig
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.RgwConfigFromSecret != nil {
		in, out := &in.RgwConfigFromSecret, &out.RgwConfigFromSecret
		*out = make(map[string]corev1.SecretKeySelector, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.RgwCommandFlags != nil {
		in, out := &in.RgwCommandFlags, &out.RgwCommandFlags
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ReadAffinity != nil {
		in, out := &in.ReadAffinity, &out.ReadAffinity
		*out = new(RgwReadAffinity)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec.
func (in *GatewaySpec) DeepCopy() *GatewaySpec {
	if in == nil {
		return nil
	}
	out := new(GatewaySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPEndpointSpec) DeepCopyInto(out *HTTPEndpointSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPEndpointSpec.
func (in *HTTPEndpointSpec) DeepCopy() *HTTPEndpointSpec {
	if in == nil {
		return nil
	}
	out := new(HTTPEndpointSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthCheckSpec) DeepCopyInto(out *HealthCheckSpec) {
	*out = *in
	if in.Interval != nil {
		in, out := &in.Interval, &out.Interval
		*out = new(metav1.Duration)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckSpec.
func (in *HealthCheckSpec) DeepCopy() *HealthCheckSpec {
	if in == nil {
		return nil
	}
	out := new(HealthCheckSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HybridStorageSpec) DeepCopyInto(out *HybridStorageSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridStorageSpec.
+func (in *HybridStorageSpec) DeepCopy() *HybridStorageSpec { + if in == nil { + return nil + } + out := new(HybridStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaEndpointSpec) DeepCopyInto(out *KafkaEndpointSpec) { + *out = *in + if in.UserSecretRef != nil { + in, out := &in.UserSecretRef, &out.UserSecretRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaEndpointSpec. +func (in *KafkaEndpointSpec) DeepCopy() *KafkaEndpointSpec { + if in == nil { + return nil + } + out := new(KafkaEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KerberosConfigFiles) DeepCopyInto(out *KerberosConfigFiles) { + *out = *in + if in.VolumeSource != nil { + in, out := &in.VolumeSource, &out.VolumeSource + *out = new(ConfigFileVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KerberosConfigFiles. +func (in *KerberosConfigFiles) DeepCopy() *KerberosConfigFiles { + if in == nil { + return nil + } + out := new(KerberosConfigFiles) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KerberosKeytabFile) DeepCopyInto(out *KerberosKeytabFile) { + *out = *in + if in.VolumeSource != nil { + in, out := &in.VolumeSource, &out.VolumeSource + *out = new(ConfigFileVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KerberosKeytabFile. +func (in *KerberosKeytabFile) DeepCopy() *KerberosKeytabFile { + if in == nil { + return nil + } + out := new(KerberosKeytabFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KerberosSpec) DeepCopyInto(out *KerberosSpec) { + *out = *in + in.ConfigFiles.DeepCopyInto(&out.ConfigFiles) + in.KeytabFile.DeepCopyInto(&out.KeytabFile) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KerberosSpec. +func (in *KerberosSpec) DeepCopy() *KerberosSpec { + if in == nil { + return nil + } + out := new(KerberosSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyManagementServiceSpec) DeepCopyInto(out *KeyManagementServiceSpec) { + *out = *in + if in.ConnectionDetails != nil { + in, out := &in.ConnectionDetails, &out.ConnectionDetails + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyManagementServiceSpec. +func (in *KeyManagementServiceSpec) DeepCopy() *KeyManagementServiceSpec { + if in == nil { + return nil + } + out := new(KeyManagementServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KeyRotationSpec) DeepCopyInto(out *KeyRotationSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyRotationSpec. +func (in *KeyRotationSpec) DeepCopy() *KeyRotationSpec { + if in == nil { + return nil + } + out := new(KeyRotationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystoneSpec) DeepCopyInto(out *KeystoneSpec) { + *out = *in + if in.AcceptedRoles != nil { + in, out := &in.AcceptedRoles, &out.AcceptedRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TokenCacheSize != nil { + in, out := &in.TokenCacheSize, &out.TokenCacheSize + *out = new(int) + **out = **in + } + if in.RevocationInterval != nil { + in, out := &in.RevocationInterval, &out.RevocationInterval + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneSpec. +func (in *KeystoneSpec) DeepCopy() *KeystoneSpec { + if in == nil { + return nil + } + out := new(KeystoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Labels) DeepCopyInto(out *Labels) { + { + in := &in + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Labels. +func (in Labels) DeepCopy() Labels { + if in == nil { + return nil + } + out := new(Labels) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in LabelsSpec) DeepCopyInto(out *LabelsSpec) { + { + in := &in + *out = make(LabelsSpec, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsSpec. +func (in LabelsSpec) DeepCopy() LabelsSpec { + if in == nil { + return nil + } + out := new(LabelsSpec) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalCephxStatus) DeepCopyInto(out *LocalCephxStatus) { + *out = *in + out.Daemon = in.Daemon + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalCephxStatus. +func (in *LocalCephxStatus) DeepCopy() *LocalCephxStatus { + if in == nil { + return nil + } + out := new(LocalCephxStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogCollectorSpec) DeepCopyInto(out *LogCollectorSpec) { + *out = *in + if in.MaxLogSize != nil { + in, out := &in.MaxLogSize, &out.MaxLogSize + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectorSpec. +func (in *LogCollectorSpec) DeepCopy() *LogCollectorSpec { + if in == nil { + return nil + } + out := new(LogCollectorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetadataServerSpec) DeepCopyInto(out *MetadataServerSpec) { + *out = *in + in.Placement.DeepCopyInto(&out.Placement) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + if in.CacheMemoryLimitFactor != nil { + in, out := &in.CacheMemoryLimitFactor, &out.CacheMemoryLimitFactor + *out = new(float64) + **out = **in + } + if in.CacheMemoryRequestFactor != nil { + in, out := &in.CacheMemoryRequestFactor, &out.CacheMemoryRequestFactor + *out = new(float64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServerSpec. +func (in *MetadataServerSpec) DeepCopy() *MetadataServerSpec { + if in == nil { + return nil + } + out := new(MetadataServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MgrSpec) DeepCopyInto(out *MgrSpec) { + *out = *in + if in.Modules != nil { + in, out := &in.Modules, &out.Modules + *out = make([]Module, len(*in)) + copy(*out, *in) + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MgrSpec. 
+func (in *MgrSpec) DeepCopy() *MgrSpec { + if in == nil { + return nil + } + out := new(MgrSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Migration) DeepCopyInto(out *Migration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Migration. +func (in *Migration) DeepCopy() *Migration { + if in == nil { + return nil + } + out := new(Migration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationStatus) DeepCopyInto(out *MigrationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationStatus. +func (in *MigrationStatus) DeepCopy() *MigrationStatus { + if in == nil { + return nil + } + out := new(MigrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MirrorHealthCheckSpec) DeepCopyInto(out *MirrorHealthCheckSpec) { + *out = *in + in.Mirror.DeepCopyInto(&out.Mirror) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirrorHealthCheckSpec. +func (in *MirrorHealthCheckSpec) DeepCopy() *MirrorHealthCheckSpec { + if in == nil { + return nil + } + out := new(MirrorHealthCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MirroringInfo) DeepCopyInto(out *MirroringInfo) { + *out = *in + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]PeersSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringInfo. +func (in *MirroringInfo) DeepCopy() *MirroringInfo { + if in == nil { + return nil + } + out := new(MirroringInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MirroringInfoSpec) DeepCopyInto(out *MirroringInfoSpec) { + *out = *in + if in.MirroringInfo != nil { + in, out := &in.MirroringInfo, &out.MirroringInfo + *out = new(MirroringInfo) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringInfoSpec. +func (in *MirroringInfoSpec) DeepCopy() *MirroringInfoSpec { + if in == nil { + return nil + } + out := new(MirroringInfoSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MirroringPeerSpec) DeepCopyInto(out *MirroringPeerSpec) { + *out = *in + if in.SecretNames != nil { + in, out := &in.SecretNames, &out.SecretNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringPeerSpec. +func (in *MirroringPeerSpec) DeepCopy() *MirroringPeerSpec { + if in == nil { + return nil + } + out := new(MirroringPeerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MirroringSpec) DeepCopyInto(out *MirroringSpec) { + *out = *in + if in.SnapshotSchedules != nil { + in, out := &in.SnapshotSchedules, &out.SnapshotSchedules + *out = make([]SnapshotScheduleSpec, len(*in)) + copy(*out, *in) + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = new(MirroringPeerSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringSpec. +func (in *MirroringSpec) DeepCopy() *MirroringSpec { + if in == nil { + return nil + } + out := new(MirroringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MirroringStatus) DeepCopyInto(out *MirroringStatus) { + *out = *in + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(MirroringStatusSummarySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringStatus. +func (in *MirroringStatus) DeepCopy() *MirroringStatus { + if in == nil { + return nil + } + out := new(MirroringStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MirroringStatusSpec) DeepCopyInto(out *MirroringStatusSpec) { + *out = *in + in.MirroringStatus.DeepCopyInto(&out.MirroringStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringStatusSpec. +func (in *MirroringStatusSpec) DeepCopy() *MirroringStatusSpec { + if in == nil { + return nil + } + out := new(MirroringStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MirroringStatusSummarySpec) DeepCopyInto(out *MirroringStatusSummarySpec) { + *out = *in + out.States = in.States + if in.ImageStates != nil { + in, out := &in.ImageStates, &out.ImageStates + *out = new(StatesSpec) + **out = **in + } + out.GroupStates = in.GroupStates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirroringStatusSummarySpec. +func (in *MirroringStatusSummarySpec) DeepCopy() *MirroringStatusSummarySpec { + if in == nil { + return nil + } + out := new(MirroringStatusSummarySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Module) DeepCopyInto(out *Module) { + *out = *in + out.Settings = in.Settings + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Module. +func (in *Module) DeepCopy() *Module { + if in == nil { + return nil + } + out := new(Module) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModuleSettings) DeepCopyInto(out *ModuleSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModuleSettings. +func (in *ModuleSettings) DeepCopy() *ModuleSettings { + if in == nil { + return nil + } + out := new(ModuleSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonSpec) DeepCopyInto(out *MonSpec) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]MonZoneSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StretchCluster != nil { + in, out := &in.StretchCluster, &out.StretchCluster + *out = new(StretchClusterSpec) + (*in).DeepCopyInto(*out) + } + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(VolumeClaimTemplate) + (*in).DeepCopyInto(*out) + } + if in.ExternalMonIDs != nil { + in, out := &in.ExternalMonIDs, &out.ExternalMonIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonSpec. +func (in *MonSpec) DeepCopy() *MonSpec { + if in == nil { + return nil + } + out := new(MonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonZoneSpec) DeepCopyInto(out *MonZoneSpec) { + *out = *in + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(VolumeClaimTemplate) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonZoneSpec. +func (in *MonZoneSpec) DeepCopy() *MonZoneSpec { + if in == nil { + return nil + } + out := new(MonZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + if in.ExternalMgrEndpoints != nil { + in, out := &in.ExternalMgrEndpoints, &out.ExternalMgrEndpoints + *out = make([]corev1.EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(metav1.Duration) + **out = **in + } + if in.Exporter != nil { + in, out := &in.Exporter, &out.Exporter + *out = new(CephExporterSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. +func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { + if in == nil { + return nil + } + out := new(MonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterServiceSpec) DeepCopyInto(out *MultiClusterServiceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterServiceSpec. +func (in *MultiClusterServiceSpec) DeepCopy() *MultiClusterServiceSpec { + if in == nil { + return nil + } + out := new(MultiClusterServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSGaneshaSpec) DeepCopyInto(out *NFSGaneshaSpec) { + *out = *in + out.RADOS = in.RADOS + in.Server.DeepCopyInto(&out.Server) + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(NFSSecuritySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSGaneshaSpec. 
+func (in *NFSGaneshaSpec) DeepCopy() *NFSGaneshaSpec { + if in == nil { + return nil + } + out := new(NFSGaneshaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSSecuritySpec) DeepCopyInto(out *NFSSecuritySpec) { + *out = *in + if in.SSSD != nil { + in, out := &in.SSSD, &out.SSSD + *out = new(SSSDSpec) + (*in).DeepCopyInto(*out) + } + if in.Kerberos != nil { + in, out := &in.Kerberos, &out.Kerberos + *out = new(KerberosSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSSecuritySpec. +func (in *NFSSecuritySpec) DeepCopy() *NFSSecuritySpec { + if in == nil { + return nil + } + out := new(NFSSecuritySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSStatus) DeepCopyInto(out *NFSStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.Cephx = in.Cephx + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSStatus. +func (in *NFSStatus) DeepCopy() *NFSStatus { + if in == nil { + return nil + } + out := new(NFSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NVMeOFGatewayPorts) DeepCopyInto(out *NVMeOFGatewayPorts) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NVMeOFGatewayPorts. +func (in *NVMeOFGatewayPorts) DeepCopy() *NVMeOFGatewayPorts { + if in == nil { + return nil + } + out := new(NVMeOFGatewayPorts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NVMeOFGatewaySpec) DeepCopyInto(out *NVMeOFGatewaySpec) { + *out = *in + if in.NVMeOFConfig != nil { + in, out := &in.NVMeOFConfig, &out.NVMeOFConfig + *out = make(map[string]map[string]string, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + in.Placement.DeepCopyInto(&out.Placement) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(bool) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = new(NVMeOFGatewayPorts) + **out = **in + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NVMeOFGatewaySpec. +func (in *NVMeOFGatewaySpec) DeepCopy() *NVMeOFGatewaySpec { + if in == nil { + return nil + } + out := new(NVMeOFGatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NVMeOFGatewayStatus) DeepCopyInto(out *NVMeOFGatewayStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.Cephx = in.Cephx + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NVMeOFGatewayStatus. 
+func (in *NVMeOFGatewayStatus) DeepCopy() *NVMeOFGatewayStatus { + if in == nil { + return nil + } + out := new(NVMeOFGatewayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedBlockPoolSpec) DeepCopyInto(out *NamedBlockPoolSpec) { + *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedBlockPoolSpec. +func (in *NamedBlockPoolSpec) DeepCopy() *NamedBlockPoolSpec { + if in == nil { + return nil + } + out := new(NamedBlockPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedPoolSpec) DeepCopyInto(out *NamedPoolSpec) { + *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedPoolSpec. +func (in *NamedPoolSpec) DeepCopy() *NamedPoolSpec { + if in == nil { + return nil + } + out := new(NamedPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make(map[CephNetworkType]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AddressRanges != nil { + in, out := &in.AddressRanges, &out.AddressRanges + *out = new(AddressRangesSpec) + (*in).DeepCopyInto(*out) + } + if in.Connections != nil { + in, out := &in.Connections, &out.Connections + *out = new(ConnectionsSpec) + (*in).DeepCopyInto(*out) + } + out.MultiClusterService = in.MultiClusterService + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Selection.DeepCopyInto(&out.Selection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in NodesByName) DeepCopyInto(out *NodesByName) { + { + in := &in + *out = make(NodesByName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodesByName. 
+func (in NodesByName) DeepCopy() NodesByName { + if in == nil { + return nil + } + out := new(NodesByName) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationFilterRule) DeepCopyInto(out *NotificationFilterRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationFilterRule. +func (in *NotificationFilterRule) DeepCopy() *NotificationFilterRule { + if in == nil { + return nil + } + out := new(NotificationFilterRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationFilterSpec) DeepCopyInto(out *NotificationFilterSpec) { + *out = *in + if in.KeyFilters != nil { + in, out := &in.KeyFilters, &out.KeyFilters + *out = make([]NotificationKeyFilterRule, len(*in)) + copy(*out, *in) + } + if in.MetadataFilters != nil { + in, out := &in.MetadataFilters, &out.MetadataFilters + *out = make([]NotificationFilterRule, len(*in)) + copy(*out, *in) + } + if in.TagFilters != nil { + in, out := &in.TagFilters, &out.TagFilters + *out = make([]NotificationFilterRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationFilterSpec. +func (in *NotificationFilterSpec) DeepCopy() *NotificationFilterSpec { + if in == nil { + return nil + } + out := new(NotificationFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NotificationKeyFilterRule) DeepCopyInto(out *NotificationKeyFilterRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NotificationKeyFilterRule. 
+func (in *NotificationKeyFilterRule) DeepCopy() *NotificationKeyFilterRule { + if in == nil { + return nil + } + out := new(NotificationKeyFilterRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDStatus) DeepCopyInto(out *OSDStatus) { + *out = *in + if in.StoreType != nil { + in, out := &in.StoreType, &out.StoreType + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.MigrationStatus = in.MigrationStatus + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDStatus. +func (in *OSDStatus) DeepCopy() *OSDStatus { + if in == nil { + return nil + } + out := new(OSDStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDStore) DeepCopyInto(out *OSDStore) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDStore. +func (in *OSDStore) DeepCopy() *OSDStore { + if in == nil { + return nil + } + out := new(OSDStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectEndpointSpec) DeepCopyInto(out *ObjectEndpointSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectEndpointSpec. +func (in *ObjectEndpointSpec) DeepCopy() *ObjectEndpointSpec { + if in == nil { + return nil + } + out := new(ObjectEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectEndpoints) DeepCopyInto(out *ObjectEndpoints) { + *out = *in + if in.Insecure != nil { + in, out := &in.Insecure, &out.Insecure + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Secure != nil { + in, out := &in.Secure, &out.Secure + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectEndpoints. +func (in *ObjectEndpoints) DeepCopy() *ObjectEndpoints { + if in == nil { + return nil + } + out := new(ObjectEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectHealthCheckSpec) DeepCopyInto(out *ObjectHealthCheckSpec) { + *out = *in + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(ProbeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectHealthCheckSpec. +func (in *ObjectHealthCheckSpec) DeepCopy() *ObjectHealthCheckSpec { + if in == nil { + return nil + } + out := new(ObjectHealthCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectRealmSpec) DeepCopyInto(out *ObjectRealmSpec) { + *out = *in + out.Pull = in.Pull + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectRealmSpec. +func (in *ObjectRealmSpec) DeepCopy() *ObjectRealmSpec { + if in == nil { + return nil + } + out := new(ObjectRealmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ObjectSharedPoolsSpec) DeepCopyInto(out *ObjectSharedPoolsSpec) { + *out = *in + if in.PoolPlacements != nil { + in, out := &in.PoolPlacements, &out.PoolPlacements + *out = make([]PoolPlacementSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectSharedPoolsSpec. +func (in *ObjectSharedPoolsSpec) DeepCopy() *ObjectSharedPoolsSpec { + if in == nil { + return nil + } + out := new(ObjectSharedPoolsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStoreHostingSpec) DeepCopyInto(out *ObjectStoreHostingSpec) { + *out = *in + if in.AdvertiseEndpoint != nil { + in, out := &in.AdvertiseEndpoint, &out.AdvertiseEndpoint + *out = new(ObjectEndpointSpec) + **out = **in + } + if in.DNSNames != nil { + in, out := &in.DNSNames, &out.DNSNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreHostingSpec. +func (in *ObjectStoreHostingSpec) DeepCopy() *ObjectStoreHostingSpec { + if in == nil { + return nil + } + out := new(ObjectStoreHostingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStoreSecuritySpec) DeepCopyInto(out *ObjectStoreSecuritySpec) { + *out = *in + in.SecuritySpec.DeepCopyInto(&out.SecuritySpec) + in.ServerSideEncryptionS3.DeepCopyInto(&out.ServerSideEncryptionS3) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreSecuritySpec. 
+func (in *ObjectStoreSecuritySpec) DeepCopy() *ObjectStoreSecuritySpec { + if in == nil { + return nil + } + out := new(ObjectStoreSecuritySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStoreSpec) DeepCopyInto(out *ObjectStoreSpec) { + *out = *in + in.MetadataPool.DeepCopyInto(&out.MetadataPool) + in.DataPool.DeepCopyInto(&out.DataPool) + in.SharedPools.DeepCopyInto(&out.SharedPools) + in.Gateway.DeepCopyInto(&out.Gateway) + in.Protocols.DeepCopyInto(&out.Protocols) + in.Auth.DeepCopyInto(&out.Auth) + out.Zone = in.Zone + in.HealthCheck.DeepCopyInto(&out.HealthCheck) + if in.Security != nil { + in, out := &in.Security, &out.Security + *out = new(ObjectStoreSecuritySpec) + (*in).DeepCopyInto(*out) + } + if in.AllowUsersInNamespaces != nil { + in, out := &in.AllowUsersInNamespaces, &out.AllowUsersInNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Hosting != nil { + in, out := &in.Hosting, &out.Hosting + *out = new(ObjectStoreHostingSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreSpec. +func (in *ObjectStoreSpec) DeepCopy() *ObjectStoreSpec { + if in == nil { + return nil + } + out := new(ObjectStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectStoreStatus) DeepCopyInto(out *ObjectStoreStatus) { + *out = *in + in.Endpoints.DeepCopyInto(&out.Endpoints) + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Cephx = in.Cephx + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreStatus. +func (in *ObjectStoreStatus) DeepCopy() *ObjectStoreStatus { + if in == nil { + return nil + } + out := new(ObjectStoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStoreUserSpec) DeepCopyInto(out *ObjectStoreUserSpec) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(ObjectUserCapSpec) + **out = **in + } + if in.Quotas != nil { + in, out := &in.Quotas, &out.Quotas + *out = new(ObjectUserQuotaSpec) + (*in).DeepCopyInto(*out) + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]ObjectUserKey, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OpMask != nil { + in, out := &in.OpMask, &out.OpMask + *out = new([]ObjectUserOpMask) + if **in != nil { + in, out := *in, *out + *out = make([]ObjectUserOpMask, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreUserSpec. +func (in *ObjectStoreUserSpec) DeepCopy() *ObjectStoreUserSpec { + if in == nil { + return nil + } + out := new(ObjectStoreUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ObjectStoreUserStatus) DeepCopyInto(out *ObjectStoreUserStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]SecretReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStoreUserStatus. +func (in *ObjectStoreUserStatus) DeepCopy() *ObjectStoreUserStatus { + if in == nil { + return nil + } + out := new(ObjectStoreUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectUserCapSpec) DeepCopyInto(out *ObjectUserCapSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUserCapSpec. +func (in *ObjectUserCapSpec) DeepCopy() *ObjectUserCapSpec { + if in == nil { + return nil + } + out := new(ObjectUserCapSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectUserKey) DeepCopyInto(out *ObjectUserKey) { + *out = *in + if in.AccessKeyRef != nil { + in, out := &in.AccessKeyRef, &out.AccessKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUserKey. 
+func (in *ObjectUserKey) DeepCopy() *ObjectUserKey { + if in == nil { + return nil + } + out := new(ObjectUserKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectUserQuotaSpec) DeepCopyInto(out *ObjectUserQuotaSpec) { + *out = *in + if in.MaxBuckets != nil { + in, out := &in.MaxBuckets, &out.MaxBuckets + *out = new(int) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + x := (*in).DeepCopy() + *out = &x + } + if in.MaxObjects != nil { + in, out := &in.MaxObjects, &out.MaxObjects + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUserQuotaSpec. +func (in *ObjectUserQuotaSpec) DeepCopy() *ObjectUserQuotaSpec { + if in == nil { + return nil + } + out := new(ObjectUserQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectZoneGroupSpec) DeepCopyInto(out *ObjectZoneGroupSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectZoneGroupSpec. +func (in *ObjectZoneGroupSpec) DeepCopy() *ObjectZoneGroupSpec { + if in == nil { + return nil + } + out := new(ObjectZoneGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectZoneSpec) DeepCopyInto(out *ObjectZoneSpec) { + *out = *in + in.MetadataPool.DeepCopyInto(&out.MetadataPool) + in.DataPool.DeepCopyInto(&out.DataPool) + in.SharedPools.DeepCopyInto(&out.SharedPools) + if in.CustomEndpoints != nil { + in, out := &in.CustomEndpoints, &out.CustomEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectZoneSpec. +func (in *ObjectZoneSpec) DeepCopy() *ObjectZoneSpec { + if in == nil { + return nil + } + out := new(ObjectZoneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpsLogSidecar) DeepCopyInto(out *OpsLogSidecar) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsLogSidecar. +func (in *OpsLogSidecar) DeepCopy() *OpsLogSidecar { + if in == nil { + return nil + } + out := new(OpsLogSidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerRemoteSpec) DeepCopyInto(out *PeerRemoteSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerRemoteSpec. +func (in *PeerRemoteSpec) DeepCopy() *PeerRemoteSpec { + if in == nil { + return nil + } + out := new(PeerRemoteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerStatSpec) DeepCopyInto(out *PeerStatSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerStatSpec. 
+func (in *PeerStatSpec) DeepCopy() *PeerStatSpec { + if in == nil { + return nil + } + out := new(PeerStatSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerTokenCephxStatus) DeepCopyInto(out *PeerTokenCephxStatus) { + *out = *in + out.PeerToken = in.PeerToken + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerTokenCephxStatus. +func (in *PeerTokenCephxStatus) DeepCopy() *PeerTokenCephxStatus { + if in == nil { + return nil + } + out := new(PeerTokenCephxStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeersSpec) DeepCopyInto(out *PeersSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeersSpec. +func (in *PeersSpec) DeepCopy() *PeersSpec { + if in == nil { + return nil + } + out := new(PeersSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Placement) DeepCopyInto(out *Placement) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(corev1.PodAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(corev1.PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. +func (in *Placement) DeepCopy() *Placement { + if in == nil { + return nil + } + out := new(Placement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PlacementSpec) DeepCopyInto(out *PlacementSpec) { + { + in := &in + *out = make(PlacementSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSpec. +func (in PlacementSpec) DeepCopy() PlacementSpec { + if in == nil { + return nil + } + out := new(PlacementSpec) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementStorageClassSpec) DeepCopyInto(out *PlacementStorageClassSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStorageClassSpec. +func (in *PlacementStorageClassSpec) DeepCopy() *PlacementStorageClassSpec { + if in == nil { + return nil + } + out := new(PlacementStorageClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PoolPlacementSpec) DeepCopyInto(out *PoolPlacementSpec) { + *out = *in + if in.StorageClasses != nil { + in, out := &in.StorageClasses, &out.StorageClasses + *out = make([]PlacementStorageClassSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolPlacementSpec. +func (in *PoolPlacementSpec) DeepCopy() *PoolPlacementSpec { + if in == nil { + return nil + } + out := new(PoolPlacementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PoolSpec) DeepCopyInto(out *PoolSpec) { + *out = *in + if in.EnableCrushUpdates != nil { + in, out := &in.EnableCrushUpdates, &out.EnableCrushUpdates + *out = new(bool) + **out = **in + } + in.Replicated.DeepCopyInto(&out.Replicated) + out.ErasureCoded = in.ErasureCoded + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Mirroring.DeepCopyInto(&out.Mirroring) + in.StatusCheck.DeepCopyInto(&out.StatusCheck) + in.Quotas.DeepCopyInto(&out.Quotas) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolSpec. 
+func (in *PoolSpec) DeepCopy() *PoolSpec { + if in == nil { + return nil + } + out := new(PoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PriorityClassNamesSpec) DeepCopyInto(out *PriorityClassNamesSpec) { + { + in := &in + *out = make(PriorityClassNamesSpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassNamesSpec. +func (in PriorityClassNamesSpec) DeepCopy() PriorityClassNamesSpec { + if in == nil { + return nil + } + out := new(PriorityClassNamesSpec) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + if in.Probe != nil { + in, out := &in.Probe, &out.Probe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtocolSpec) DeepCopyInto(out *ProtocolSpec) { + *out = *in + if in.EnableAPIs != nil { + in, out := &in.EnableAPIs, &out.EnableAPIs + *out = make([]ObjectStoreAPI, len(*in)) + copy(*out, *in) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3Spec) + (*in).DeepCopyInto(*out) + } + if in.Swift != nil { + in, out := &in.Swift, &out.Swift + *out = new(SwiftSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolSpec. 
+func (in *ProtocolSpec) DeepCopy() *ProtocolSpec { + if in == nil { + return nil + } + out := new(ProtocolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullSpec) DeepCopyInto(out *PullSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullSpec. +func (in *PullSpec) DeepCopy() *PullSpec { + if in == nil { + return nil + } + out := new(PullSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaSpec) DeepCopyInto(out *QuotaSpec) { + *out = *in + if in.MaxBytes != nil { + in, out := &in.MaxBytes, &out.MaxBytes + *out = new(uint64) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(string) + **out = **in + } + if in.MaxObjects != nil { + in, out := &in.MaxObjects, &out.MaxObjects + *out = new(uint64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaSpec. +func (in *QuotaSpec) DeepCopy() *QuotaSpec { + if in == nil { + return nil + } + out := new(QuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDMirrorStatus) DeepCopyInto(out *RBDMirrorStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.Cephx = in.Cephx + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDMirrorStatus. +func (in *RBDMirrorStatus) DeepCopy() *RBDMirrorStatus { + if in == nil { + return nil + } + out := new(RBDMirrorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RBDMirroringSpec) DeepCopyInto(out *RBDMirroringSpec) { + *out = *in + in.Peers.DeepCopyInto(&out.Peers) + in.Placement.DeepCopyInto(&out.Placement) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDMirroringSpec. +func (in *RBDMirroringSpec) DeepCopy() *RBDMirroringSpec { + if in == nil { + return nil + } + out := new(RBDMirroringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RGWServiceSpec) DeepCopyInto(out *RGWServiceSpec) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(Annotations, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RGWServiceSpec. +func (in *RGWServiceSpec) DeepCopy() *RGWServiceSpec { + if in == nil { + return nil + } + out := new(RGWServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RadosNamespaceMirroring) DeepCopyInto(out *RadosNamespaceMirroring) { + *out = *in + if in.RemoteNamespace != nil { + in, out := &in.RemoteNamespace, &out.RemoteNamespace + *out = new(string) + **out = **in + } + if in.SnapshotSchedules != nil { + in, out := &in.SnapshotSchedules, &out.SnapshotSchedules + *out = make([]SnapshotScheduleSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RadosNamespaceMirroring. +func (in *RadosNamespaceMirroring) DeepCopy() *RadosNamespaceMirroring { + if in == nil { + return nil + } + out := new(RadosNamespaceMirroring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReadAffinitySpec) DeepCopyInto(out *ReadAffinitySpec) { + *out = *in + if in.CrushLocationLabels != nil { + in, out := &in.CrushLocationLabels, &out.CrushLocationLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadAffinitySpec. +func (in *ReadAffinitySpec) DeepCopy() *ReadAffinitySpec { + if in == nil { + return nil + } + out := new(ReadAffinitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicatedSpec) DeepCopyInto(out *ReplicatedSpec) { + *out = *in + if in.HybridStorage != nil { + in, out := &in.HybridStorage, &out.HybridStorage + *out = new(HybridStorageSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedSpec. 
+func (in *ReplicatedSpec) DeepCopy() *ReplicatedSpec { + if in == nil { + return nil + } + out := new(ReplicatedSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceSpec) DeepCopyInto(out *ResourceSpec) { + { + in := &in + *out = make(ResourceSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in ResourceSpec) DeepCopy() ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RgwReadAffinity) DeepCopyInto(out *RgwReadAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RgwReadAffinity. +func (in *RgwReadAffinity) DeepCopy() *RgwReadAffinity { + if in == nil { + return nil + } + out := new(RgwReadAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Spec) DeepCopyInto(out *S3Spec) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.AuthUseKeystone != nil { + in, out := &in.AuthUseKeystone, &out.AuthUseKeystone + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. +func (in *S3Spec) DeepCopy() *S3Spec { + if in == nil { + return nil + } + out := new(S3Spec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSSDSidecar) DeepCopyInto(out *SSSDSidecar) { + *out = *in + in.SSSDConfigFile.DeepCopyInto(&out.SSSDConfigFile) + if in.AdditionalFiles != nil { + in, out := &in.AdditionalFiles, &out.AdditionalFiles + *out = make(AdditionalVolumeMounts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSSDSidecar. +func (in *SSSDSidecar) DeepCopy() *SSSDSidecar { + if in == nil { + return nil + } + out := new(SSSDSidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSSDSidecarConfigFile) DeepCopyInto(out *SSSDSidecarConfigFile) { + *out = *in + if in.VolumeSource != nil { + in, out := &in.VolumeSource, &out.VolumeSource + *out = new(ConfigFileVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSSDSidecarConfigFile. +func (in *SSSDSidecarConfigFile) DeepCopy() *SSSDSidecarConfigFile { + if in == nil { + return nil + } + out := new(SSSDSidecarConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSSDSpec) DeepCopyInto(out *SSSDSpec) { + *out = *in + if in.Sidecar != nil { + in, out := &in.Sidecar, &out.Sidecar + *out = new(SSSDSidecar) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSSDSpec. +func (in *SSSDSpec) DeepCopy() *SSSDSpec { + if in == nil { + return nil + } + out := new(SSSDSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SanitizeDisksSpec) DeepCopyInto(out *SanitizeDisksSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SanitizeDisksSpec. +func (in *SanitizeDisksSpec) DeepCopy() *SanitizeDisksSpec { + if in == nil { + return nil + } + out := new(SanitizeDisksSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + out.SecretReference = in.SecretReference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecuritySpec) DeepCopyInto(out *SecuritySpec) { + *out = *in + in.KeyManagementService.DeepCopyInto(&out.KeyManagementService) + out.KeyRotation = in.KeyRotation + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySpec. +func (in *SecuritySpec) DeepCopy() *SecuritySpec { + if in == nil { + return nil + } + out := new(SecuritySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Selection) DeepCopyInto(out *Selection) { + *out = *in + if in.UseAllDevices != nil { + in, out := &in.UseAllDevices, &out.UseAllDevices + *out = new(bool) + **out = **in + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]VolumeClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selection. +func (in *Selection) DeepCopy() *Selection { + if in == nil { + return nil + } + out := new(Selection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSchedule) DeepCopyInto(out *SnapshotSchedule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedule. +func (in *SnapshotSchedule) DeepCopy() *SnapshotSchedule { + if in == nil { + return nil + } + out := new(SnapshotSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleRetentionSpec) DeepCopyInto(out *SnapshotScheduleRetentionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleRetentionSpec. +func (in *SnapshotScheduleRetentionSpec) DeepCopy() *SnapshotScheduleRetentionSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleRetentionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SnapshotScheduleSpec) DeepCopyInto(out *SnapshotScheduleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleSpec. +func (in *SnapshotScheduleSpec) DeepCopy() *SnapshotScheduleSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotScheduleStatusSpec) DeepCopyInto(out *SnapshotScheduleStatusSpec) { + *out = *in + if in.SnapshotSchedules != nil { + in, out := &in.SnapshotSchedules, &out.SnapshotSchedules + *out = make([]SnapshotSchedulesSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotScheduleStatusSpec. +func (in *SnapshotScheduleStatusSpec) DeepCopy() *SnapshotScheduleStatusSpec { + if in == nil { + return nil + } + out := new(SnapshotScheduleStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SnapshotSchedulesSpec) DeepCopyInto(out *SnapshotSchedulesSpec) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SnapshotSchedule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotSchedulesSpec. +func (in *SnapshotSchedulesSpec) DeepCopy() *SnapshotSchedulesSpec { + if in == nil { + return nil + } + out := new(SnapshotSchedulesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatesSpec) DeepCopyInto(out *StatesSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatesSpec. +func (in *StatesSpec) DeepCopy() *StatesSpec { + if in == nil { + return nil + } + out := new(StatesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassDeviceSet) DeepCopyInto(out *StorageClassDeviceSet) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + in.Placement.DeepCopyInto(&out.Placement) + if in.PreparePlacement != nil { + in, out := &in.PreparePlacement, &out.PreparePlacement + *out = new(Placement) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]VolumeClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassDeviceSet. 
+func (in *StorageClassDeviceSet) DeepCopy() *StorageClassDeviceSet { + if in == nil { + return nil + } + out := new(StorageClassDeviceSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageScopeSpec) DeepCopyInto(out *StorageScopeSpec) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Selection.DeepCopyInto(&out.Selection) + if in.StorageClassDeviceSets != nil { + in, out := &in.StorageClassDeviceSets, &out.StorageClassDeviceSets + *out = make([]StorageClassDeviceSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Migration = in.Migration + out.Store = in.Store + if in.FullRatio != nil { + in, out := &in.FullRatio, &out.FullRatio + *out = new(float64) + **out = **in + } + if in.NearFullRatio != nil { + in, out := &in.NearFullRatio, &out.NearFullRatio + *out = new(float64) + **out = **in + } + if in.BackfillFullRatio != nil { + in, out := &in.BackfillFullRatio, &out.BackfillFullRatio + *out = new(float64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageScopeSpec. +func (in *StorageScopeSpec) DeepCopy() *StorageScopeSpec { + if in == nil { + return nil + } + out := new(StorageScopeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StretchClusterSpec) DeepCopyInto(out *StretchClusterSpec) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]MonZoneSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StretchClusterSpec. +func (in *StretchClusterSpec) DeepCopy() *StretchClusterSpec { + if in == nil { + return nil + } + out := new(StretchClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SwiftSpec) DeepCopyInto(out *SwiftSpec) { + *out = *in + if in.AccountInUrl != nil { + in, out := &in.AccountInUrl, &out.AccountInUrl + *out = new(bool) + **out = **in + } + if in.UrlPrefix != nil { + in, out := &in.UrlPrefix, &out.UrlPrefix + *out = new(string) + **out = **in + } + if in.VersioningEnabled != nil { + in, out := &in.VersioningEnabled, &out.VersioningEnabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SwiftSpec. +func (in *SwiftSpec) DeepCopy() *SwiftSpec { + if in == nil { + return nil + } + out := new(SwiftSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopicEndpointSpec) DeepCopyInto(out *TopicEndpointSpec) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPEndpointSpec) + **out = **in + } + if in.AMQP != nil { + in, out := &in.AMQP, &out.AMQP + *out = new(AMQPEndpointSpec) + **out = **in + } + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(KafkaEndpointSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicEndpointSpec. 
+func (in *TopicEndpointSpec) DeepCopy() *TopicEndpointSpec { + if in == nil { + return nil + } + out := new(TopicEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeClaimTemplate) DeepCopyInto(out *VolumeClaimTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimTemplate. +func (in *VolumeClaimTemplate) DeepCopy() *VolumeClaimTemplate { + if in == nil { + return nil + } + out := new(VolumeClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZoneSpec) DeepCopyInto(out *ZoneSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZoneSpec. 
+func (in *ZoneSpec) DeepCopy() *ZoneSpec { + if in == nil { + return nil + } + out := new(ZoneSpec) + in.DeepCopyInto(out) + return out +} diff --git a/go.mod b/go.mod index 26862b43..f850a9eb 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,12 @@ module github.com/codesphere-cloud/oms go 1.26.1 +replace ( + github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc3 + github.com/libopenstorage/secrets => github.com/rook/secrets v0.0.0-20240315053144-3195f6906937 + github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 +) + require ( cloud.google.com/go/artifactregistry v1.20.0 cloud.google.com/go/compute v1.56.0 @@ -12,9 +18,11 @@ require ( github.com/codesphere-cloud/cs-go v0.19.2 github.com/creativeprojects/go-selfupdate v1.5.2 github.com/jedib0t/go-pretty/v6 v6.7.8 + github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 github.com/lithammer/shortuuid v3.0.0+incompatible github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.1 + github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4 github.com/pkg/sftp v1.13.10 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 @@ -392,7 +400,7 @@ require ( github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/errors v0.9.1 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect github.com/prometheus/client_golang v1.23.2 // indirect @@ -518,6 +526,7 @@ require ( github.com/cloudnative-pg/barman-cloud v0.4.1-0.20260108104508-ced266c145f5 // indirect github.com/cloudnative-pg/cnpg-i v0.3.1 // indirect github.com/cloudnative-pg/machinery v0.3.3 // indirect + github.com/containernetworking/cni v1.2.0-rc1 // indirect 
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect diff --git a/go.sum b/go.sum index 434981b8..236bff91 100644 --- a/go.sum +++ b/go.sum @@ -380,6 +380,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw= github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY= +github.com/containernetworking/cni v1.2.0-rc1 h1:AKI3+pXtgY4PDLN9+50o9IaywWVuey0Jkw3Lvzp0HCY= +github.com/containernetworking/cni v1.2.0-rc1/go.mod h1:Lt0TQcZQVDju64fYxUhDziTgXCDe3Olzi9I4zZJLWHg= github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -889,6 +891,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 h1:z4P744DR+PIpkjwXSEc6TvN3L6LVzmUquFgmNm8wSUc= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= github.com/karamaru-alpha/copyloopvar v1.2.2 h1:yfNQvP9YaGQR7VaWLYcfZUlRP2eo2vhExWKxD/fP6q0= github.com/karamaru-alpha/copyloopvar v1.2.2/go.mod h1:oY4rGZqZ879JkJMtX3RRkcXRkmUvH0x35ykgaKgsgJY= github.com/kevinburke/ssh_config 
v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= @@ -1072,8 +1076,12 @@ github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nunnatsa/ginkgolinter v0.23.0 h1:x3o4DGYOWbBMP/VdNQKgSj+25aJKx2Pe6lHr8gBcgf8= github.com/nunnatsa/ginkgolinter v0.23.0/go.mod h1:9qN1+0akwXEccwV1CAcCDfcoBlWXHB+ML9884pL4SZ4= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= @@ -1084,6 +1092,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= +github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4 h1:WJFsWIK6RgKablY+IXyIeTLPT2ZRHdHXYnG9Eq+9bhM= +github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= 
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= @@ -1333,6 +1343,8 @@ github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CP github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vektra/mockery/v3 v3.7.0 h1:Dd0EeaOcRJBVP9n3oYOVPV7KdPaaE3EcwTppaZIsFSM= github.com/vektra/mockery/v3 v3.7.0/go.mod h1:z9Wr23Ha8etImqQwS3boTNR9WkjX6tIklW5c88DRkSw= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 h1:0KGbf+0SMg+UFy4e1A/CPVvXn21f1qtWdeJwxZFoQG8= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= @@ -1682,6 +1694,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 5c2a9c24..19eafc8d 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -10,6 +10,7 @@ import ( "time" cnpgv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" + rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/installer" "github.com/codesphere-cloud/oms/internal/util" @@ -20,10 +21,23 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) const ( codesphereSystemNamespace = "codesphere-system" + rookRepoName = "rook-release" + rookRepoURL = "https://charts.rook.io/release" + rookReleaseName = "rook-ceph" + rookChartRef = "rook-release/rook-ceph" + rookNamespace = "rook-ceph" + rookClusterName = "rook-ceph" + rookCephImage = "quay.io/ceph/ceph:v18.2.7" + rookCephDataDirHostPath = "/var/lib/rook" + rookCephDeviceSetName = "set1" + rookCephOSDStorageSize = "20Gi" + rookReadyTimeout = 30 * time.Minute + rookReadyPollInterval = 5 * time.Second cnpgRepoName = "cnpg" cnpgRepoURL = "https://cloudnative-pg.github.io/charts" cnpgReleaseName = "cnpg" @@ -80,15 +94,28 @@ func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kube } func (c *LocalBootstrapper) Bootstrap() error { - if c.stlog == nil { - return fmt.Errorf("step logger is not initialized") - } - err := c.stlog.Step("Ensure codesphere system namespace", c.EnsureCodesphereSystemNamespace) if err != nil { return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) } + err = c.stlog.Step("Install Rook and test Ceph cluster", func() error { + err := c.stlog.Substep("Install Rook operator", c.InstallRookHelmChart) + if err != nil { + return err + } + + err = c.stlog.Substep("Deploy test Ceph cluster (single OSD)", c.DeployTestCephCluster) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return fmt.Errorf("failed to install Rook and deploy test Ceph cluster: %w", err) + } + err = c.stlog.Step("Install 
CloudNativePG and PostgreSQL", func() error { err := c.stlog.Substep("Install CloudNativePG operator", c.InstallCloudNativePGHelmChart) if err != nil { @@ -154,23 +181,207 @@ func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { return nil } +func (c *LocalBootstrapper) InstallRookHelmChart() error { + repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() + if err != nil && !strings.Contains(string(repoAddOut), "already exists") { + return fmt.Errorf("failed to add helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoAddOut))) + } + + repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", rookRepoName).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to update helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoUpdateOut))) + } + + cmd := exec.CommandContext( + c.ctx, + "helm", + "upgrade", + "--install", + rookReleaseName, + rookChartRef, + "--namespace", + rookNamespace, + "--create-namespace", + "--wait", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to install helm chart %q: %w", rookChartRef, err) + } + + return nil +} + +func (c *LocalBootstrapper) DeployTestCephCluster() error { + // Ceph test cluster config from https://github.com/rook/rook/blob/0e05c6afff25a4e03649dd2092a5a10c3349fd9c/deploy/examples/cluster-test.yaml + cephCluster := &rookcephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: rookClusterName, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, cephCluster, func() error { + cephCluster.Spec = rookcephv1.ClusterSpec{ + CephVersion: rookcephv1.CephVersionSpec{ + Image: rookCephImage, + }, + DataDirHostPath: rookCephDataDirHostPath, + Mon: rookcephv1.MonSpec{ + Count: 1, + AllowMultiplePerNode: true, + }, + Mgr: rookcephv1.MgrSpec{ + Count: 1, + AllowMultiplePerNode: true, 
+ }, + Storage: rookcephv1.StorageScopeSpec{ + // TODO: make configurable. + UseAllNodes: true, + AllowDeviceClassUpdate: true, + AllowOsdCrushWeightUpdate: false, + }, + PriorityClassNames: rookcephv1.PriorityClassNamesSpec{ + "all": "system-node-critical", + "mgr": "system-cluster-critical", + }, + HealthCheck: rookcephv1.CephClusterHealthCheckSpec{ + DaemonHealth: rookcephv1.DaemonHealthSpec{ + Monitor: rookcephv1.HealthCheckSpec{ + Interval: &metav1.Duration{Duration: 45 * time.Second}, + Timeout: "600s", + }, + }, + }, + Dashboard: rookcephv1.DashboardSpec{ + Enabled: true, + }, + DisruptionManagement: rookcephv1.DisruptionManagementSpec{ + ManagePodBudgets: true, + }, + CrashCollector: rookcephv1.CrashCollectorSpec{ + Disable: true, + }, + Monitoring: rookcephv1.MonitoringSpec{ + Enabled: false, + }, + CephConfig: map[string]map[string]string{ + "global": { + "osd_pool_default_size": "1", + "mon_warn_on_pool_no_redundancy": "false", + "bdev_flock_retry": "20", + "bluefs_buffered_io": "false", + "mon_data_avail_warn": "10", + }, + }, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update Ceph cluster %q: %w", rookClusterName, err) + } + + if err := c.WaitForTestCephClusterReady(); err != nil { + return err + } + + return nil +} + +func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { + ctx, cancel := context.WithTimeout(c.ctx, rookReadyTimeout) + defer cancel() + + clusterKey := client.ObjectKey{ + Name: rookClusterName, + Namespace: rookNamespace, + } + + steps := int(rookReadyTimeout / rookReadyPollInterval) + if steps < 1 { + steps = 1 + } + + backoff := wait.Backoff{ + Duration: rookReadyPollInterval, + Factor: 1.0, + Jitter: 0.1, + Steps: steps, + } + + lastPhase := "" + lastState := "" + lastMessage := "" + + err := retry.OnError(backoff, isRetryableWaitError, func() error { + if err := ctx.Err(); err != nil { + return err + } + + cluster := &rookcephv1.CephCluster{} + err := c.kubeClient.Get(ctx, clusterKey, 
cluster) + if err != nil { + if apierrors.IsNotFound(err) { + return &retryableWaitError{err: fmt.Errorf("Ceph cluster %q not found yet", rookClusterName)} + } + + return err + } + + lastPhase = string(cluster.Status.Phase) + lastState = string(cluster.Status.State) + lastMessage = cluster.Status.Message + + if isRookCephClusterReady(cluster) { + return nil + } + + return &retryableWaitError{err: fmt.Errorf( + "Ceph cluster is not ready yet (phase=%q, state=%q, message=%q)", + lastPhase, + lastState, + lastMessage, + )} + }) + if err == nil { + return nil + } + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { + return fmt.Errorf( + "timed out waiting for Ceph cluster %q to become ready (phase=%q, state=%q, message=%q, error=%v)", + rookClusterName, + lastPhase, + lastState, + lastMessage, + err, + ) + } + + return fmt.Errorf("failed to fetch Ceph cluster %q: %w", rookClusterName, err) +} + func (c *LocalBootstrapper) DeployPostgresDatabase() error { postgresCluster := &cnpgv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: cnpgDatabaseClusterName, Namespace: codesphereSystemNamespace, }, - Spec: cnpgv1.ClusterSpec{ + } + + _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, postgresCluster, func() error { + postgresCluster.Spec = cnpgv1.ClusterSpec{ ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), Instances: 1, StorageConfiguration: cnpgv1.StorageConfiguration{ Size: cnpgDatabaseStorageSize, }, - }, - } - - if err := c.kubeClient.Create(c.ctx, postgresCluster); err != nil && !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) } if err := c.WaitForPostgresDatabaseReady(); err != nil { @@ -298,3 +509,17 @@ func 
isCNPGClusterReady(cluster *cnpgv1.Cluster) bool { return cluster.Status.Instances > 0 && cluster.Status.ReadyInstances == cluster.Status.Instances } + +func isRookCephClusterReady(cluster *rookcephv1.CephCluster) bool { + if cluster == nil { + return false + } + + for _, condition := range cluster.Status.Conditions { + if condition.Type == rookcephv1.ConditionReady && condition.Status == corev1.ConditionTrue { + return true + } + } + + return cluster.Status.Phase == rookcephv1.ConditionReady +} From 2524d0a8b8b0efe98784964411b159226fb225f1 Mon Sep 17 00:00:00 2001 From: schrodit Date: Fri, 6 Mar 2026 13:32:13 +0100 Subject: [PATCH 05/31] split into several files Signed-off-by: Tim Schrodi --- internal/bootstrap/local/local.go | 409 --------------------------- internal/bootstrap/local/postgres.go | 219 ++++++++++++++ internal/bootstrap/local/rook.go | 233 +++++++++++++++ 3 files changed, 452 insertions(+), 409 deletions(-) create mode 100644 internal/bootstrap/local/postgres.go create mode 100644 internal/bootstrap/local/rook.go diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 19eafc8d..13a85906 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -4,50 +4,18 @@ import ( "context" "errors" "fmt" - "os" - "os/exec" - "strings" - "time" - cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/installer" "github.com/codesphere-cloud/oms/internal/util" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) const ( codesphereSystemNamespace = 
"codesphere-system" - rookRepoName = "rook-release" - rookRepoURL = "https://charts.rook.io/release" - rookReleaseName = "rook-ceph" - rookChartRef = "rook-release/rook-ceph" - rookNamespace = "rook-ceph" - rookClusterName = "rook-ceph" - rookCephImage = "quay.io/ceph/ceph:v18.2.7" - rookCephDataDirHostPath = "/var/lib/rook" - rookCephDeviceSetName = "set1" - rookCephOSDStorageSize = "20Gi" - rookReadyTimeout = 30 * time.Minute - rookReadyPollInterval = 5 * time.Second - cnpgRepoName = "cnpg" - cnpgRepoURL = "https://cloudnative-pg.github.io/charts" - cnpgReleaseName = "cnpg" - cnpgChartRef = "cnpg/cloudnative-pg" - cnpgDatabaseClusterName = "masterdata" - cnpgDatabaseVersion = "15.14" - cnpgDatabaseStorageSize = "10Gi" - cnpgReadyTimeout = 15 * time.Minute - cnpgReadyPollInterval = 5 * time.Second - cnpgSecretPasswordKey = "password" ) type retryableWaitError struct { @@ -146,380 +114,3 @@ func (c *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { return nil } - -func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { - repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() - if err != nil && !strings.Contains(string(repoAddOut), "already exists") { - return fmt.Errorf("failed to add helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoAddOut))) - } - - repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) - } - - cmd := exec.CommandContext( - c.ctx, - "helm", - "upgrade", - "--install", - cnpgReleaseName, - cnpgChartRef, - "--namespace", - codesphereSystemNamespace, - "--create-namespace", - "--set", - "config.clusterWide=false", - "--wait", - ) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to install helm 
chart %q: %w", cnpgChartRef, err) - } - - return nil -} - -func (c *LocalBootstrapper) InstallRookHelmChart() error { - repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() - if err != nil && !strings.Contains(string(repoAddOut), "already exists") { - return fmt.Errorf("failed to add helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoAddOut))) - } - - repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", rookRepoName).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to update helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoUpdateOut))) - } - - cmd := exec.CommandContext( - c.ctx, - "helm", - "upgrade", - "--install", - rookReleaseName, - rookChartRef, - "--namespace", - rookNamespace, - "--create-namespace", - "--wait", - ) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to install helm chart %q: %w", rookChartRef, err) - } - - return nil -} - -func (c *LocalBootstrapper) DeployTestCephCluster() error { - // Ceph test cluster config from https://github.com/rook/rook/blob/0e05c6afff25a4e03649dd2092a5a10c3349fd9c/deploy/examples/cluster-test.yaml - cephCluster := &rookcephv1.CephCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: rookClusterName, - Namespace: rookNamespace, - }, - } - - _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, cephCluster, func() error { - cephCluster.Spec = rookcephv1.ClusterSpec{ - CephVersion: rookcephv1.CephVersionSpec{ - Image: rookCephImage, - }, - DataDirHostPath: rookCephDataDirHostPath, - Mon: rookcephv1.MonSpec{ - Count: 1, - AllowMultiplePerNode: true, - }, - Mgr: rookcephv1.MgrSpec{ - Count: 1, - AllowMultiplePerNode: true, - }, - Storage: rookcephv1.StorageScopeSpec{ - // TODO: make configurable. 
- UseAllNodes: true, - AllowDeviceClassUpdate: true, - AllowOsdCrushWeightUpdate: false, - }, - PriorityClassNames: rookcephv1.PriorityClassNamesSpec{ - "all": "system-node-critical", - "mgr": "system-cluster-critical", - }, - HealthCheck: rookcephv1.CephClusterHealthCheckSpec{ - DaemonHealth: rookcephv1.DaemonHealthSpec{ - Monitor: rookcephv1.HealthCheckSpec{ - Interval: &metav1.Duration{Duration: 45 * time.Second}, - Timeout: "600s", - }, - }, - }, - Dashboard: rookcephv1.DashboardSpec{ - Enabled: true, - }, - DisruptionManagement: rookcephv1.DisruptionManagementSpec{ - ManagePodBudgets: true, - }, - CrashCollector: rookcephv1.CrashCollectorSpec{ - Disable: true, - }, - Monitoring: rookcephv1.MonitoringSpec{ - Enabled: false, - }, - CephConfig: map[string]map[string]string{ - "global": { - "osd_pool_default_size": "1", - "mon_warn_on_pool_no_redundancy": "false", - "bdev_flock_retry": "20", - "bluefs_buffered_io": "false", - "mon_data_avail_warn": "10", - }, - }, - } - return nil - }) - if err != nil { - return fmt.Errorf("failed to create or update Ceph cluster %q: %w", rookClusterName, err) - } - - if err := c.WaitForTestCephClusterReady(); err != nil { - return err - } - - return nil -} - -func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { - ctx, cancel := context.WithTimeout(c.ctx, rookReadyTimeout) - defer cancel() - - clusterKey := client.ObjectKey{ - Name: rookClusterName, - Namespace: rookNamespace, - } - - steps := int(rookReadyTimeout / rookReadyPollInterval) - if steps < 1 { - steps = 1 - } - - backoff := wait.Backoff{ - Duration: rookReadyPollInterval, - Factor: 1.0, - Jitter: 0.1, - Steps: steps, - } - - lastPhase := "" - lastState := "" - lastMessage := "" - - err := retry.OnError(backoff, isRetryableWaitError, func() error { - if err := ctx.Err(); err != nil { - return err - } - - cluster := &rookcephv1.CephCluster{} - err := c.kubeClient.Get(ctx, clusterKey, cluster) - if err != nil { - if apierrors.IsNotFound(err) { - return 
&retryableWaitError{err: fmt.Errorf("Ceph cluster %q not found yet", rookClusterName)} - } - - return err - } - - lastPhase = string(cluster.Status.Phase) - lastState = string(cluster.Status.State) - lastMessage = cluster.Status.Message - - if isRookCephClusterReady(cluster) { - return nil - } - - return &retryableWaitError{err: fmt.Errorf( - "Ceph cluster is not ready yet (phase=%q, state=%q, message=%q)", - lastPhase, - lastState, - lastMessage, - )} - }) - if err == nil { - return nil - } - - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { - return fmt.Errorf( - "timed out waiting for Ceph cluster %q to become ready (phase=%q, state=%q, message=%q, error=%v)", - rookClusterName, - lastPhase, - lastState, - lastMessage, - err, - ) - } - - return fmt.Errorf("failed to fetch Ceph cluster %q: %w", rookClusterName, err) -} - -func (c *LocalBootstrapper) DeployPostgresDatabase() error { - postgresCluster := &cnpgv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, - }, - } - - _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, postgresCluster, func() error { - postgresCluster.Spec = cnpgv1.ClusterSpec{ - ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), - Instances: 1, - StorageConfiguration: cnpgv1.StorageConfiguration{ - Size: cnpgDatabaseStorageSize, - }, - } - return nil - }) - if err != nil { - return fmt.Errorf("failed to create or update PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) - } - - if err := c.WaitForPostgresDatabaseReady(); err != nil { - return err - } - - return nil -} - -func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { - ctx, cancel := context.WithTimeout(c.ctx, cnpgReadyTimeout) - defer cancel() - - clusterKey := client.ObjectKey{ - Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, - } - - steps := 
int(cnpgReadyTimeout / cnpgReadyPollInterval) - if steps < 1 { - steps = 1 - } - - backoff := wait.Backoff{ - Duration: cnpgReadyPollInterval, - Factor: 1.0, - Jitter: 0.1, - Steps: steps, - } - - lastPhase := "" - lastReadyInstances := 0 - lastInstances := 0 - - err := retry.OnError(backoff, isRetryableWaitError, func() error { - if err := ctx.Err(); err != nil { - return err - } - - cluster := &cnpgv1.Cluster{} - err := c.kubeClient.Get(ctx, clusterKey, cluster) - if err != nil { - if apierrors.IsNotFound(err) { - return &retryableWaitError{err: fmt.Errorf("PostgreSQL cluster %q not found yet", cnpgDatabaseClusterName)} - } - - return err - } - - lastPhase = cluster.Status.Phase - lastReadyInstances = cluster.Status.ReadyInstances - lastInstances = cluster.Status.Instances - - if isCNPGClusterReady(cluster) { - return nil - } - - return &retryableWaitError{err: fmt.Errorf( - "PostgreSQL cluster is not ready yet (phase=%q, readyInstances=%d, instances=%d)", - lastPhase, - lastReadyInstances, - lastInstances, - )} - }) - if err == nil { - return nil - } - - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { - return fmt.Errorf( - "timed out waiting for PostgreSQL cluster %q to become ready (phase=%q, readyInstances=%d, instances=%d, error=%v)", - cnpgDatabaseClusterName, - lastPhase, - lastReadyInstances, - lastInstances, - err, - ) - } - - return fmt.Errorf("failed to fetch PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) -} - -func (c *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { - clusterKey := client.ObjectKey{ - Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, - } - - cluster := &cnpgv1.Cluster{} - if err := c.kubeClient.Get(c.ctx, clusterKey, cluster); err != nil { - return "", fmt.Errorf("failed to get PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) - } - - secretName := cluster.GetSuperuserSecretName() - secretKey := client.ObjectKey{ 
- Name: secretName, - Namespace: codesphereSystemNamespace, - } - - secret := &corev1.Secret{} - if err := c.kubeClient.Get(c.ctx, secretKey, secret); err != nil { - return "", fmt.Errorf("failed to get PostgreSQL superuser secret %q: %w", secretName, err) - } - - passwordBytes, ok := secret.Data[cnpgSecretPasswordKey] - if !ok { - return "", fmt.Errorf("PostgreSQL superuser secret %q does not contain key %q", secretName, cnpgSecretPasswordKey) - } - if len(passwordBytes) == 0 { - return "", fmt.Errorf("PostgreSQL superuser secret %q contains an empty %q value", secretName, cnpgSecretPasswordKey) - } - - return string(passwordBytes), nil -} - -func isCNPGClusterReady(cluster *cnpgv1.Cluster) bool { - if cluster == nil { - return false - } - - readyCondition := apimeta.FindStatusCondition(cluster.Status.Conditions, string(cnpgv1.ConditionClusterReady)) - if readyCondition != nil && readyCondition.Status == metav1.ConditionTrue { - return true - } - - return cluster.Status.Instances > 0 && cluster.Status.ReadyInstances == cluster.Status.Instances -} - -func isRookCephClusterReady(cluster *rookcephv1.CephCluster) bool { - if cluster == nil { - return false - } - - for _, condition := range cluster.Status.Conditions { - if condition.Type == rookcephv1.ConditionReady && condition.Status == corev1.ConditionTrue { - return true - } - } - - return cluster.Status.Phase == rookcephv1.ConditionReady -} diff --git a/internal/bootstrap/local/postgres.go b/internal/bootstrap/local/postgres.go new file mode 100644 index 00000000..1f666cf3 --- /dev/null +++ b/internal/bootstrap/local/postgres.go @@ -0,0 +1,219 @@ +// Copyright (c) Codesphere Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "time" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + cnpgRepoName = "cnpg" + cnpgRepoURL = "https://cloudnative-pg.github.io/charts" + cnpgReleaseName = "cnpg" + cnpgChartRef = "cnpg/cloudnative-pg" + cnpgDatabaseClusterName = "masterdata" + cnpgDatabaseVersion = "15.14" + cnpgDatabaseStorageSize = "10Gi" + cnpgReadyTimeout = 15 * time.Minute + cnpgReadyPollInterval = 5 * time.Second + cnpgSecretPasswordKey = "password" +) + +func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { + repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() + if err != nil && !strings.Contains(string(repoAddOut), "already exists") { + return fmt.Errorf("failed to add helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoAddOut))) + } + + repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) + } + + cmd := exec.CommandContext( + c.ctx, + "helm", + "upgrade", + "--install", + cnpgReleaseName, + cnpgChartRef, + "--namespace", + codesphereSystemNamespace, + "--create-namespace", + "--set", + "config.clusterWide=false", + "--wait", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to install helm chart %q: %w", cnpgChartRef, err) + } + + 
return nil +} + +func (c *LocalBootstrapper) DeployPostgresDatabase() error { + postgresCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, postgresCluster, func() error { + postgresCluster.Spec = cnpgv1.ClusterSpec{ + ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), + Instances: 1, + StorageConfiguration: cnpgv1.StorageConfiguration{ + Size: cnpgDatabaseStorageSize, + }, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) + } + + if err := c.WaitForPostgresDatabaseReady(); err != nil { + return err + } + + return nil +} + +func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { + ctx, cancel := context.WithTimeout(c.ctx, cnpgReadyTimeout) + defer cancel() + + clusterKey := client.ObjectKey{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + } + + steps := int(cnpgReadyTimeout / cnpgReadyPollInterval) + if steps < 1 { + steps = 1 + } + + backoff := wait.Backoff{ + Duration: cnpgReadyPollInterval, + Factor: 1.0, + Jitter: 0.1, + Steps: steps, + } + + lastPhase := "" + lastReadyInstances := 0 + lastInstances := 0 + + err := retry.OnError(backoff, isRetryableWaitError, func() error { + if err := ctx.Err(); err != nil { + return err + } + + cluster := &cnpgv1.Cluster{} + err := c.kubeClient.Get(ctx, clusterKey, cluster) + if err != nil { + if apierrors.IsNotFound(err) { + return &retryableWaitError{err: fmt.Errorf("PostgreSQL cluster %q not found yet", cnpgDatabaseClusterName)} + } + + return err + } + + lastPhase = cluster.Status.Phase + lastReadyInstances = cluster.Status.ReadyInstances + lastInstances = cluster.Status.Instances + + if isCNPGClusterReady(cluster) { + return nil + } + + return &retryableWaitError{err: fmt.Errorf( + 
"PostgreSQL cluster is not ready yet (phase=%q, readyInstances=%d, instances=%d)", + lastPhase, + lastReadyInstances, + lastInstances, + )} + }) + if err == nil { + return nil + } + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { + return fmt.Errorf( + "timed out waiting for PostgreSQL cluster %q to become ready (phase=%q, readyInstances=%d, instances=%d, error=%v)", + cnpgDatabaseClusterName, + lastPhase, + lastReadyInstances, + lastInstances, + err, + ) + } + + return fmt.Errorf("failed to fetch PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) +} + +func (c *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { + clusterKey := client.ObjectKey{ + Name: cnpgDatabaseClusterName, + Namespace: codesphereSystemNamespace, + } + + cluster := &cnpgv1.Cluster{} + if err := c.kubeClient.Get(c.ctx, clusterKey, cluster); err != nil { + return "", fmt.Errorf("failed to get PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) + } + + secretName := cluster.GetSuperuserSecretName() + secretKey := client.ObjectKey{ + Name: secretName, + Namespace: codesphereSystemNamespace, + } + + secret := &corev1.Secret{} + if err := c.kubeClient.Get(c.ctx, secretKey, secret); err != nil { + return "", fmt.Errorf("failed to get PostgreSQL superuser secret %q: %w", secretName, err) + } + + passwordBytes, ok := secret.Data[cnpgSecretPasswordKey] + if !ok { + return "", fmt.Errorf("PostgreSQL superuser secret %q does not contain key %q", secretName, cnpgSecretPasswordKey) + } + if len(passwordBytes) == 0 { + return "", fmt.Errorf("PostgreSQL superuser secret %q contains an empty %q value", secretName, cnpgSecretPasswordKey) + } + + return string(passwordBytes), nil +} + +func isCNPGClusterReady(cluster *cnpgv1.Cluster) bool { + if cluster == nil { + return false + } + + readyCondition := apimeta.FindStatusCondition(cluster.Status.Conditions, string(cnpgv1.ConditionClusterReady)) + if readyCondition != 
nil && readyCondition.Status == metav1.ConditionTrue { + return true + } + + return cluster.Status.Instances > 0 && cluster.Status.ReadyInstances == cluster.Status.Instances +} diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go new file mode 100644 index 00000000..f594ed0a --- /dev/null +++ b/internal/bootstrap/local/rook.go @@ -0,0 +1,233 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "time" + + rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + rookRepoName = "rook-release" + rookRepoURL = "https://charts.rook.io/release" + rookReleaseName = "rook-ceph" + rookChartRef = "rook-release/rook-ceph" + rookNamespace = "rook-ceph" + rookClusterName = "rook-ceph" + rookCephImage = "quay.io/ceph/ceph:v18.2.7" + rookCephDataDirHostPath = "/var/lib/rook" + rookCephDeviceSetName = "set1" + rookCephOSDStorageSize = "20Gi" + rookReadyTimeout = 30 * time.Minute + rookReadyPollInterval = 5 * time.Second +) + +func (c *LocalBootstrapper) InstallRookHelmChart() error { + repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() + if err != nil && !strings.Contains(string(repoAddOut), "already exists") { + return fmt.Errorf("failed to add helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoAddOut))) + } + + repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", rookRepoName).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to update helm repo %q: %w (%s)", rookRepoName, err, 
strings.TrimSpace(string(repoUpdateOut))) + } + + cmd := exec.CommandContext( + c.ctx, + "helm", + "upgrade", + "--install", + rookReleaseName, + rookChartRef, + "--namespace", + rookNamespace, + "--create-namespace", + "--wait", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to install helm chart %q: %w", rookChartRef, err) + } + + return nil +} + +func (c *LocalBootstrapper) DeployTestCephCluster() error { + // Ceph test cluster config from https://github.com/rook/rook/blob/0e05c6afff25a4e03649dd2092a5a10c3349fd9c/deploy/examples/cluster-test.yaml + cephCluster := &rookcephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: rookClusterName, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, cephCluster, func() error { + cephCluster.Spec = rookcephv1.ClusterSpec{ + CephVersion: rookcephv1.CephVersionSpec{ + Image: rookCephImage, + }, + DataDirHostPath: rookCephDataDirHostPath, + Mon: rookcephv1.MonSpec{ + Count: 1, + AllowMultiplePerNode: true, + }, + Mgr: rookcephv1.MgrSpec{ + Count: 1, + AllowMultiplePerNode: true, + }, + Storage: rookcephv1.StorageScopeSpec{ + // TODO: make configurable. 
+ UseAllNodes: true, + AllowDeviceClassUpdate: true, + AllowOsdCrushWeightUpdate: false, + }, + PriorityClassNames: rookcephv1.PriorityClassNamesSpec{ + "all": "system-node-critical", + "mgr": "system-cluster-critical", + }, + HealthCheck: rookcephv1.CephClusterHealthCheckSpec{ + DaemonHealth: rookcephv1.DaemonHealthSpec{ + Monitor: rookcephv1.HealthCheckSpec{ + Interval: &metav1.Duration{Duration: 45 * time.Second}, + Timeout: "600s", + }, + }, + }, + Dashboard: rookcephv1.DashboardSpec{ + Enabled: true, + }, + DisruptionManagement: rookcephv1.DisruptionManagementSpec{ + ManagePodBudgets: true, + }, + CrashCollector: rookcephv1.CrashCollectorSpec{ + Disable: true, + }, + Monitoring: rookcephv1.MonitoringSpec{ + Enabled: false, + }, + CephConfig: map[string]map[string]string{ + "global": { + "osd_pool_default_size": "1", + "mon_warn_on_pool_no_redundancy": "false", + "bdev_flock_retry": "20", + "bluefs_buffered_io": "false", + "mon_data_avail_warn": "10", + }, + }, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update Ceph cluster %q: %w", rookClusterName, err) + } + + if err := c.WaitForTestCephClusterReady(); err != nil { + return err + } + + return nil +} + +func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { + ctx, cancel := context.WithTimeout(c.ctx, rookReadyTimeout) + defer cancel() + + clusterKey := client.ObjectKey{ + Name: rookClusterName, + Namespace: rookNamespace, + } + + steps := int(rookReadyTimeout / rookReadyPollInterval) + if steps < 1 { + steps = 1 + } + + backoff := wait.Backoff{ + Duration: rookReadyPollInterval, + Factor: 1.0, + Jitter: 0.1, + Steps: steps, + } + + lastPhase := "" + lastState := "" + lastMessage := "" + + err := retry.OnError(backoff, isRetryableWaitError, func() error { + if err := ctx.Err(); err != nil { + return err + } + + cluster := &rookcephv1.CephCluster{} + err := c.kubeClient.Get(ctx, clusterKey, cluster) + if err != nil { + if apierrors.IsNotFound(err) { + return 
&retryableWaitError{err: fmt.Errorf("Ceph cluster %q not found yet", rookClusterName)} + } + + return err + } + + lastPhase = string(cluster.Status.Phase) + lastState = string(cluster.Status.State) + lastMessage = cluster.Status.Message + + if isRookCephClusterReady(cluster) { + return nil + } + + return &retryableWaitError{err: fmt.Errorf( + "Ceph cluster is not ready yet (phase=%q, state=%q, message=%q)", + lastPhase, + lastState, + lastMessage, + )} + }) + if err == nil { + return nil + } + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || isRetryableWaitError(err) { + return fmt.Errorf( + "timed out waiting for Ceph cluster %q to become ready (phase=%q, state=%q, message=%q, error=%v)", + rookClusterName, + lastPhase, + lastState, + lastMessage, + err, + ) + } + + return fmt.Errorf("failed to fetch Ceph cluster %q: %w", rookClusterName, err) +} + +func isRookCephClusterReady(cluster *rookcephv1.CephCluster) bool { + if cluster == nil { + return false + } + + for _, condition := range cluster.Status.Conditions { + if condition.Type == rookcephv1.ConditionReady && condition.Status == corev1.ConditionTrue { + return true + } + } + + return cluster.Status.Phase == rookcephv1.ConditionReady +} From 779fb458418d6f607f7b1156c839e7f57c85ad19 Mon Sep 17 00:00:00 2001 From: schrodit Date: Fri, 6 Mar 2026 16:43:15 +0100 Subject: [PATCH 06/31] remove requests to run inside a local cluster Signed-off-by: Tim Schrodi --- go.mod | 2 +- internal/bootstrap/local/local.go | 5 + internal/bootstrap/local/postgres.go | 9 +- internal/bootstrap/local/rook.go | 191 ++++++++++++++++++++++++++- 4 files changed, 199 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index f850a9eb..ef174fb5 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( k8s.io/api v0.35.2 k8s.io/apimachinery v0.35.2 k8s.io/client-go v0.35.2 + k8s.io/utils v0.0.0-20260108192941-914a6e750570 sigs.k8s.io/controller-runtime v0.23.2 ) @@ -580,7 +581,6 @@ require ( 
k8s.io/apiextensions-apiserver v0.35.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect - k8s.io/utils v0.0.0-20260108192941-914a6e750570 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 13a85906..4a083cad 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -78,6 +78,11 @@ func (c *LocalBootstrapper) Bootstrap() error { return err } + err = c.stlog.Substep("Create CephBlockPool and StorageClass", c.DeployCephBlockPoolAndStorageClass) + if err != nil { + return err + } + return nil }) if err != nil { diff --git a/internal/bootstrap/local/postgres.go b/internal/bootstrap/local/postgres.go index 1f666cf3..5945d7af 100644 --- a/internal/bootstrap/local/postgres.go +++ b/internal/bootstrap/local/postgres.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -57,8 +58,9 @@ func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { "--namespace", codesphereSystemNamespace, "--create-namespace", - "--set", - "config.clusterWide=false", + "--set", "config.clusterWide=false", + "--set", "resources.requests.cpu=0", + "--set", "resources.requests.memory=0", "--wait", ) cmd.Stdout = os.Stdout @@ -83,7 +85,8 @@ func (c *LocalBootstrapper) DeployPostgresDatabase() error { ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), Instances: 1, StorageConfiguration: cnpgv1.StorageConfiguration{ - Size: cnpgDatabaseStorageSize, + StorageClass: ptr.To(cephStorageClassName), + 
Size: cnpgDatabaseStorageSize, }, } return nil diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go index f594ed0a..d526aac3 100644 --- a/internal/bootstrap/local/rook.go +++ b/internal/bootstrap/local/rook.go @@ -5,19 +5,23 @@ package local import ( "context" + "encoding/json" "errors" "fmt" "os" "os/exec" + "path/filepath" "strings" "time" rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -29,14 +33,104 @@ const ( rookChartRef = "rook-release/rook-ceph" rookNamespace = "rook-ceph" rookClusterName = "rook-ceph" - rookCephImage = "quay.io/ceph/ceph:v18.2.7" + rookCephImage = "quay.io/ceph/ceph:v19.2.3" rookCephDataDirHostPath = "/var/lib/rook" - rookCephDeviceSetName = "set1" - rookCephOSDStorageSize = "20Gi" rookReadyTimeout = 30 * time.Minute rookReadyPollInterval = 5 * time.Second + + cephBlockPoolName = "codesphere-rbd" + cephStorageClassName = "codesphere-rbd" + cephRBDProvisionerName = "rook-ceph.rbd.csi.ceph.com" ) +// csiResourceEntry represents a single container resource definition for Rook CSI drivers. +type csiResourceEntry struct { + Name string `json:"name"` + Resource map[string]interface{} `json:"resource"` +} + +// buildRookHelmValues constructs the Helm values for the Rook operator chart. +// It configures the operator and CSI drivers without resource requests +// but preserves the default memory limits for each container. 
+// +// Config is based on https://github.com/rook/rook/blob/master/deploy/charts/rook-ceph/values.yaml#L233 +func buildRookHelmValues() ([]byte, error) { + limitOnly := func(memory string) map[string]interface{} { + return map[string]interface{}{ + "limits": map[string]string{"memory": memory}, + "requests": map[string]string{"cpu": "0", "memory": "0"}, + } + } + + marshalCSIResources := func(entries []csiResourceEntry) (string, error) { + b, err := json.Marshal(entries) + if err != nil { + return "", err + } + return string(b), nil + } + + rbdProvisioner, err := marshalCSIResources([]csiResourceEntry{ + {"csi-provisioner", limitOnly("256Mi")}, + {"csi-resizer", limitOnly("256Mi")}, + {"csi-attacher", limitOnly("256Mi")}, + {"csi-snapshotter", limitOnly("256Mi")}, + {"csi-rbdplugin", limitOnly("1Gi")}, + {"liveness-prometheus", limitOnly("256Mi")}, + }) + if err != nil { + return nil, fmt.Errorf("failed to marshal csiRBDProvisionerResource: %w", err) + } + + rbdPlugin, err := marshalCSIResources([]csiResourceEntry{ + {"driver-registrar", limitOnly("256Mi")}, + {"csi-rbdplugin", limitOnly("1Gi")}, + {"liveness-prometheus", limitOnly("256Mi")}, + }) + if err != nil { + return nil, fmt.Errorf("failed to marshal csiRBDPluginResource: %w", err) + } + + cephfsProvisioner, err := marshalCSIResources([]csiResourceEntry{ + {"csi-provisioner", limitOnly("256Mi")}, + {"csi-resizer", limitOnly("256Mi")}, + {"csi-attacher", limitOnly("256Mi")}, + {"csi-snapshotter", limitOnly("256Mi")}, + {"csi-cephfsplugin", limitOnly("1Gi")}, + {"liveness-prometheus", limitOnly("256Mi")}, + }) + if err != nil { + return nil, fmt.Errorf("failed to marshal csiCephFSProvisionerResource: %w", err) + } + + cephfsPlugin, err := marshalCSIResources([]csiResourceEntry{ + {"driver-registrar", limitOnly("256Mi")}, + {"csi-cephfsplugin", limitOnly("1Gi")}, + {"liveness-prometheus", limitOnly("256Mi")}, + }) + if err != nil { + return nil, fmt.Errorf("failed to marshal csiCephFSPluginResource: %w", err) 
+ } + + values := map[string]interface{}{ + "priorityClassName": "system-cluster-critical", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "0", + "memory": "0", + }, + }, + "csi": map[string]interface{}{ + "csiRBDProvisionerResource": rbdProvisioner, + "csiRBDPluginResource": rbdPlugin, + "csiCephFSProvisionerResource": cephfsProvisioner, + "csiCephFSPluginResource": cephfsPlugin, + }, + } + + return json.MarshalIndent(values, "", " ") +} + func (c *LocalBootstrapper) InstallRookHelmChart() error { repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() if err != nil && !strings.Contains(string(repoAddOut), "already exists") { @@ -48,6 +142,22 @@ func (c *LocalBootstrapper) InstallRookHelmChart() error { return fmt.Errorf("failed to update helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoUpdateOut))) } + helmValues, err := buildRookHelmValues() + if err != nil { + return fmt.Errorf("failed to build Helm values: %w", err) + } + + tmpDir, err := os.MkdirTemp("", "rook-helm-values-*") + if err != nil { + return fmt.Errorf("failed to create temp directory for Helm values: %w", err) + } + defer os.RemoveAll(tmpDir) + + valuesPath := filepath.Join(tmpDir, "values.json") + if err := os.WriteFile(valuesPath, helmValues, 0600); err != nil { + return fmt.Errorf("failed to write Helm values file: %w", err) + } + cmd := exec.CommandContext( c.ctx, "helm", @@ -58,6 +168,7 @@ func (c *LocalBootstrapper) InstallRookHelmChart() error { "--namespace", rookNamespace, "--create-namespace", + "--values", valuesPath, "--wait", ) cmd.Stdout = os.Stdout @@ -94,7 +205,10 @@ func (c *LocalBootstrapper) DeployTestCephCluster() error { }, Storage: rookcephv1.StorageScopeSpec{ // TODO: make configurable. 
- UseAllNodes: true, + UseAllNodes: true, + Selection: rookcephv1.Selection{ + UseAllDevices: ptr.To(true), + }, AllowDeviceClassUpdate: true, AllowOsdCrushWeightUpdate: false, }, @@ -131,6 +245,14 @@ func (c *LocalBootstrapper) DeployTestCephCluster() error { "mon_data_avail_warn": "10", }, }, + Resources: rookcephv1.ResourceSpec{ + "mon": {}, + "mgr": {}, + "osd": {}, + "mgr-sidecar": {}, + "crashcollector": {}, + "cleanup": {}, + }, } return nil }) @@ -218,6 +340,67 @@ func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { return fmt.Errorf("failed to fetch Ceph cluster %q: %w", rookClusterName, err) } +func (c *LocalBootstrapper) DeployCephBlockPoolAndStorageClass() error { + // Create CephBlockPool + blockPool := &rookcephv1.CephBlockPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: cephBlockPoolName, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, blockPool, func() error { + blockPool.Spec = rookcephv1.NamedBlockPoolSpec{ + PoolSpec: rookcephv1.PoolSpec{ + FailureDomain: "osd", + Replicated: rookcephv1.ReplicatedSpec{ + Size: 1, + RequireSafeReplicaSize: false, + }, + }, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update CephBlockPool %q: %w", cephBlockPoolName, err) + } + + // Create StorageClass + reclaimPolicy := corev1.PersistentVolumeReclaimDelete + volumeBindingMode := storagev1.VolumeBindingImmediate + storageClass := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: cephStorageClassName, + }, + } + + _, err = controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, storageClass, func() error { + storageClass.Provisioner = cephRBDProvisionerName + storageClass.Parameters = map[string]string{ + "clusterID": rookNamespace, + "pool": cephBlockPoolName, + "imageFormat": "2", + "imageFeatures": "layering", + "csi.storage.k8s.io/provisioner-secret-name": "rook-csi-rbd-provisioner", + "csi.storage.k8s.io/provisioner-secret-namespace": rookNamespace, + 
"csi.storage.k8s.io/controller-expand-secret-name": "rook-csi-rbd-provisioner", + "csi.storage.k8s.io/controller-expand-secret-namespace": rookNamespace, + "csi.storage.k8s.io/node-stage-secret-name": "rook-csi-rbd-node", + "csi.storage.k8s.io/node-stage-secret-namespace": rookNamespace, + "csi.storage.k8s.io/fstype": "ext4", + } + storageClass.ReclaimPolicy = &reclaimPolicy + storageClass.VolumeBindingMode = &volumeBindingMode + storageClass.AllowVolumeExpansion = ptr.To(true) + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update StorageClass %q: %w", cephStorageClassName, err) + } + + return nil +} + func isRookCephClusterReady(cluster *rookcephv1.CephCluster) bool { if cluster == nil { return false From 017cdf5acc88df5583cac6d5ae250f556498ea57 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Fri, 6 Mar 2026 17:06:47 +0100 Subject: [PATCH 07/31] feat: add basic local config Signed-off-by: Tim Schrodi --- .golangci.yml | 5 + cli/cmd/bootstrap_local.go | 25 ++- external/ceph.rook.io/v1/object_test.go | 4 +- internal/bootstrap/local/local.go | 224 ++++++++++++++++++++++-- internal/bootstrap/local/postgres.go | 55 ++++-- internal/bootstrap/local/rook.go | 26 +-- internal/installer/files/config_yaml.go | 2 + internal/util/filewriter.go | 4 +- 8 files changed, 293 insertions(+), 52 deletions(-) create mode 100644 .golangci.yml diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..f2d393e5 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,5 @@ +version: "2" +linters: + exclusions: + paths: + - "external/.*" diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 3d65d723..047dc114 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -25,14 +25,14 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 
ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config" ) type BootstrapLocalCmd struct { - cmd *cobra.Command - Yes bool + cmd *cobra.Command + CodesphereEnv *local.CodesphereEnvironment + Yes bool } func (c *BootstrapLocalCmd) RunE(_ *cobra.Command, args []string) error { @@ -55,8 +55,19 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. Not for production use.`), }, - } - bootstrapLocalCmd.cmd.Flags().BoolVarP(&bootstrapLocalCmd.Yes, "yes", "y", false, "Auto-approve the local bootstrapping warning prompt") + CodesphereEnv: &local.CodesphereEnvironment{}, + } + + flags := bootstrapLocalCmd.cmd.Flags() + // Installer + flags.BoolVarP(&bootstrapLocalCmd.Yes, "yes", "y", false, "Auto-approve the local bootstrapping warning prompt") + // Codesphere Environment + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") + flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") + flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.FeatureFlags, "feature-flags", []string{}, "Feature flags to enable in Codesphere installation (optional)") + // Config + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallConfigPath, "install-config", "config.yaml", "Path to install config file (optional)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.SecretsFilePath, "secrets-file", "prod.vault.yaml", "Path to secrets files (optional)") bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE parent.AddCommand(bootstrapLocalCmd.cmd) @@ -80,7 +91,7 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { return fmt.Errorf("failed to initialize Kubernetes client: %w", err) } - bs := local.NewLocalBootstrapper(ctx, stlog, kubeClient, fw, icg) + bs := local.NewLocalBootstrapper(ctx, stlog, 
kubeClient, fw, icg, c.CodesphereEnv) return bs.Bootstrap() } @@ -124,7 +135,7 @@ Consult the offical rook guide for more details and troubleshooting: https://roo return nil } -func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (client.Client, error) { +func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (ctrlclient.Client, error) { kubeConfig, err := ctrlconfig.GetConfig() if err != nil { return nil, fmt.Errorf("failed to load Kubernetes config: %w", err) diff --git a/external/ceph.rook.io/v1/object_test.go b/external/ceph.rook.io/v1/object_test.go index 5ecfa732..8d32a7f0 100644 --- a/external/ceph.rook.io/v1/object_test.go +++ b/external/ceph.rook.io/v1/object_test.go @@ -50,12 +50,12 @@ func TestValidateObjectStoreSpec(t *testing.T) { assert.Error(t, err) // when name is empty - o.ObjectMeta.Name = "" + o.Name = "" err = ValidateObjectSpec(o) assert.Error(t, err) // when namespace is empty - o.ObjectMeta.Namespace = "" + o.Namespace = "" err = ValidateObjectSpec(o) assert.Error(t, err) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 4a083cad..bb570533 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "path/filepath" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/installer" + "github.com/codesphere-cloud/oms/internal/installer/files" "github.com/codesphere-cloud/oms/internal/util" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,39 +48,56 @@ type LocalBootstrapper struct { } type CodesphereEnvironment struct { - // Add any environment-specific fields here, e.g.: - ClusterName string + BaseDomain string `json:"base_domain"` + Experiments []string `json:"experiments"` + FeatureFlags []string `json:"feature_flags"` + // Config + ExistingConfigUsed bool `json:"-"` + InstallConfigPath string `json:"-"` + SecretsFilePath string 
`json:"-"` + InstallConfig *files.RootConfig `json:"-"` + Secrets *files.InstallVault `json:"-"` } -func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, fw util.FileIO, icg installer.InstallConfigManager) *LocalBootstrapper { +func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, fw util.FileIO, icg installer.InstallConfigManager, env *CodesphereEnvironment) *LocalBootstrapper { return &LocalBootstrapper{ ctx: ctx, stlog: stlog, kubeClient: kubeClient, fw: fw, icg: icg, - Env: &CodesphereEnvironment{}, + Env: env, } } -func (c *LocalBootstrapper) Bootstrap() error { - err := c.stlog.Step("Ensure codesphere system namespace", c.EnsureCodesphereSystemNamespace) +func (b *LocalBootstrapper) Bootstrap() error { + err := b.stlog.Step("Ensure install config", b.EnsureInstallConfig) + if err != nil { + return fmt.Errorf("failed to ensure install config: %w", err) + } + + err = b.stlog.Step("Ensure secrets", b.EnsureSecrets) + if err != nil { + return fmt.Errorf("failed to ensure secrets: %w", err) + } + + err = b.stlog.Step("Ensure codesphere system namespace", b.EnsureCodesphereSystemNamespace) if err != nil { return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) } - err = c.stlog.Step("Install Rook and test Ceph cluster", func() error { - err := c.stlog.Substep("Install Rook operator", c.InstallRookHelmChart) + err = b.stlog.Step("Install Rook and test Ceph cluster", func() error { + err := b.stlog.Substep("Install Rook operator", b.InstallRookHelmChart) if err != nil { return err } - err = c.stlog.Substep("Deploy test Ceph cluster (single OSD)", c.DeployTestCephCluster) + err = b.stlog.Substep("Deploy test Ceph cluster (single OSD)", b.DeployTestCephCluster) if err != nil { return err } - err = c.stlog.Substep("Create CephBlockPool and StorageClass", c.DeployCephBlockPoolAndStorageClass) + err = b.stlog.Substep("Create CephBlockPool and 
StorageClass", b.DeployCephBlockPoolAndStorageClass) if err != nil { return err } @@ -89,13 +108,13 @@ func (c *LocalBootstrapper) Bootstrap() error { return fmt.Errorf("failed to install Rook and deploy test Ceph cluster: %w", err) } - err = c.stlog.Step("Install CloudNativePG and PostgreSQL", func() error { - err := c.stlog.Substep("Install CloudNativePG operator", c.InstallCloudNativePGHelmChart) + err = b.stlog.Step("Install CloudNativePG and PostgreSQL", func() error { + err := b.stlog.Substep("Install CloudNativePG operator", b.InstallCloudNativePGHelmChart) if err != nil { return err } - err = c.stlog.Substep("Deploy PostgreSQL database", c.DeployPostgresDatabase) + err = b.stlog.Substep("Deploy PostgreSQL database", b.DeployPostgresDatabase) if err != nil { return err } @@ -106,16 +125,191 @@ func (c *LocalBootstrapper) Bootstrap() error { return fmt.Errorf("failed to install CloudNativePG and deploy PostgreSQL database: %w", err) } + err = b.stlog.Step("Update install config", b.UpdateInstallConfig) + if err != nil { + return fmt.Errorf("failed to update install config: %w", err) + } + return nil } -func (c *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { +func (b *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: codesphereSystemNamespace}, } - if err := c.kubeClient.Create(c.ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + if err := b.kubeClient.Create(b.ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failed to create namespace %q: %w", codesphereSystemNamespace, err) } return nil } + +func (b *LocalBootstrapper) EnsureInstallConfig() error { + if b.fw.Exists(b.Env.InstallConfigPath) { + err := b.icg.LoadInstallConfigFromFile(b.Env.InstallConfigPath) + if err != nil { + return fmt.Errorf("failed to load config file: %w", err) + } + + b.Env.ExistingConfigUsed = true + } else { + err := b.icg.ApplyProfile("dev") 
+ if err != nil { + return fmt.Errorf("failed to apply profile: %w", err) + } + } + + b.Env.InstallConfig = b.icg.GetInstallConfig() + + return nil +} + +func (b *LocalBootstrapper) EnsureSecrets() error { + if b.fw.Exists(b.Env.SecretsFilePath) { + err := b.icg.LoadVaultFromFile(b.Env.SecretsFilePath) + if err != nil { + return fmt.Errorf("failed to load vault file: %w", err) + } + err = b.icg.MergeVaultIntoConfig() + if err != nil { + return fmt.Errorf("failed to merge vault into config: %w", err) + } + } + + b.Env.Secrets = b.icg.GetVault() + + return nil +} + +func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { + b.Env.InstallConfig.Secrets.BaseDir = filepath.Dir(b.Env.SecretsFilePath) + b.Env.InstallConfig.Registry.ReplaceImagesInBom = false + b.Env.InstallConfig.Registry.LoadContainerImages = false + + b.Env.InstallConfig.Postgres.Mode = "external" + b.Env.InstallConfig.Postgres.Database = cnpgDatabaseName + b.Env.InstallConfig.Postgres.CACertPem, err = b.ReadPostgresCA() + if err != nil { + return fmt.Errorf("failed to read PostgreSQL CA: %w", err) + } + b.Env.InstallConfig.Postgres.ServerAddress = "masterdata-rw.codesphere-system.svc.cluster.local" + b.Env.InstallConfig.Postgres.Port = 5432 + b.Env.InstallConfig.Postgres.Primary = nil + b.Env.InstallConfig.Postgres.Replica = nil + + // TODO: configure ceph values + + b.Env.InstallConfig.Kubernetes = files.KubernetesConfig{ + ManagedByCodesphere: false, + } + b.Env.InstallConfig.Cluster.Monitoring = &files.MonitoringConfig{ + Prometheus: &files.PrometheusConfig{ + RemoteWrite: &files.RemoteWriteConfig{ + Enabled: false, + ClusterName: "local-test", + }, + }, + } + b.Env.InstallConfig.Cluster.Gateway = files.GatewayConfig{ + ServiceType: "LoadBalancer", + } + b.Env.InstallConfig.Cluster.PublicGateway = files.GatewayConfig{ + ServiceType: "LoadBalancer", + } + + // TODO: certificates + + b.Env.InstallConfig.Codesphere.Domain = b.Env.BaseDomain + 
b.Env.InstallConfig.Codesphere.WorkspaceHostingBaseDomain = "ws." + b.Env.BaseDomain + // TODO: set public IP or configure DNS for local setup + // b.Env.InstallConfig.Codesphere.PublicIP = b.Env.ControlPlaneNodes[1].GetExternalIP() + b.Env.InstallConfig.Codesphere.CustomDomains = files.CustomDomainsConfig{ + CNameBaseDomain: "ws." + b.Env.BaseDomain, + } + b.Env.InstallConfig.Codesphere.DNSServers = []string{"8.8.8.8"} + b.Env.InstallConfig.Codesphere.DeployConfig = files.DeployConfig{ + Images: map[string]files.ImageConfig{ + "ubuntu-24.04": { + Name: "Ubuntu 24.04", + SupportedUntil: "2028-05-31", + Flavors: map[string]files.FlavorConfig{ + "default": { + Image: files.ImageRef{ + BomRef: "workspace-agent-24.04", + }, + Pool: map[int]int{ + 1: 1, + 2: 1, + 3: 0, + }, + }, + }, + }, + }, + } + b.Env.InstallConfig.Codesphere.Plans = files.PlansConfig{ + HostingPlans: map[int]files.HostingPlan{ + 1: { + CPUTenth: 20, + GPUParts: 0, + MemoryMb: 4096, + StorageMb: 20480, + TempStorageMb: 1024, + }, + 2: { + CPUTenth: 40, + GPUParts: 0, + MemoryMb: 8192, + StorageMb: 40960, + TempStorageMb: 1024, + }, + 3: { + CPUTenth: 80, + GPUParts: 0, + MemoryMb: 16384, + StorageMb: 40960, + TempStorageMb: 1024, + }, + }, + WorkspacePlans: map[int]files.WorkspacePlan{ + 1: { + Name: "Standard", + HostingPlanID: 1, + MaxReplicas: 3, + OnDemand: true, + }, + 2: { + Name: "Big", + HostingPlanID: 2, + MaxReplicas: 3, + OnDemand: true, + }, + 3: { + Name: "Pro", + HostingPlanID: 3, + MaxReplicas: 3, + OnDemand: true, + }, + }, + } + + b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments + b.Env.InstallConfig.Codesphere.Features = b.Env.FeatureFlags + + if !b.Env.ExistingConfigUsed { + err := b.icg.GenerateSecrets() + if err != nil { + return fmt.Errorf("failed to generate secrets: %w", err) + } + } + + if err := b.icg.WriteInstallConfig(b.Env.InstallConfigPath, true); err != nil { + return fmt.Errorf("failed to write config file: %w", err) + } + + if err := 
b.icg.WriteVault(b.Env.SecretsFilePath, true); err != nil { + return fmt.Errorf("failed to write vault file: %w", err) + } + + return nil +} diff --git a/internal/bootstrap/local/postgres.go b/internal/bootstrap/local/postgres.go index 5945d7af..8834f680 100644 --- a/internal/bootstrap/local/postgres.go +++ b/internal/bootstrap/local/postgres.go @@ -30,6 +30,7 @@ const ( cnpgReleaseName = "cnpg" cnpgChartRef = "cnpg/cloudnative-pg" cnpgDatabaseClusterName = "masterdata" + cnpgDatabaseName = "masterdata" cnpgDatabaseVersion = "15.14" cnpgDatabaseStorageSize = "10Gi" cnpgReadyTimeout = 15 * time.Minute @@ -37,19 +38,19 @@ const ( cnpgSecretPasswordKey = "password" ) -func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { - repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() +func (b *LocalBootstrapper) InstallCloudNativePGHelmChart() error { + repoAddOut, err := exec.CommandContext(b.ctx, "helm", "repo", "add", cnpgRepoName, cnpgRepoURL).CombinedOutput() if err != nil && !strings.Contains(string(repoAddOut), "already exists") { return fmt.Errorf("failed to add helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoAddOut))) } - repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() + repoUpdateOut, err := exec.CommandContext(b.ctx, "helm", "repo", "update", cnpgRepoName).CombinedOutput() if err != nil { return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) } cmd := exec.CommandContext( - c.ctx, + b.ctx, "helm", "upgrade", "--install", @@ -72,7 +73,7 @@ func (c *LocalBootstrapper) InstallCloudNativePGHelmChart() error { return nil } -func (c *LocalBootstrapper) DeployPostgresDatabase() error { +func (b *LocalBootstrapper) DeployPostgresDatabase() error { postgresCluster := &cnpgv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: cnpgDatabaseClusterName, @@ -80,7 
+81,7 @@ func (c *LocalBootstrapper) DeployPostgresDatabase() error { }, } - _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, postgresCluster, func() error { + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, postgresCluster, func() error { postgresCluster.Spec = cnpgv1.ClusterSpec{ ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-system-trixie", cnpgDatabaseVersion), Instances: 1, @@ -88,6 +89,11 @@ func (c *LocalBootstrapper) DeployPostgresDatabase() error { StorageClass: ptr.To(cephStorageClassName), Size: cnpgDatabaseStorageSize, }, + Bootstrap: &cnpgv1.BootstrapConfiguration{ + InitDB: &cnpgv1.BootstrapInitDB{ + Database: cnpgDatabaseName, + }, + }, } return nil }) @@ -95,15 +101,15 @@ func (c *LocalBootstrapper) DeployPostgresDatabase() error { return fmt.Errorf("failed to create or update PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) } - if err := c.WaitForPostgresDatabaseReady(); err != nil { + if err := b.WaitForPostgresDatabaseReady(); err != nil { return err } return nil } -func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { - ctx, cancel := context.WithTimeout(c.ctx, cnpgReadyTimeout) +func (b *LocalBootstrapper) WaitForPostgresDatabaseReady() error { + ctx, cancel := context.WithTimeout(b.ctx, cnpgReadyTimeout) defer cancel() clusterKey := client.ObjectKey{ @@ -133,7 +139,7 @@ func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { } cluster := &cnpgv1.Cluster{} - err := c.kubeClient.Get(ctx, clusterKey, cluster) + err := b.kubeClient.Get(ctx, clusterKey, cluster) if err != nil { if apierrors.IsNotFound(err) { return &retryableWaitError{err: fmt.Errorf("PostgreSQL cluster %q not found yet", cnpgDatabaseClusterName)} @@ -175,14 +181,14 @@ func (c *LocalBootstrapper) WaitForPostgresDatabaseReady() error { return fmt.Errorf("failed to fetch PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) } -func (c *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { +func 
(b *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { clusterKey := client.ObjectKey{ Name: cnpgDatabaseClusterName, Namespace: codesphereSystemNamespace, } cluster := &cnpgv1.Cluster{} - if err := c.kubeClient.Get(c.ctx, clusterKey, cluster); err != nil { + if err := b.kubeClient.Get(b.ctx, clusterKey, cluster); err != nil { return "", fmt.Errorf("failed to get PostgreSQL cluster %q: %w", cnpgDatabaseClusterName, err) } @@ -193,7 +199,7 @@ func (c *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { } secret := &corev1.Secret{} - if err := c.kubeClient.Get(c.ctx, secretKey, secret); err != nil { + if err := b.kubeClient.Get(b.ctx, secretKey, secret); err != nil { return "", fmt.Errorf("failed to get PostgreSQL superuser secret %q: %w", secretName, err) } @@ -208,6 +214,29 @@ func (c *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { return string(passwordBytes), nil } +func (b *LocalBootstrapper) ReadPostgresCA() (string, error) { + secretName := cnpgDatabaseClusterName + "-ca" + secretKey := client.ObjectKey{ + Name: secretName, + Namespace: codesphereSystemNamespace, + } + + secret := &corev1.Secret{} + if err := b.kubeClient.Get(b.ctx, secretKey, secret); err != nil { + return "", fmt.Errorf("failed to get PostgreSQL CA secret %q: %w", secretName, err) + } + + caCert, ok := secret.Data["ca.crt"] + if !ok { + return "", fmt.Errorf("PostgreSQL CA secret %q does not contain key %q", secretName, "ca.crt") + } + if len(caCert) == 0 { + return "", fmt.Errorf("PostgreSQL CA secret %q contains an empty %q value", secretName, "ca.crt") + } + + return string(caCert), nil +} + func isCNPGClusterReady(cluster *cnpgv1.Cluster) bool { if cluster == nil { return false diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go index d526aac3..420dd18f 100644 --- a/internal/bootstrap/local/rook.go +++ b/internal/bootstrap/local/rook.go @@ -131,13 +131,13 @@ func buildRookHelmValues() ([]byte, error) { 
return json.MarshalIndent(values, "", " ") } -func (c *LocalBootstrapper) InstallRookHelmChart() error { - repoAddOut, err := exec.CommandContext(c.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() +func (b *LocalBootstrapper) InstallRookHelmChart() error { + repoAddOut, err := exec.CommandContext(b.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() if err != nil && !strings.Contains(string(repoAddOut), "already exists") { return fmt.Errorf("failed to add helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoAddOut))) } - repoUpdateOut, err := exec.CommandContext(c.ctx, "helm", "repo", "update", rookRepoName).CombinedOutput() + repoUpdateOut, err := exec.CommandContext(b.ctx, "helm", "repo", "update", rookRepoName).CombinedOutput() if err != nil { return fmt.Errorf("failed to update helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoUpdateOut))) } @@ -159,7 +159,7 @@ func (c *LocalBootstrapper) InstallRookHelmChart() error { } cmd := exec.CommandContext( - c.ctx, + b.ctx, "helm", "upgrade", "--install", @@ -180,7 +180,7 @@ func (c *LocalBootstrapper) InstallRookHelmChart() error { return nil } -func (c *LocalBootstrapper) DeployTestCephCluster() error { +func (b *LocalBootstrapper) DeployTestCephCluster() error { // Ceph test cluster config from https://github.com/rook/rook/blob/0e05c6afff25a4e03649dd2092a5a10c3349fd9c/deploy/examples/cluster-test.yaml cephCluster := &rookcephv1.CephCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -189,7 +189,7 @@ func (c *LocalBootstrapper) DeployTestCephCluster() error { }, } - _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, cephCluster, func() error { + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, cephCluster, func() error { cephCluster.Spec = rookcephv1.ClusterSpec{ CephVersion: rookcephv1.CephVersionSpec{ Image: rookCephImage, @@ -260,15 +260,15 @@ func (c *LocalBootstrapper) DeployTestCephCluster() error { return 
fmt.Errorf("failed to create or update Ceph cluster %q: %w", rookClusterName, err) } - if err := c.WaitForTestCephClusterReady(); err != nil { + if err := b.WaitForTestCephClusterReady(); err != nil { return err } return nil } -func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { - ctx, cancel := context.WithTimeout(c.ctx, rookReadyTimeout) +func (b *LocalBootstrapper) WaitForTestCephClusterReady() error { + ctx, cancel := context.WithTimeout(b.ctx, rookReadyTimeout) defer cancel() clusterKey := client.ObjectKey{ @@ -298,7 +298,7 @@ func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { } cluster := &rookcephv1.CephCluster{} - err := c.kubeClient.Get(ctx, clusterKey, cluster) + err := b.kubeClient.Get(ctx, clusterKey, cluster) if err != nil { if apierrors.IsNotFound(err) { return &retryableWaitError{err: fmt.Errorf("Ceph cluster %q not found yet", rookClusterName)} @@ -340,7 +340,7 @@ func (c *LocalBootstrapper) WaitForTestCephClusterReady() error { return fmt.Errorf("failed to fetch Ceph cluster %q: %w", rookClusterName, err) } -func (c *LocalBootstrapper) DeployCephBlockPoolAndStorageClass() error { +func (b *LocalBootstrapper) DeployCephBlockPoolAndStorageClass() error { // Create CephBlockPool blockPool := &rookcephv1.CephBlockPool{ ObjectMeta: metav1.ObjectMeta{ @@ -349,7 +349,7 @@ func (c *LocalBootstrapper) DeployCephBlockPoolAndStorageClass() error { }, } - _, err := controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, blockPool, func() error { + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, blockPool, func() error { blockPool.Spec = rookcephv1.NamedBlockPoolSpec{ PoolSpec: rookcephv1.PoolSpec{ FailureDomain: "osd", @@ -374,7 +374,7 @@ func (c *LocalBootstrapper) DeployCephBlockPoolAndStorageClass() error { }, } - _, err = controllerutil.CreateOrUpdate(c.ctx, c.kubeClient, storageClass, func() error { + _, err = controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, storageClass, func() error { storageClass.Provisioner = 
cephRBDProvisionerName storageClass.Parameters = map[string]string{ "clusterID": rookNamespace, diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 4b49d604..0b053ec1 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -79,6 +79,8 @@ type PostgresConfig struct { Primary *PostgresPrimaryConfig `yaml:"primary,omitempty"` Replica *PostgresReplicaConfig `yaml:"replica,omitempty"` ServerAddress string `yaml:"serverAddress,omitempty"` + Port int `yaml:"port,omitempty"` + Database string `yaml:"database,omitempty"` // Stored separately in vault CaCertPrivateKey string `yaml:"-"` diff --git a/internal/util/filewriter.go b/internal/util/filewriter.go index 88722823..0b3244b2 100644 --- a/internal/util/filewriter.go +++ b/internal/util/filewriter.go @@ -36,12 +36,12 @@ func (fs *FilesystemWriter) Create(filename string) (*os.File, error) { func (fs *FilesystemWriter) CreateAndWrite(filePath string, data []byte, fileType string) error { file, err := fs.Create(filePath) if err != nil { - return fmt.Errorf("failed to create %s file: %w", fileType, err) + return fmt.Errorf("failed to create %s file at %q: %w", fileType, filePath, err) } defer CloseFileIgnoreError(file) if _, err = file.Write(data); err != nil { - return fmt.Errorf("failed to write %s file: %w", fileType, err) + return fmt.Errorf("failed to write %s file at %q: %w", fileType, filePath, err) } log.Printf("\n%s file created: %s", fileType, filePath) From a9702ebf909c95d633a9af3e18a2a5f6298aa0a5 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sat, 7 Mar 2026 23:48:41 +0100 Subject: [PATCH 08/31] feat: prepare cs environment and install it Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 19 + internal/bootstrap/local/ceph.go | 395 ++++++++++++++++++++ internal/bootstrap/local/installer.go | 204 ++++++++++ internal/bootstrap/local/local.go | 103 ++++- internal/bootstrap/local/postgres.go | 12 +- 
internal/bootstrap/local/rook.go | 10 +- internal/installer/config_manager.go | 58 ++- internal/installer/config_manager_test.go | 2 +- internal/installer/files/config_yaml.go | 5 +- internal/installer/mocks.go | 63 ++++ internal/installer/secrets_test.go | 6 +- internal/installer/vault_encryption.go | 141 +++++++ internal/installer/vault_encryption_test.go | 296 +++++++++++++++ 13 files changed, 1290 insertions(+), 24 deletions(-) create mode 100644 internal/bootstrap/local/ceph.go create mode 100644 internal/bootstrap/local/installer.go create mode 100644 internal/installer/vault_encryption.go create mode 100644 internal/installer/vault_encryption_test.go diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 047dc114..c4afab94 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -61,6 +61,9 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags := bootstrapLocalCmd.cmd.Flags() // Installer flags.BoolVarP(&bootstrapLocalCmd.Yes, "yes", "y", false, "Auto-approve the local bootstrapping warning prompt") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallVersion, "install-version", "", "Codesphere version to install (downloaded from the OMS portal)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallHash, "install-hash", "", "Codesphere package hash (required when install-version is set)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallLocal, "install-local", "", "Path to a local installer package (tar.gz or unpacked directory)") // Codesphere Environment flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") @@ -170,6 +173,10 @@ func (c *BootstrapLocalCmd) ValidatePrerequisites(ctx context.Context) error { return err } + if err := c.ValidateEncryptionTools(); err != nil { + 
return err + } + return nil } @@ -213,3 +220,15 @@ func (c *BootstrapLocalCmd) ValidateHelmVersion(ctx context.Context) error { return nil } + +func (c *BootstrapLocalCmd) ValidateEncryptionTools() error { + if _, err := exec.LookPath("sops"); err != nil { + return fmt.Errorf("sops binary not found in PATH; install it with: brew install sops") + } + + if _, err := exec.LookPath("age-keygen"); err != nil { + return fmt.Errorf("age binary not found in PATH (age-keygen missing); install it with: brew install age") + } + + return nil +} diff --git a/internal/bootstrap/local/ceph.go b/internal/bootstrap/local/ceph.go new file mode 100644 index 00000000..0104d049 --- /dev/null +++ b/internal/bootstrap/local/ceph.go @@ -0,0 +1,395 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "context" + "fmt" + "time" + + rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + cephFilesystemName = "codesphere" + cephSubVolumeGroupName = "workspace-volumes" + cephFilesystemReadyTimeout = 10 * time.Minute + cephClientReadyTimeout = 5 * time.Minute + cephReadyPollInterval = 5 * time.Second +) + +// CephUserCredentials holds the entity name and key for a Ceph auth user. +type CephUserCredentials struct { + Entity string + Key string +} + +// CephCredentials holds all Ceph credentials needed by Codesphere. 
+type CephCredentials struct { + FSID string + CephfsAdmin CephUserCredentials + CephfsAdminCodesphere CephUserCredentials + CSIRBDNode CephUserCredentials + CSIRBDProvisioner CephUserCredentials + CSICephFSNode CephUserCredentials + CSICephFSProvisioner CephUserCredentials +} + +// cephClientDef defines a CephClient to create. +type cephClientDef struct { + // name is the CephClient CR name (without the "client." prefix). + name string + // caps is the Ceph auth capabilities. + caps map[string]string +} + +// DeployCephFilesystem creates the CephFS filesystem "codesphere" via a CephFilesystem CRD. +func (b *LocalBootstrapper) DeployCephFilesystem() error { + fs := &rookcephv1.CephFilesystem{ + ObjectMeta: metav1.ObjectMeta{ + Name: cephFilesystemName, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, fs, func() error { + fs.Spec = rookcephv1.FilesystemSpec{ + MetadataPool: rookcephv1.NamedPoolSpec{ + PoolSpec: rookcephv1.PoolSpec{ + Replicated: rookcephv1.ReplicatedSpec{ + Size: 1, + RequireSafeReplicaSize: false, + }, + }, + }, + DataPools: []rookcephv1.NamedPoolSpec{ + { + PoolSpec: rookcephv1.PoolSpec{ + FailureDomain: "osd", + Replicated: rookcephv1.ReplicatedSpec{ + Size: 1, + RequireSafeReplicaSize: false, + }, + }, + }, + }, + PreserveFilesystemOnDelete: true, + MetadataServer: rookcephv1.MetadataServerSpec{ + ActiveCount: 1, + ActiveStandby: false, + Resources: corev1.ResourceRequirements{}, + }, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update CephFilesystem %q: %w", cephFilesystemName, err) + } + + return b.waitForCephFilesystemReady() +} + +// DeployCephFilesystemSubVolumeGroup creates the "workspace-volumes" SubVolumeGroup on the CephFS filesystem. 
+func (b *LocalBootstrapper) DeployCephFilesystemSubVolumeGroup() error { + svg := &rookcephv1.CephFilesystemSubVolumeGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: cephSubVolumeGroupName, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, svg, func() error { + svg.Spec = rookcephv1.CephFilesystemSubVolumeGroupSpec{ + FilesystemName: cephFilesystemName, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update CephFilesystemSubVolumeGroup %q: %w", cephSubVolumeGroupName, err) + } + + return nil +} + +// DeployCephUsers creates CephClient CRDs for the Ceph auth users required by Codesphere. +func (b *LocalBootstrapper) DeployCephUsers() error { + clients := []cephClientDef{ + { + name: "cephfs-admin-blue", + caps: map[string]string{ + "mds": "allow *", + "mon": "allow *", + "osd": "allow *", + "mgr": "allow *", + }, + }, + { + name: "cephfs-codesphere-admin", + caps: map[string]string{ + "mon": "allow r", + "osd": fmt.Sprintf( + "allow rwx pool=cephfs.%s.meta,allow rwx pool=cephfs.%s.data", + cephFilesystemName, cephFilesystemName, + ), + }, + }, + } + + for _, def := range clients { + cc := &rookcephv1.CephClient{ + ObjectMeta: metav1.ObjectMeta{ + Name: def.name, + Namespace: rookNamespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, cc, func() error { + cc.Spec = rookcephv1.ClientSpec{ + Caps: def.caps, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update CephClient %q: %w", def.name, err) + } + + if err := b.waitForCephClientReady(def.name); err != nil { + return err + } + } + + return nil +} + +// ReadCephCredentials reads all Ceph credentials from the cluster: +// - FSID from CephCluster status +// - Custom user keys from CephClient-generated K8s Secrets +// - CSI user keys from Rook-managed K8s Secrets +func (b *LocalBootstrapper) ReadCephCredentials() (*CephCredentials, error) { + fsid, err := b.readCephFSID() + 
if err != nil { + return nil, err + } + + cephfsAdmin, err := b.readCephClientSecret("cephfs-admin-blue") + if err != nil { + return nil, err + } + + cephfsAdminCodesphere, err := b.readCephClientSecret("cephfs-codesphere-admin") + if err != nil { + return nil, err + } + + csiRBDNode, err := b.readCSISecret("rook-csi-rbd-node", "userID", "userKey") + if err != nil { + return nil, err + } + + csiRBDProvisioner, err := b.readCSISecret("rook-csi-rbd-provisioner", "userID", "userKey") + if err != nil { + return nil, err + } + + csiCephFSNode, err := b.readCSISecret("rook-csi-cephfs-node", "userID", "userKey") + if err != nil { + return nil, err + } + + csiCephFSProvisioner, err := b.readCSISecret("rook-csi-cephfs-provisioner", "userID", "userKey") + if err != nil { + return nil, err + } + + return &CephCredentials{ + FSID: fsid, + CephfsAdmin: *cephfsAdmin, + CephfsAdminCodesphere: *cephfsAdminCodesphere, + CSIRBDNode: *csiRBDNode, + CSIRBDProvisioner: *csiRBDProvisioner, + CSICephFSNode: *csiCephFSNode, + CSICephFSProvisioner: *csiCephFSProvisioner, + }, nil +} + +// readCephFSID reads the Ceph FSID from the CephCluster status. +func (b *LocalBootstrapper) readCephFSID() (string, error) { + cluster := &rookcephv1.CephCluster{} + key := client.ObjectKey{Name: rookClusterName, Namespace: rookNamespace} + if err := b.kubeClient.Get(b.ctx, key, cluster); err != nil { + return "", fmt.Errorf("failed to get CephCluster %q: %w", rookClusterName, err) + } + + if cluster.Status.CephStatus == nil || cluster.Status.CephStatus.FSID == "" { + return "", fmt.Errorf("CephCluster %q does not have an FSID in its status yet", rookClusterName) + } + + return cluster.Status.CephStatus.FSID, nil +} + +// readCephClientSecret reads the key from the K8s Secret created by the Rook operator for a CephClient CR. +// The secret is named "rook-ceph-client-" in the rook-ceph namespace. 
+func (b *LocalBootstrapper) readCephClientSecret(name string) (*CephUserCredentials, error) { + secretName := "rook-ceph-client-" + name + secret := &corev1.Secret{} + key := client.ObjectKey{Name: secretName, Namespace: rookNamespace} + if err := b.kubeClient.Get(b.ctx, key, secret); err != nil { + return nil, fmt.Errorf("failed to get CephClient secret %q: %w", secretName, err) + } + + userKey, ok := secret.Data[name] + if !ok { + return nil, fmt.Errorf("CephClient secret %q does not contain key %q", secretName, name) + } + + return &CephUserCredentials{ + Entity: "client." + name, + Key: string(userKey), + }, nil +} + +// readCSISecret reads a Rook-managed CSI secret from the rook-ceph namespace. +func (b *LocalBootstrapper) readCSISecret(secretName, idKey, keyKey string) (*CephUserCredentials, error) { + secret := &corev1.Secret{} + key := client.ObjectKey{Name: secretName, Namespace: rookNamespace} + if err := b.kubeClient.Get(b.ctx, key, secret); err != nil { + return nil, fmt.Errorf("failed to get CSI secret %q: %w", secretName, err) + } + + userID, ok := secret.Data[idKey] + if !ok { + return nil, fmt.Errorf("CSI secret %q does not contain key %q", secretName, idKey) + } + + userKey, ok := secret.Data[keyKey] + if !ok { + return nil, fmt.Errorf("CSI secret %q does not contain key %q", secretName, keyKey) + } + + return &CephUserCredentials{ + Entity: string(userID), + Key: string(userKey), + }, nil +} + +// waitForCephFilesystemReady polls until the CephFilesystem reaches the Ready phase. 
+func (b *LocalBootstrapper) waitForCephFilesystemReady() error { + ctx, cancel := context.WithTimeout(b.ctx, cephFilesystemReadyTimeout) + defer cancel() + + fsKey := client.ObjectKey{Name: cephFilesystemName, Namespace: rookNamespace} + + steps := int(cephFilesystemReadyTimeout / cephReadyPollInterval) + if steps < 1 { + steps = 1 + } + + backoff := wait.Backoff{ + Duration: cephReadyPollInterval, + Factor: 1.0, + Jitter: 0.1, + Steps: steps, + } + + lastPhase := "" + + err := retry.OnError(backoff, isRetryableWaitError, func() error { + if err := ctx.Err(); err != nil { + return err + } + + fs := &rookcephv1.CephFilesystem{} + if err := b.kubeClient.Get(ctx, fsKey, fs); err != nil { + if apierrors.IsNotFound(err) { + return &retryableWaitError{err: fmt.Errorf("CephFilesystem %q not found yet", cephFilesystemName)} + } + return err + } + + if fs.Status != nil { + lastPhase = string(fs.Status.Phase) + if fs.Status.Phase == rookcephv1.ConditionReady { + return nil + } + } + + return &retryableWaitError{err: fmt.Errorf( + "CephFilesystem %q is not ready yet (phase=%q)", + cephFilesystemName, lastPhase, + )} + }) + if err == nil { + return nil + } + + if isRetryableWaitError(err) { + return fmt.Errorf("timed out waiting for CephFilesystem %q to become ready (phase=%q)", cephFilesystemName, lastPhase) + } + + return fmt.Errorf("failed waiting for CephFilesystem %q: %w", cephFilesystemName, err) +} + +// waitForCephClientReady polls until the CephClient reaches the Ready phase. 
+func (b *LocalBootstrapper) waitForCephClientReady(name string) error {
+	// NOTE(review): mirrors waitForCephFilesystemReady almost line-for-line;
+	// consider extracting a shared generic "wait for Rook CR Ready" helper.
+	ctx, cancel := context.WithTimeout(b.ctx, cephClientReadyTimeout)
+	defer cancel()
+
+	ccKey := client.ObjectKey{Name: name, Namespace: rookNamespace}
+
+	// Bound the number of polls so the backoff cannot outlive the timeout.
+	steps := int(cephClientReadyTimeout / cephReadyPollInterval)
+	if steps < 1 {
+		steps = 1
+	}
+
+	// Factor 1.0 yields fixed-interval polling (plus a little jitter).
+	backoff := wait.Backoff{
+		Duration: cephReadyPollInterval,
+		Factor:   1.0,
+		Jitter:   0.1,
+		Steps:    steps,
+	}
+
+	// lastPhase records the most recently observed phase for error reporting.
+	lastPhase := ""
+
+	err := retry.OnError(backoff, isRetryableWaitError, func() error {
+		// Stop polling as soon as the timeout context expires.
+		if err := ctx.Err(); err != nil {
+			return err
+		}
+
+		cc := &rookcephv1.CephClient{}
+		if err := b.kubeClient.Get(ctx, ccKey, cc); err != nil {
+			// Not-found is expected right after creation; keep polling.
+			if apierrors.IsNotFound(err) {
+				return &retryableWaitError{err: fmt.Errorf("CephClient %q not found yet", name)}
+			}
+			return err
+		}
+
+		if cc.Status != nil {
+			lastPhase = string(cc.Status.Phase)
+			if cc.Status.Phase == rookcephv1.ConditionReady {
+				return nil
+			}
+		}
+
+		// Wrap as retryable so retry.OnError keeps polling until Steps is spent.
+		return &retryableWaitError{err: fmt.Errorf(
+			"CephClient %q is not ready yet (phase=%q)",
+			name, lastPhase,
+		)}
+	})
+	if err == nil {
+		return nil
+	}
+
+	// A retryable error surfacing here means the retries (i.e. the timeout)
+	// were exhausted without the client becoming Ready.
+	if isRetryableWaitError(err) {
+		return fmt.Errorf("timed out waiting for CephClient %q to become ready (phase=%q)", name, lastPhase)
+	}
+
+	return fmt.Errorf("failed waiting for CephClient %q: %w", name, err)
+}
diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go
new file mode 100644
index 00000000..dcf5db66
--- /dev/null
+++ b/internal/bootstrap/local/installer.go
@@ -0,0 +1,204 @@
+// Copyright (c) Codesphere Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package local
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/codesphere-cloud/oms/internal/portal"
+	"github.com/codesphere-cloud/oms/internal/util"
+)
+
+// installerSkipSteps lists the installer steps to skip: "ceph" and
+// "kubernetes" are already handled by the local bootstrap (Rook for Ceph,
+// CNPG for PostgreSQL); "load-container-images" is skipped as well —
+// presumably images are provided out of band (confirm with the caller).
+var installerSkipSteps = []string{"load-container-images", "ceph", "kubernetes"} + +// installerArtifactFilename is the artifact to download from the OMS portal. +const installerArtifactFilename = "installer.tar.gz" + +// DownloadInstallerPackage downloads the Codesphere installer package from the +// OMS portal, similar to how the GCP bootstrapper fetches it onto a jumpbox. +// The package is downloaded into the directory that contains the config/secrets +// files and its on-disk filename is returned. +func (b *LocalBootstrapper) DownloadInstallerPackage() (string, error) { + version := b.Env.InstallVersion + hash := b.Env.InstallHash + + if version == "" { + return "", fmt.Errorf("install version is required to download from the portal") + } + if hash == "" { + return "", fmt.Errorf("install hash must be set when install version is set") + } + + log.Printf("Downloading Codesphere package %s (hash %s) from the OMS portal...", version, hash) + + p := portal.NewPortalClient() + + build, err := p.GetBuild(portal.CodesphereProduct, version, hash) + if err != nil { + return "", fmt.Errorf("failed to get build from portal: %w", err) + } + + download, err := build.GetBuildForDownload(installerArtifactFilename) + if err != nil { + return "", fmt.Errorf("artifact %q not found in build: %w", installerArtifactFilename, err) + } + + fullFilename := build.BuildPackageFilename(installerArtifactFilename) + destPath := filepath.Join(filepath.Dir(b.Env.InstallConfigPath), fullFilename) + + // Support resuming a partial download. 
+ out, err := b.fw.OpenAppend(destPath) + if err != nil { + out, err = b.fw.Create(destPath) + if err != nil { + return "", fmt.Errorf("failed to create file %s: %w", destPath, err) + } + } + defer util.CloseFileIgnoreError(out) + + fileSize := 0 + fileInfo, err := out.Stat() + if err == nil { + fileSize = int(fileInfo.Size()) + } + + err = p.DownloadBuildArtifact(portal.CodesphereProduct, download, out, fileSize, false) + if err != nil { + return "", fmt.Errorf("failed to download build artifact: %w", err) + } + + // Verify integrity. + verifyFile, err := b.fw.Open(destPath) + if err != nil { + return "", fmt.Errorf("failed to open downloaded file for verification: %w", err) + } + defer util.CloseFileIgnoreError(verifyFile) + + if err := p.VerifyBuildArtifactDownload(verifyFile, download); err != nil { + return "", fmt.Errorf("artifact verification failed: %w", err) + } + + return destPath, nil +} + +// PrepareInstallerBundle resolves the installer package to a directory. +// It handles three cases: +// 1. Portal download: InstallVersion+InstallHash are set → download tar.gz, then extract. +// 2. Local tar.gz/tgz: InstallLocal points to an archive → extract. +// 3. Local directory: InstallLocal points to an already-unpacked directory → use as-is. +func (b *LocalBootstrapper) PrepareInstallerBundle() (string, error) { + var bundlePath string + + switch { + case b.Env.InstallVersion != "": + // Download from portal. + downloaded, err := b.DownloadInstallerPackage() + if err != nil { + return "", err + } + bundlePath = downloaded + + case b.Env.InstallLocal != "": + bundlePath = b.Env.InstallLocal + + default: + return "", fmt.Errorf("either --install-version or --install-local must be specified") + } + + info, err := os.Stat(bundlePath) + if err != nil { + return "", fmt.Errorf("cannot access installer bundle %q: %w", bundlePath, err) + } + + // Already an unpacked directory – use directly. 
+ if info.IsDir() { + log.Printf("Installer bundle is a directory, using as-is: %s", bundlePath) + return bundlePath, nil + } + + // Treat as tar.gz archive – extract alongside the archive. + if !strings.HasSuffix(bundlePath, ".tar.gz") && !strings.HasSuffix(bundlePath, ".tgz") { + return "", fmt.Errorf("installer bundle %q is neither a directory nor a .tar.gz/.tgz archive", bundlePath) + } + + destDir := strings.TrimSuffix(strings.TrimSuffix(bundlePath, ".gz"), ".tar") + destDir = strings.TrimSuffix(destDir, ".tgz") + if destDir == bundlePath { + destDir = bundlePath + "-unpacked" + } + + log.Printf("Extracting installer bundle %s → %s", bundlePath, destDir) + if err := util.ExtractTarGz(b.fw, bundlePath, destDir); err != nil { + return "", fmt.Errorf("failed to extract installer bundle: %w", err) + } + + return destDir, nil +} + +// RunInstaller executes the Codesphere private-cloud-installer.js from the +// prepared bundle directory, skipping infrastructure steps already performed +// by the local bootstrap (Ceph, Kubernetes/PostgreSQL). +func (b *LocalBootstrapper) RunInstaller() error { + if b.Env.InstallVersion == "" && b.Env.InstallLocal == "" { + log.Println("No installer package specified, skipping Codesphere installation.") + return nil + } + + bundleDir, err := b.PrepareInstallerBundle() + if err != nil { + return fmt.Errorf("failed to prepare installer bundle: %w", err) + } + + nodePath := filepath.Join(bundleDir, "node") + + installerPath := filepath.Join(bundleDir, "private-cloud-installer.js") + archivePath := filepath.Join(bundleDir, "deps.tar.gz") + + // Resolve absolute paths for config and key so the installer finds them + // regardless of its working directory. 
+ configPath, err := filepath.Abs(b.Env.InstallConfigPath) + if err != nil { + return fmt.Errorf("failed to resolve absolute config path: %w", err) + } + + privKeyPath := b.ageKeyPath + if privKeyPath == "" { + return fmt.Errorf("age key path is not set; cannot pass private key to installer") + } + privKeyPath, err = filepath.Abs(privKeyPath) + if err != nil { + return fmt.Errorf("failed to resolve absolute key path: %w", err) + } + + // Build command arguments. + cmdArgs := []string{ + installerPath, + "--archive", archivePath, + "--config", configPath, + "--privKey", privKeyPath, + } + for _, step := range installerSkipSteps { + cmdArgs = append(cmdArgs, "--skipStep", step) + } + + log.Printf("Running Codesphere installer: %s %s", nodePath, strings.Join(cmdArgs, " ")) + cmd := exec.Command(nodePath, cmdArgs...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + + if err := cmd.Run(); err != nil { + return fmt.Errorf("codesphere installer failed: %w", err) + } + + log.Println("Codesphere installer finished successfully.") + return nil +} diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index bb570533..6152acc9 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -45,18 +45,28 @@ type LocalBootstrapper struct { icg installer.InstallConfigManager // Environment Env *CodesphereEnvironment + // cephCredentials holds the Ceph auth credentials read after setup. + cephCredentials *CephCredentials + // ageRecipient is the age public key used for SOPS vault encryption. + ageRecipient string + // ageKeyPath is the filesystem path to the age private key file. 
+ ageKeyPath string } type CodesphereEnvironment struct { BaseDomain string `json:"base_domain"` Experiments []string `json:"experiments"` FeatureFlags []string `json:"feature_flags"` + // Installer + InstallVersion string `json:"install_version"` + InstallHash string `json:"install_hash"` + InstallLocal string `json:"install_local"` // Config ExistingConfigUsed bool `json:"-"` InstallConfigPath string `json:"-"` SecretsFilePath string `json:"-"` InstallConfig *files.RootConfig `json:"-"` - Secrets *files.InstallVault `json:"-"` + Vault *files.InstallVault `json:"-"` } func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, fw util.FileIO, icg installer.InstallConfigManager, env *CodesphereEnvironment) *LocalBootstrapper { @@ -81,6 +91,11 @@ func (b *LocalBootstrapper) Bootstrap() error { return fmt.Errorf("failed to ensure secrets: %w", err) } + err = b.stlog.Step("Resolve age encryption key", b.ResolveAgeKey) + if err != nil { + return fmt.Errorf("failed to resolve age encryption key: %w", err) + } + err = b.stlog.Step("Ensure codesphere system namespace", b.EnsureCodesphereSystemNamespace) if err != nil { return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) @@ -102,6 +117,33 @@ func (b *LocalBootstrapper) Bootstrap() error { return err } + err = b.stlog.Substep("Deploy CephFS filesystem", b.DeployCephFilesystem) + if err != nil { + return err + } + + err = b.stlog.Substep("Create CephFS SubVolumeGroup", b.DeployCephFilesystemSubVolumeGroup) + if err != nil { + return err + } + + err = b.stlog.Substep("Create Ceph users", b.DeployCephUsers) + if err != nil { + return err + } + + err = b.stlog.Substep("Read Ceph credentials", func() error { + creds, err := b.ReadCephCredentials() + if err != nil { + return err + } + b.cephCredentials = creds + return nil + }) + if err != nil { + return err + } + return nil }) if err != nil { @@ -130,6 +172,11 @@ func (b *LocalBootstrapper) Bootstrap() 
error { return fmt.Errorf("failed to update install config: %w", err) } + err = b.stlog.Step("Run Codesphere installer", b.RunInstaller) + if err != nil { + return fmt.Errorf("failed to run Codesphere installer: %w", err) + } + return nil } @@ -176,11 +223,24 @@ func (b *LocalBootstrapper) EnsureSecrets() error { } } - b.Env.Secrets = b.icg.GetVault() + b.Env.Vault = b.icg.GetVault() return nil } +func (b *LocalBootstrapper) ResolveAgeKey() error { + recipient, keyPath, err := installer.ResolveAgeKey(filepath.Dir(b.Env.SecretsFilePath)) + if err != nil { + return fmt.Errorf("failed to resolve age key: %w", err) + } + b.ageRecipient = recipient + b.ageKeyPath = keyPath + if keyPath != "" { + fmt.Printf("Using age key: %s\n", keyPath) + } + return nil +} + func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Secrets.BaseDir = filepath.Dir(b.Env.SecretsFilePath) b.Env.InstallConfig.Registry.ReplaceImagesInBom = false @@ -196,8 +256,20 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Postgres.Port = 5432 b.Env.InstallConfig.Postgres.Primary = nil b.Env.InstallConfig.Postgres.Replica = nil + pgPassword, err := b.ReadPostgresSuperuserPassword() + if err != nil { + return fmt.Errorf("failed to read PostgreSQL superuser password: %w", err) + } + b.Env.Vault.Secrets = append(b.Env.Vault.Secrets, files.SecretEntry{ + Name: "postgresPassword", + Fields: &files.SecretFields{ + Password: pgPassword, + }, + }) - // TODO: configure ceph values + if b.cephCredentials != nil { + b.addCephSecretsToVault(b.Env.Vault) + } b.Env.InstallConfig.Kubernetes = files.KubernetesConfig{ ManagedByCodesphere: false, @@ -307,9 +379,30 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { return fmt.Errorf("failed to write config file: %w", err) } - if err := b.icg.WriteVault(b.Env.SecretsFilePath, true); err != nil { - return fmt.Errorf("failed to write vault file: %w", err) + if err := 
b.icg.EncryptVault(b.Env.SecretsFilePath, true, b.ageRecipient); err != nil { + return fmt.Errorf("failed to write encrypted vault file: %w", err) } return nil } + +// addCephSecretsToVault appends Ceph credentials to the vault as SecretEntry items. +// These mirror the secrets that the JS installer stores via SecretManagerSops: +// - cephFsId (password = FSID) +// - cephfsAdmin, cephfsAdminCodesphere (password = auth key) +// - csiRbdNode, csiRbdProvisioner, csiCephfsNode, csiCephfsProvisioner, csiOperator (password = auth key) +func (b *LocalBootstrapper) addCephSecretsToVault(vault *files.InstallVault) { + creds := b.cephCredentials + + vault.Secrets = append(vault.Secrets, + files.SecretEntry{Name: "cephFsId", Fields: &files.SecretFields{Password: creds.FSID}}, + files.SecretEntry{Name: "cephfsAdmin", Fields: &files.SecretFields{Password: creds.CephfsAdmin.Key}}, + files.SecretEntry{Name: "cephfsAdminCodesphere", Fields: &files.SecretFields{Password: creds.CephfsAdminCodesphere.Key}}, + files.SecretEntry{Name: "csiRbdNode", Fields: &files.SecretFields{Password: creds.CSIRBDNode.Key}}, + files.SecretEntry{Name: "csiRbdProvisioner", Fields: &files.SecretFields{Password: creds.CSIRBDProvisioner.Key}}, + files.SecretEntry{Name: "csiCephfsNode", Fields: &files.SecretFields{Password: creds.CSICephFSNode.Key}}, + files.SecretEntry{Name: "csiCephfsProvisioner", Fields: &files.SecretFields{Password: creds.CSICephFSProvisioner.Key}}, + // csiOperator is managed by Rook internally; provide a dummy value for vault compatibility. 
+ files.SecretEntry{Name: "csiOperator", Fields: &files.SecretFields{Password: "dummy"}}, + ) +} diff --git a/internal/bootstrap/local/postgres.go b/internal/bootstrap/local/postgres.go index 8834f680..eab3cdb4 100644 --- a/internal/bootstrap/local/postgres.go +++ b/internal/bootstrap/local/postgres.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "os" "os/exec" "strings" "time" @@ -49,7 +48,7 @@ func (b *LocalBootstrapper) InstallCloudNativePGHelmChart() error { return fmt.Errorf("failed to update helm repo %q: %w (%s)", cnpgRepoName, err, strings.TrimSpace(string(repoUpdateOut))) } - cmd := exec.CommandContext( + out, err := exec.CommandContext( b.ctx, "helm", "upgrade", @@ -63,11 +62,9 @@ func (b *LocalBootstrapper) InstallCloudNativePGHelmChart() error { "--set", "resources.requests.cpu=0", "--set", "resources.requests.memory=0", "--wait", - ) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to install helm chart %q: %w", cnpgChartRef, err) + ).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to install helm chart %q: %w (%s)", cnpgChartRef, err, strings.TrimSpace(string(out))) } return nil @@ -94,6 +91,7 @@ func (b *LocalBootstrapper) DeployPostgresDatabase() error { Database: cnpgDatabaseName, }, }, + EnableSuperuserAccess: ptr.To(true), } return nil }) diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go index 420dd18f..647bf561 100644 --- a/internal/bootstrap/local/rook.go +++ b/internal/bootstrap/local/rook.go @@ -158,7 +158,7 @@ func (b *LocalBootstrapper) InstallRookHelmChart() error { return fmt.Errorf("failed to write Helm values file: %w", err) } - cmd := exec.CommandContext( + deployOut, err := exec.CommandContext( b.ctx, "helm", "upgrade", @@ -170,11 +170,9 @@ func (b *LocalBootstrapper) InstallRookHelmChart() error { "--create-namespace", "--values", valuesPath, "--wait", - ) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := 
cmd.Run(); err != nil { - return fmt.Errorf("failed to install helm chart %q: %w", rookChartRef, err) + ).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to install helm chart %q: %w (%s)", rookChartRef, err, strings.TrimSpace(string(deployOut))) } return nil diff --git a/internal/installer/config_manager.go b/internal/installer/config_manager.go index 97166bda..823f6eae 100644 --- a/internal/installer/config_manager.go +++ b/internal/installer/config_manager.go @@ -8,6 +8,7 @@ import ( "io" "net" "net/url" + "os" "github.com/codesphere-cloud/oms/internal/installer/files" "github.com/codesphere-cloud/oms/internal/util" @@ -33,6 +34,7 @@ type InstallConfigManager interface { GenerateSecrets() error WriteInstallConfig(configPath string, withComments bool) error WriteVault(vaultPath string, withComments bool) error + EncryptVault(vaultPath string, withComments bool, ageRecipient string) error } type InstallConfig struct { @@ -41,6 +43,11 @@ type InstallConfig struct { Vault *files.InstallVault } +// SetFileIO overrides the file I/O implementation (useful for testing). 
+func (g *InstallConfig) SetFileIO(fio util.FileIO) { + g.fileIO = fio +} + func NewInstallConfigManager() InstallConfigManager { config := files.NewRootConfig() return &InstallConfig{ @@ -236,7 +243,7 @@ func (g *InstallConfig) WriteVault(vaultPath string, withComments bool) error { return fmt.Errorf("no configuration provided - config is nil") } - vault := g.Config.ExtractVault() + vault := g.Config.ExtractVault(g.GetVault()) vaultYAML, err := vault.Marshal() if err != nil { return fmt.Errorf("failed to marshal vault.yaml: %w", err) @@ -253,6 +260,55 @@ func (g *InstallConfig) WriteVault(vaultPath string, withComments bool) error { return nil } +func (g *InstallConfig) EncryptVault(vaultPath string, withComments bool, ageRecipient string) error { + if g.Config == nil { + return fmt.Errorf("no configuration provided - config is nil") + } + + vault := g.Config.ExtractVault(g.GetVault()) + vaultYAML, err := vault.Marshal() + if err != nil { + return fmt.Errorf("failed to marshal vault.yaml: %w", err) + } + + if withComments { + vaultYAML = AddVaultComments(vaultYAML) + } + + // Write plaintext vault to a temporary file. + tmpFile, err := os.CreateTemp("", "vault-*.yaml") + if err != nil { + return fmt.Errorf("failed to create temp file for vault encryption: %w", err) + } + tmpPath := tmpFile.Name() + defer os.Remove(tmpPath) + + if _, err := tmpFile.Write(vaultYAML); err != nil { + tmpFile.Close() + return fmt.Errorf("failed to write decrypted vault to temp file: %w", err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file: %w", err) + } + + // Encrypt the temp file in-place using SOPS + age. + if err := encryptFileWithSOPS(tmpPath, ageRecipient); err != nil { + return err + } + + // Read the encrypted content and write to the final vault path. 
+ encryptedData, err := os.ReadFile(tmpPath) + if err != nil { + return fmt.Errorf("failed to read encrypted vault: %w", err) + } + + if err := g.fileIO.CreateAndWrite(vaultPath, encryptedData, "Encrypted Secrets"); err != nil { + return err + } + + return nil +} + func AddConfigComments(yamlData []byte) []byte { header := `# Codesphere Installer Configuration # Generated by OMS CLI diff --git a/internal/installer/config_manager_test.go b/internal/installer/config_manager_test.go index 7b1c6810..a7680473 100644 --- a/internal/installer/config_manager_test.go +++ b/internal/installer/config_manager_test.go @@ -496,7 +496,7 @@ var _ = Describe("ConfigManager", func() { err = configManager.GenerateSecrets() Expect(err).ToNot(HaveOccurred()) - vault := configManager.Config.ExtractVault() + vault := configManager.Config.ExtractVault(nil) configManager.Vault = vault errors := configManager.ValidateVault() diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 0b053ec1..0ca6c50f 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -528,10 +528,13 @@ func (c *RootConfig) ExtractWorkspaceDockerfiles() map[string]string { return dockerfiles } -func (c *RootConfig) ExtractVault() *InstallVault { +func (c *RootConfig) ExtractVault(baseVault *InstallVault) *InstallVault { vault := &InstallVault{ Secrets: []SecretEntry{}, } + if baseVault != nil { + vault.Secrets = append(vault.Secrets, baseVault.Secrets...) 
+ } c.addCodesphereSecrets(vault) c.addIngressCASecret(vault) diff --git a/internal/installer/mocks.go b/internal/installer/mocks.go index 79d4d7af..acc48e11 100644 --- a/internal/installer/mocks.go +++ b/internal/installer/mocks.go @@ -707,6 +707,69 @@ func (_c *MockInstallConfigManager_WriteVault_Call) RunAndReturn(run func(vaultP return _c } +// EncryptVault provides a mock function for the type MockInstallConfigManager +func (_mock *MockInstallConfigManager) EncryptVault(vaultPath string, withComments bool, ageRecipient string) error { + ret := _mock.Called(vaultPath, withComments, ageRecipient) + + if len(ret) == 0 { + panic("no return value specified for EncryptVault") + } + + var r0 error + if returnFunc, ok := ret.Get(0).(func(string, bool, string) error); ok { + r0 = returnFunc(vaultPath, withComments, ageRecipient) + } else { + r0 = ret.Error(0) + } + return r0 +} + +// MockInstallConfigManager_EncryptVault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EncryptVault' +type MockInstallConfigManager_EncryptVault_Call struct { + *mock.Call +} + +// EncryptVault is a helper method to define mock.On call +// - vaultPath string +// - withComments bool +// - ageRecipient string +func (_e *MockInstallConfigManager_Expecter) EncryptVault(vaultPath interface{}, withComments interface{}, ageRecipient interface{}) *MockInstallConfigManager_EncryptVault_Call { + return &MockInstallConfigManager_EncryptVault_Call{Call: _e.mock.On("EncryptVault", vaultPath, withComments, ageRecipient)} +} + +func (_c *MockInstallConfigManager_EncryptVault_Call) Run(run func(vaultPath string, withComments bool, ageRecipient string)) *MockInstallConfigManager_EncryptVault_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 string + if args[0] != nil { + arg0 = args[0].(string) + } + var arg1 bool + if args[1] != nil { + arg1 = args[1].(bool) + } + var arg2 string + if args[2] != nil { + arg2 = args[2].(string) + } + run( + arg0, + arg1, 
+ arg2, + ) + }) + return _c +} + +func (_c *MockInstallConfigManager_EncryptVault_Call) Return(err error) *MockInstallConfigManager_EncryptVault_Call { + _c.Call.Return(err) + return _c +} + +func (_c *MockInstallConfigManager_EncryptVault_Call) RunAndReturn(run func(vaultPath string, withComments bool, ageRecipient string) error) *MockInstallConfigManager_EncryptVault_Call { + _c.Call.Return(run) + return _c +} + // NewMockK0sManager creates a new instance of MockK0sManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewMockK0sManager(t interface { diff --git a/internal/installer/secrets_test.go b/internal/installer/secrets_test.go index b99fec1c..265f9aa0 100644 --- a/internal/installer/secrets_test.go +++ b/internal/installer/secrets_test.go @@ -56,7 +56,7 @@ var _ = Describe("ExtractVault", func() { Registry: &files.RegistryConfig{}, } - vault := config.ExtractVault() + vault := config.ExtractVault(nil) Expect(vault.Secrets).NotTo(BeEmpty()) @@ -128,7 +128,7 @@ var _ = Describe("ExtractVault", func() { DomainAuthPublicKey: "test-pub", } - vault := config.ExtractVault() + vault := config.ExtractVault(nil) kubeConfigFound := false for _, secret := range vault.Secrets { @@ -156,7 +156,7 @@ var _ = Describe("ExtractVault", func() { DomainAuthPublicKey: "test", } - vault := config.ExtractVault() + vault := config.ExtractVault(nil) for _, service := range services { foundUser := false diff --git a/internal/installer/vault_encryption.go b/internal/installer/vault_encryption.go new file mode 100644 index 00000000..45415ba7 --- /dev/null +++ b/internal/installer/vault_encryption.go @@ -0,0 +1,141 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package installer + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +const ( + // Standard environment variables for SOPS age keys. 
+ envSOPSAgeKey = "SOPS_AGE_KEY" + envSOPSAgeKeyFile = "SOPS_AGE_KEY_FILE" + // Default age key file location (relative to $HOME). + defaultAgeKeyRelPath = ".config/sops/age/keys.txt" +) + +// ResolveAgeKey resolves an existing age key or generates a new one. +// It checks (in order): +// 1. SOPS_AGE_KEY environment variable (raw key content) +// 2. SOPS_AGE_KEY_FILE environment variable (path to key file) +// 3. Default location: ~/.config/sops/age/keys.txt +// 4. Generate a new key and write it to /age_key.txt +// +// Returns the age public key (recipient) and the path to the key file (empty when +// the key was supplied via SOPS_AGE_KEY). +func ResolveAgeKey(fallbackDir string) (recipient string, keyPath string, err error) { + // 1. SOPS_AGE_KEY env var – contains raw key content. + if raw := os.Getenv(envSOPSAgeKey); raw != "" { + recipient, err = parseAgeRecipient(raw) + if err != nil { + // Fall back to deriving the public key with age-keygen -y. + recipient, err = deriveRecipient(raw) + if err != nil { + return "", "", fmt.Errorf("SOPS_AGE_KEY is set but could not extract public key: %w", err) + } + } + return recipient, "", nil + } + + // 2. SOPS_AGE_KEY_FILE env var. + if keyFile := os.Getenv(envSOPSAgeKeyFile); keyFile != "" { + recipient, err = readRecipientFromFile(keyFile) + if err != nil { + return "", "", fmt.Errorf("failed to read age key from %s: %w", keyFile, err) + } + return recipient, keyFile, nil + } + + // 3. Default location: ~/.config/sops/age/keys.txt. + home, homeErr := os.UserHomeDir() + if homeErr == nil { + defaultPath := filepath.Join(home, defaultAgeKeyRelPath) + if _, statErr := os.Stat(defaultPath); statErr == nil { + recipient, err = readRecipientFromFile(defaultPath) + if err != nil { + return "", "", fmt.Errorf("failed to read age key from default location %s: %w", defaultPath, err) + } + return recipient, defaultPath, nil + } + } + + // 4. Generate a new key. 
+ keyPath = filepath.Join(fallbackDir, "age_key.txt") + recipient, err = generateAgeKey(keyPath) + if err != nil { + return "", "", fmt.Errorf("failed to generate age key: %w", err) + } + return recipient, keyPath, nil +} + +// parseAgeRecipient extracts the public key from age key file content. +// It looks for comment lines of the form "# public key: age1…". +func parseAgeRecipient(content string) (string, error) { + for _, line := range strings.Split(content, "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "# public key: ") { + return strings.TrimPrefix(line, "# public key: "), nil + } + } + return "", fmt.Errorf("no public key comment found in age key content") +} + +// readRecipientFromFile reads an age key file and extracts the public key. +func readRecipientFromFile(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + return parseAgeRecipient(string(data)) +} + +// deriveRecipient uses `age-keygen -y` to derive the public key from a private key. +func deriveRecipient(privateKey string) (string, error) { + cmd := exec.Command("age-keygen", "-y") + cmd.Stdin = strings.NewReader(privateKey) + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("age-keygen -y failed: %w", err) + } + recipient := strings.TrimSpace(string(out)) + if recipient == "" || !strings.HasPrefix(recipient, "age1") { + return "", fmt.Errorf("unexpected age-keygen output: %q", recipient) + } + return recipient, nil +} + +// generateAgeKey generates a new age keypair and writes it to the given path. +// Returns the public key (recipient). 
+func generateAgeKey(keyPath string) (string, error) { + if err := os.MkdirAll(filepath.Dir(keyPath), 0700); err != nil { + return "", fmt.Errorf("failed to create directory for age key: %w", err) + } + + cmd := exec.Command("age-keygen", "-o", keyPath) + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("age-keygen failed: %w: %s", err, out) + } + + // Read back the generated file to extract the public key. + recipient, err := readRecipientFromFile(keyPath) + if err != nil { + return "", fmt.Errorf("failed to read generated age key: %w", err) + } + return recipient, nil +} + +// encryptFileWithSOPS encrypts a file in-place using SOPS with the given age recipient. +func encryptFileWithSOPS(filePath string, recipient string) error { + cmd := exec.Command("sops", "--encrypt", "--age", recipient, "--in-place", filePath) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("sops encrypt failed: %w: %s", err, out) + } + return nil +} diff --git a/internal/installer/vault_encryption_test.go b/internal/installer/vault_encryption_test.go new file mode 100644 index 00000000..3d2652f6 --- /dev/null +++ b/internal/installer/vault_encryption_test.go @@ -0,0 +1,296 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package installer_test + +import ( + "os" + "os/exec" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/codesphere-cloud/oms/internal/installer" + "github.com/codesphere-cloud/oms/internal/installer/files" + "github.com/codesphere-cloud/oms/internal/util" +) + +func sopsAndAgeAvailable() bool { + if _, err := exec.LookPath("sops"); err != nil { + return false + } + if _, err := exec.LookPath("age-keygen"); err != nil { + return false + } + return true +} + +var _ = Describe("VaultEncryption", func() { + Describe("ResolveAgeKey", func() { + var ( + tmpDir string + origAgeKey string + origAgeKeyFile string + hasOrigAgeKey bool + hasOrigKeyFile bool + ) + + BeforeEach(func() { + var err error + tmpDir, err = os.MkdirTemp("", "age-test-*") + Expect(err).ToNot(HaveOccurred()) + + // Save and clear env vars to isolate tests. + origAgeKey, hasOrigAgeKey = os.LookupEnv("SOPS_AGE_KEY") + origAgeKeyFile, hasOrigKeyFile = os.LookupEnv("SOPS_AGE_KEY_FILE") + os.Unsetenv("SOPS_AGE_KEY") + os.Unsetenv("SOPS_AGE_KEY_FILE") + }) + + AfterEach(func() { + os.RemoveAll(tmpDir) + // Restore env vars. 
+ if hasOrigAgeKey { + os.Setenv("SOPS_AGE_KEY", origAgeKey) + } else { + os.Unsetenv("SOPS_AGE_KEY") + } + if hasOrigKeyFile { + os.Setenv("SOPS_AGE_KEY_FILE", origAgeKeyFile) + } else { + os.Unsetenv("SOPS_AGE_KEY_FILE") + } + }) + + Context("with SOPS_AGE_KEY env var containing key file content", func() { + It("should extract the recipient from the public key comment", func() { + keyContent := "# created: 2024-01-01T00:00:00Z\n# public key: age1testrecipient123\nAGE-SECRET-KEY-FAKE\n" + os.Setenv("SOPS_AGE_KEY", keyContent) + + recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(recipient).To(Equal("age1testrecipient123")) + Expect(keyPath).To(BeEmpty(), "keyPath should be empty when using SOPS_AGE_KEY env var") + }) + }) + + Context("with SOPS_AGE_KEY env var containing only a private key (no comment)", func() { + It("should fall back to age-keygen -y to derive the recipient", func() { + if !sopsAndAgeAvailable() { + Skip("age-keygen not available") + } + // Generate a real key to get valid content. + keyFile := filepath.Join(tmpDir, "real_key.txt") + out, err := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(out)) + + data, err := os.ReadFile(keyFile) + Expect(err).ToNot(HaveOccurred()) + + // Extract just the private key line (no comments). 
+ var privKeyLine string + for _, line := range splitLines(string(data)) { + if len(line) > 0 && line[0] != '#' { + privKeyLine = line + break + } + } + Expect(privKeyLine).ToNot(BeEmpty()) + + os.Setenv("SOPS_AGE_KEY", privKeyLine) + + recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(recipient).To(HavePrefix("age1")) + Expect(keyPath).To(BeEmpty()) + }) + }) + + Context("with SOPS_AGE_KEY_FILE env var pointing to a key file", func() { + It("should read the recipient from the referenced file", func() { + keyFile := filepath.Join(tmpDir, "keys.txt") + keyContent := "# created: 2024-01-01T00:00:00Z\n# public key: age1fromfile456\nAGE-SECRET-KEY-FAKE\n" + err := os.WriteFile(keyFile, []byte(keyContent), 0600) + Expect(err).ToNot(HaveOccurred()) + + os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + + recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(recipient).To(Equal("age1fromfile456")) + Expect(keyPath).To(Equal(keyFile)) + }) + + It("should return error if the file does not exist", func() { + os.Setenv("SOPS_AGE_KEY_FILE", filepath.Join(tmpDir, "nonexistent.txt")) + + _, _, err := installer.ResolveAgeKey(tmpDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to read age key")) + }) + + It("should return error if the file has no public key comment", func() { + keyFile := filepath.Join(tmpDir, "bad_key.txt") + err := os.WriteFile(keyFile, []byte("AGE-SECRET-KEY-NOPUBLICCOMMENT\n"), 0600) + Expect(err).ToNot(HaveOccurred()) + + os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + + _, _, err = installer.ResolveAgeKey(tmpDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no public key comment")) + }) + }) + + Context("with no env vars set", func() { + It("should generate a new key when no default location exists", func() { + if !sopsAndAgeAvailable() { + Skip("age-keygen not available") + } + + recipient, keyPath, 
err := installer.ResolveAgeKey(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(recipient).To(HavePrefix("age1")) + Expect(keyPath).To(Equal(filepath.Join(tmpDir, "age_key.txt"))) + + // Verify the key file was created. + Expect(keyPath).To(BeAnExistingFile()) + }) + }) + }) + + Describe("EncryptVault", func() { + It("should return error if config is nil", func() { + configManager := &installer.InstallConfig{ + Config: nil, + } + err := configManager.EncryptVault("/tmp/vault.yaml", false, "age1dummy") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no configuration provided")) + }) + + Context("end-to-end encryption with sops and age", func() { + It("should encrypt the vault file", func() { + if !sopsAndAgeAvailable() { + Skip("sops and/or age not available") + } + + tmpDir, err := os.MkdirTemp("", "encrypt-vault-test-*") + Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Generate a real age key. + keyFile := filepath.Join(tmpDir, "test_age_key.txt") + out, keygenErr := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() + Expect(keygenErr).ToNot(HaveOccurred(), string(out)) + + // Read the recipient from generated key. + keyData, err := os.ReadFile(keyFile) + Expect(err).ToNot(HaveOccurred()) + var recipient string + for _, line := range splitLines(string(keyData)) { + if len(line) > len("# public key: ") && line[:14] == "# public key: " { + recipient = line[14:] + break + } + } + Expect(recipient).To(HavePrefix("age1")) + + // Set up a config manager with a real filesystem writer and a simple config. + config := files.NewRootConfig() + configManager := &installer.InstallConfig{ + Config: &config, + Vault: &files.InstallVault{}, + } + configManager.SetFileIO(util.NewFilesystemWriter()) + + vaultPath := filepath.Join(tmpDir, "test.vault.yaml") + err = configManager.EncryptVault(vaultPath, false, recipient) + Expect(err).ToNot(HaveOccurred()) + + // Read the encrypted file from disk. 
+ written, err := os.ReadFile(vaultPath) + Expect(err).ToNot(HaveOccurred()) + + // The content should be SOPS-encrypted (contains "sops" marker). + Expect(string(written)).To(ContainSubstring("sops")) + Expect(string(written)).To(ContainSubstring("age")) + + // Verify we can decrypt it. + origKeyFile := os.Getenv("SOPS_AGE_KEY_FILE") + os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + defer os.Setenv("SOPS_AGE_KEY_FILE", origKeyFile) + + decOut, err := exec.Command("sops", "--decrypt", vaultPath).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(decOut)) + Expect(string(decOut)).To(ContainSubstring("secrets:")) + }) + + It("should encrypt the vault file with comments", func() { + if !sopsAndAgeAvailable() { + Skip("sops and/or age not available") + } + + tmpDir, err := os.MkdirTemp("", "encrypt-vault-comments-*") + Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Generate a real age key. + keyFile := filepath.Join(tmpDir, "age_key.txt") + out, keygenErr := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() + Expect(keygenErr).ToNot(HaveOccurred(), string(out)) + + keyData, err := os.ReadFile(keyFile) + Expect(err).ToNot(HaveOccurred()) + var recipient string + for _, line := range splitLines(string(keyData)) { + if len(line) > len("# public key: ") && line[:14] == "# public key: " { + recipient = line[14:] + break + } + } + Expect(recipient).To(HavePrefix("age1")) + + config := files.NewRootConfig() + configManager := &installer.InstallConfig{ + Config: &config, + Vault: &files.InstallVault{}, + } + configManager.SetFileIO(util.NewFilesystemWriter()) + + vaultPath := filepath.Join(tmpDir, "vault-with-comments.yaml") + err = configManager.EncryptVault(vaultPath, true, recipient) + Expect(err).ToNot(HaveOccurred()) + + written, err := os.ReadFile(vaultPath) + Expect(err).ToNot(HaveOccurred()) + Expect(string(written)).To(ContainSubstring("sops")) + + // Decrypt and verify the comments header survived. 
+ origKeyFile := os.Getenv("SOPS_AGE_KEY_FILE") + os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + defer os.Setenv("SOPS_AGE_KEY_FILE", origKeyFile) + + decOut, err := exec.Command("sops", "--decrypt", vaultPath).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(decOut)) + Expect(string(decOut)).To(ContainSubstring("secrets:")) + }) + }) + }) +}) + +func splitLines(s string) []string { + var lines []string + start := 0 + for i := 0; i < len(s); i++ { + if s[i] == '\n' { + lines = append(lines, s[start:i]) + start = i + 1 + } + } + if start < len(s) { + lines = append(lines, s[start:]) + } + return lines +} From 904de3389f883b914dd56a899391d5a62134a69d Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sun, 8 Mar 2026 09:06:47 +0100 Subject: [PATCH 09/31] feat: support dedicated install dir Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 19 +++++++++++++++++-- internal/bootstrap/local/installer.go | 2 +- internal/bootstrap/local/local.go | 1 + 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index c4afab94..a384f756 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -11,6 +11,7 @@ import ( stdio "io" "os" "os/exec" + "path/filepath" "strings" cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -69,8 +70,9 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.FeatureFlags, "feature-flags", []string{}, "Feature flags to enable in Codesphere installation (optional)") // Config - flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallConfigPath, "install-config", "config.yaml", "Path to install config file (optional)") - flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.SecretsFilePath, "secrets-file", "prod.vault.yaml", "Path to secrets files 
(optional)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallDir, "install-dir", ".installer", "Directory for config, secrets, and bundle files") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallConfigPath, "install-config", "", "Path to install config file (default: /config.yaml)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.SecretsFilePath, "secrets-file", "", "Path to secrets file (default: /prod.vault.yaml)") bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE parent.AddCommand(bootstrapLocalCmd.cmd) @@ -82,6 +84,19 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { return err } + // Resolve install-config and secrets-file defaults from install-dir. + if c.CodesphereEnv.InstallConfigPath == "" { + c.CodesphereEnv.InstallConfigPath = filepath.Join(c.CodesphereEnv.InstallDir, "config.yaml") + } + if c.CodesphereEnv.SecretsFilePath == "" { + c.CodesphereEnv.SecretsFilePath = filepath.Join(c.CodesphereEnv.InstallDir, "prod.vault.yaml") + } + + // Ensure the install directory exists. + if err := os.MkdirAll(c.CodesphereEnv.InstallDir, 0755); err != nil { + return fmt.Errorf("failed to create install directory %s: %w", c.CodesphereEnv.InstallDir, err) + } + if err := c.ValidatePrerequisites(ctx); err != nil { return err } diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index dcf5db66..da2c8f4e 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -52,7 +52,7 @@ func (b *LocalBootstrapper) DownloadInstallerPackage() (string, error) { } fullFilename := build.BuildPackageFilename(installerArtifactFilename) - destPath := filepath.Join(filepath.Dir(b.Env.InstallConfigPath), fullFilename) + destPath := filepath.Join(b.Env.InstallDir, fullFilename) // Support resuming a partial download. 
out, err := b.fw.OpenAppend(destPath) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 6152acc9..2abb44f3 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -62,6 +62,7 @@ type CodesphereEnvironment struct { InstallHash string `json:"install_hash"` InstallLocal string `json:"install_local"` // Config + InstallDir string `json:"-"` ExistingConfigUsed bool `json:"-"` InstallConfigPath string `json:"-"` SecretsFilePath string `json:"-"` From 43c6e8aa66a632e8de1d4717b84c456ad560462a Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sun, 8 Mar 2026 19:58:38 +0100 Subject: [PATCH 10/31] feat: add cluster setup Signed-off-by: Tim Schrodi --- .gitignore | 1 + internal/bootstrap/local/installer.go | 183 +++++++++-- internal/bootstrap/local/local.go | 338 +++++++++++++++++++- internal/bootstrap/local/rook.go | 4 +- internal/installer/config_manager.go | 51 --- internal/installer/files/config_yaml.go | 69 +++- internal/installer/vault_encryption.go | 28 +- internal/installer/vault_encryption_test.go | 121 ------- 8 files changed, 555 insertions(+), 240 deletions(-) diff --git a/.gitignore b/.gitignore index 0be0cf89..aa977312 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ vendor # local env .envrc .env +.installer # bin file bin/ diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index da2c8f4e..be90fde7 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -9,18 +9,19 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "strings" "github.com/codesphere-cloud/oms/internal/portal" "github.com/codesphere-cloud/oms/internal/util" ) -// installerSkipSteps lists the installer steps that are already handled by the -// local bootstrap (Ceph via Rook, Kubernetes setup, PostgreSQL via CNPG). 
-var installerSkipSteps = []string{"load-container-images", "ceph", "kubernetes"} +// installerComponentSteps lists the install-components.js steps executed +// locally (in order) instead of running the full private-cloud-installer. +var installerComponentSteps = []string{"setUpCluster", "codesphere"} // installerArtifactFilename is the artifact to download from the OMS portal. -const installerArtifactFilename = "installer.tar.gz" +const installerArtifactFilename = "installer-lite.tar.gz" // DownloadInstallerPackage downloads the Codesphere installer package from the // OMS portal, similar to how the GCP bootstrapper fetches it onto a jumpbox. @@ -45,15 +46,19 @@ func (b *LocalBootstrapper) DownloadInstallerPackage() (string, error) { if err != nil { return "", fmt.Errorf("failed to get build from portal: %w", err) } + fullFilename := build.BuildPackageFilename(installerArtifactFilename) + destPath := filepath.Join(b.Env.InstallDir, fullFilename) + + if b.fw.Exists(destPath) { + log.Printf("Installer package already exists at %s, skipping download", destPath) + return destPath, nil + } download, err := build.GetBuildForDownload(installerArtifactFilename) if err != nil { return "", fmt.Errorf("artifact %q not found in build: %w", installerArtifactFilename, err) } - fullFilename := build.BuildPackageFilename(installerArtifactFilename) - destPath := filepath.Join(b.Env.InstallDir, fullFilename) - // Support resuming a partial download. out, err := b.fw.OpenAppend(destPath) if err != nil { @@ -135,6 +140,11 @@ func (b *LocalBootstrapper) PrepareInstallerBundle() (string, error) { destDir = bundlePath + "-unpacked" } + if b.fw.Exists(destDir) { + log.Printf("Installer bundle is already extracted. 
Skipping extraction...\n") + return destDir, nil + } + log.Printf("Extracting installer bundle %s → %s", bundlePath, destDir) if err := util.ExtractTarGz(b.fw, bundlePath, destDir); err != nil { return "", fmt.Errorf("failed to extract installer bundle: %w", err) @@ -143,9 +153,89 @@ func (b *LocalBootstrapper) PrepareInstallerBundle() (string, error) { return destDir, nil } -// RunInstaller executes the Codesphere private-cloud-installer.js from the -// prepared bundle directory, skipping infrastructure steps already performed -// by the local bootstrap (Ceph, Kubernetes/PostgreSQL). +// symlinkLocalBinaries replaces bundled node, helm and kubectl binaries with +// symlinks to the locally installed versions. This is only done on non-Linux +// hosts because the bundled binaries are Linux x86_64 binaries that cannot run +// on macOS or other platforms. +func symlinkLocalBinaries(bundleDir string) error { + if runtime.GOOS == "linux" { + return nil + } + + for _, name := range []string{"node", "helm", "kubectl"} { + target := filepath.Join(bundleDir, name) + if err := symlinkBinary(name, target); err != nil { + return err + } + } + + return nil +} + +// symlinkDepsBinaries replaces bundled dependency binaries inside the +// extracted deps directory with symlinks to locally installed versions. +// This covers tools like sops and age which live under /sops/files/. +func symlinkDepsBinaries(depsDir string) error { + if runtime.GOOS == "linux" { + return nil + } + + // sops and age are resolved by install-components.js via + // resolveFileDependency(dependenciesDir, "sops", "") + // which maps to /sops/files/. 
+ sopsFilesDir := filepath.Join(depsDir, "sops", "files") + for _, name := range []string{"sops", "age", "age-keygen"} { + target := filepath.Join(sopsFilesDir, name) + if err := symlinkBinary(name, target); err != nil { + return err + } + } + + // sops and age are resolved by install-components.js via + // resolveFileDependency(dependenciesDir, "installer", "") + // which maps to /installer/files/. + installerFilesDir := filepath.Join(depsDir, "installer", "files") + for _, name := range []string{"kubectl", "helm", "node"} { + target := filepath.Join(installerFilesDir, name) + if err := symlinkBinary(name, target); err != nil { + return err + } + } + + return nil +} + +// symlinkBinary creates a symlink at target pointing to the locally installed +// binary identified by name (looked up via $PATH). +func symlinkBinary(name, target string) error { + localPath, err := exec.LookPath(name) + if err != nil { + return fmt.Errorf("cannot find %q on the host system: %w", name, err) + } + + // Resolve to an absolute path so the symlink is stable. + localPath, err = filepath.Abs(localPath) + if err != nil { + return fmt.Errorf("failed to resolve absolute path for %q: %w", name, err) + } + + // Remove the bundled binary (or an existing symlink) if present. + if err := os.Remove(target); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to remove bundled %q: %w", name, err) + } + + if err := os.Symlink(localPath, target); err != nil { + return fmt.Errorf("failed to symlink %q → %q: %w", target, localPath, err) + } + + log.Printf("Symlinked %s → %s", target, localPath) + return nil +} + +// RunInstaller extracts the deps.tar.gz archive locally and then runs the +// install-components.js script directly on the local machine for each +// required component step (setUpCluster, codesphere), instead of running +// the private-cloud-installer.js which orchestrates remote nodes via SSH. 
func (b *LocalBootstrapper) RunInstaller() error { if b.Env.InstallVersion == "" && b.Env.InstallLocal == "" { log.Println("No installer package specified, skipping Codesphere installation.") @@ -157,13 +247,42 @@ func (b *LocalBootstrapper) RunInstaller() error { return fmt.Errorf("failed to prepare installer bundle: %w", err) } - nodePath := filepath.Join(bundleDir, "node") + // On non-Linux hosts the bundled binaries are Linux ELF executables that + // cannot run natively. Replace them with symlinks to the host's versions. + if err := symlinkLocalBinaries(bundleDir); err != nil { + return fmt.Errorf("failed to symlink local binaries: %w", err) + } - installerPath := filepath.Join(bundleDir, "private-cloud-installer.js") + // Extract deps.tar.gz locally so that install-components.js can find + // all dependency binaries (helm charts, sops, etc.) on the local machine. archivePath := filepath.Join(bundleDir, "deps.tar.gz") + depsDir := filepath.Join(bundleDir, "deps") + + if b.fw.Exists(depsDir) { + log.Printf("deps directory already exists at %s, skipping extraction", depsDir) + } else { + log.Printf("Extracting deps.tar.gz → %s", depsDir) + if err := util.ExtractTarGz(b.fw, archivePath, depsDir); err != nil { + return fmt.Errorf("failed to extract deps.tar.gz: %w", err) + } + } + + // Symlink sops and age inside the extracted deps directory so that + // install-components.js uses the locally installed versions. + if err := symlinkDepsBinaries(depsDir); err != nil { + return fmt.Errorf("failed to symlink deps binaries: %w", err) + } - // Resolve absolute paths for config and key so the installer finds them + nodePath := filepath.Join(bundleDir, "node") + installerPath := filepath.Join(bundleDir, "install-components.js") + + // Resolve absolute paths so install-components.js finds them // regardless of its working directory. 
+ absDepsDir, err := filepath.Abs(depsDir) + if err != nil { + return fmt.Errorf("failed to resolve absolute deps dir: %w", err) + } + configPath, err := filepath.Abs(b.Env.InstallConfigPath) if err != nil { return fmt.Errorf("failed to resolve absolute config path: %w", err) @@ -178,25 +297,29 @@ func (b *LocalBootstrapper) RunInstaller() error { return fmt.Errorf("failed to resolve absolute key path: %w", err) } - // Build command arguments. - cmdArgs := []string{ - installerPath, - "--archive", archivePath, - "--config", configPath, - "--privKey", privKeyPath, - } - for _, step := range installerSkipSteps { - cmdArgs = append(cmdArgs, "--skipStep", step) - } + // Run each component step locally via install-components.js. + for _, component := range installerComponentSteps { + cmdArgs := []string{ + installerPath, + "--component", component, + "--configDir", filepath.Join(b.Env.InstallDir, "config"), + "--dependenciesDir", absDepsDir, + "--config", configPath, + "--privKey", privKeyPath, + } + + log.Printf("Running install-components.js --component %s", component) + log.Printf(" %s %s", nodePath, strings.Join(cmdArgs, " ")) + cmd := exec.Command(nodePath, cmdArgs...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin - log.Printf("Running Codesphere installer: %s %s", nodePath, strings.Join(cmdArgs, " ")) - cmd := exec.Command(nodePath, cmdArgs...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin + if err := cmd.Run(); err != nil { + return fmt.Errorf("install-components.js --component %s failed: %w", component, err) + } - if err := cmd.Run(); err != nil { - return fmt.Errorf("codesphere installer failed: %w", err) + log.Printf("Component %s installed successfully.", component) } log.Println("Codesphere installer finished successfully.") diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 2abb44f3..1c8cfc94 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -4,7 +4,9 @@ import ( "context" "errors" "fmt" + "os" "path/filepath" + "strings" "github.com/codesphere-cloud/oms/internal/bootstrap" "github.com/codesphere-cloud/oms/internal/installer" @@ -13,11 +15,15 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) const ( codesphereSystemNamespace = "codesphere-system" + codesphereNamespace = "codesphere" + workspacesNamespace = "workspaces" ) type retryableWaitError struct { @@ -97,9 +103,9 @@ func (b *LocalBootstrapper) Bootstrap() error { return fmt.Errorf("failed to resolve age encryption key: %w", err) } - err = b.stlog.Step("Ensure codesphere system namespace", b.EnsureCodesphereSystemNamespace) + err = b.stlog.Step("Ensure namespaces", b.EnsureNamespaces) if err != nil { - return fmt.Errorf("failed to ensure namespace %q: %w", codesphereSystemNamespace, err) + return fmt.Errorf("failed to ensure namespaces: %w", err) } err = b.stlog.Step("Install Rook and test Ceph cluster", func() error { @@ -145,6 +151,11 @@ func (b *LocalBootstrapper) Bootstrap() error { return err } + err = b.stlog.Substep("Create Ceph admin credential secrets", b.CreateCephAdminSecrets) + if err != nil { + 
return err + } + return nil }) if err != nil { @@ -181,17 +192,127 @@ func (b *LocalBootstrapper) Bootstrap() error { return nil } -func (b *LocalBootstrapper) EnsureCodesphereSystemNamespace() error { - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: codesphereSystemNamespace}, +func (b *LocalBootstrapper) EnsureNamespaces() error { + for _, ns := range []string{codesphereSystemNamespace, codesphereNamespace, workspacesNamespace} { + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: ns}, + } + + // Mark the workspaces namespace as owned by the cluster-config Helm + // release so that the chart can manage it during install/upgrade. + if ns == workspacesNamespace { + namespace.Labels = map[string]string{ + "app.kubernetes.io/managed-by": "Helm", + "meta.helm.sh/release-name": "cluster-config", + "meta.helm.sh/release-namespace": codesphereNamespace, + } + namespace.Annotations = map[string]string{ + "meta.helm.sh/release-name": "cluster-config", + "meta.helm.sh/release-namespace": codesphereNamespace, + } + } + + if err := b.kubeClient.Create(b.ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create namespace %q: %w", ns, err) + } + } + + // Create a dummy error-page-server Service in the codesphere namespace. + // The nginx ingress controller references this service as a default backend; + // without it the controller pods fail to start. 
+ errorPageSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "error-page-server", + Namespace: codesphereNamespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{ + {Port: 8080, TargetPort: intstr.FromInt32(8080)}, + }, + Selector: map[string]string{"app": "error-page-server"}, + }, + } + if err := b.kubeClient.Create(b.ctx, errorPageSvc); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create dummy error-page-server service: %w", err) + } + + return nil +} + +// CreateCephAdminSecrets creates the ceph-admin-credentials Secret in the +// codesphere and workspaces namespaces containing the CephFS admin credentials. +func (b *LocalBootstrapper) CreateCephAdminSecrets() error { + if b.cephCredentials == nil { + return fmt.Errorf("ceph credentials have not been read yet") } - if err := b.kubeClient.Create(b.ctx, namespace); err != nil && !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create namespace %q: %w", codesphereSystemNamespace, err) + + for _, ns := range []string{codesphereNamespace, workspacesNamespace} { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ceph-admin-credentials", + Namespace: ns, + }, + } + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, secret, func() error { + secret.Type = corev1.SecretTypeOpaque + secret.StringData = map[string]string{ + "ceph-username": b.cephCredentials.CephfsAdmin.Entity, + "ceph-secret": b.cephCredentials.CephfsAdmin.Key, + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create or update ceph-admin-credentials secret in namespace %q: %w", ns, err) + } } return nil } +// ReadClusterCIDRs reads the pod and service CIDRs from the running Kubernetes cluster. +// Pod CIDR is read from the first node's spec.podCIDR. +// Service CIDR is read from the kube-apiserver pod's --service-cluster-ip-range flag. 
+func (b *LocalBootstrapper) ReadClusterCIDRs() (podCIDR string, serviceCIDR string, err error) { + // Read pod CIDR from the first node. + nodeList := &corev1.NodeList{} + if err = b.kubeClient.List(b.ctx, nodeList); err != nil { + return "", "", fmt.Errorf("failed to list nodes: %w", err) + } + if len(nodeList.Items) == 0 { + return "", "", fmt.Errorf("no nodes found in cluster") + } + podCIDR = nodeList.Items[0].Spec.PodCIDR + if podCIDR == "" { + return "", "", fmt.Errorf("node %q does not have a podCIDR set", nodeList.Items[0].Name) + } + + // Read service CIDR from the kube-apiserver pod's --service-cluster-ip-range flag. + apiServerPod := &corev1.Pod{} + key := client.ObjectKey{Name: "kube-apiserver-" + nodeList.Items[0].Name, Namespace: "kube-system"} + if err = b.kubeClient.Get(b.ctx, key, apiServerPod); err != nil { + return "", "", fmt.Errorf("failed to get kube-apiserver pod: %w", err) + } + + for _, container := range apiServerPod.Spec.Containers { + for _, arg := range container.Command { + if strings.HasPrefix(arg, "--service-cluster-ip-range=") { + serviceCIDR = strings.TrimPrefix(arg, "--service-cluster-ip-range=") + break + } + } + if serviceCIDR != "" { + break + } + } + + if serviceCIDR == "" { + return "", "", fmt.Errorf("could not determine service CIDR from kube-apiserver pod") + } + + return podCIDR, serviceCIDR, nil +} + func (b *LocalBootstrapper) EnsureInstallConfig() error { if b.fw.Exists(b.Env.InstallConfigPath) { err := b.icg.LoadInstallConfigFromFile(b.Env.InstallConfigPath) @@ -243,7 +364,10 @@ func (b *LocalBootstrapper) ResolveAgeKey() error { } func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { - b.Env.InstallConfig.Secrets.BaseDir = filepath.Dir(b.Env.SecretsFilePath) + b.Env.InstallConfig.Secrets.BaseDir = filepath.Join(b.Env.InstallDir, "secrets") + if err := os.MkdirAll(b.Env.InstallConfig.Secrets.BaseDir, 0700); err != nil { + return fmt.Errorf("failed to create secrets base directory: %w", err) + } 
b.Env.InstallConfig.Registry.ReplaceImagesInBom = false b.Env.InstallConfig.Registry.LoadContainerImages = false @@ -268,6 +392,30 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { }, }) + // Store the active kubeconfig in the vault so that install-components.js + // can retrieve it via SecretManagerSops when deploying Helm charts. + kubeConfigContent, err := b.readKubeConfig() + if err != nil { + return fmt.Errorf("failed to read kubeconfig: %w", err) + } + b.Env.Vault.Secrets = append(b.Env.Vault.Secrets, files.SecretEntry{ + Name: "kubeConfig", + File: &files.SecretFile{ + Name: "kubeConfig", + Content: kubeConfigContent, + }, + }) + + b.Env.InstallConfig.Cluster.RookExternalCluster = &files.RookExternalClusterConfig{ + Enabled: false, + } + b.Env.InstallConfig.Cluster.PgOperator = &files.PgOperatorConfig{ + Enabled: false, + } + b.Env.InstallConfig.Cluster.RgwLoadBalancer = &files.RgwLoadBalancerConfig{ + Enabled: false, + } + b.Env.InstallConfig.Ceph = files.CephConfig{} if b.cephCredentials != nil { b.addCephSecretsToVault(b.Env.Vault) } @@ -275,22 +423,155 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Kubernetes = files.KubernetesConfig{ ManagedByCodesphere: false, } + + podCIDR, serviceCIDR, err := b.ReadClusterCIDRs() + if err != nil { + return fmt.Errorf("failed to read cluster CIDRs: %w", err) + } + b.Env.InstallConfig.Kubernetes.PodCIDR = podCIDR + b.Env.InstallConfig.Kubernetes.ServiceCIDR = serviceCIDR + + // Override resource requests to "0" for all monitoring components so they + // can be scheduled on resource-constrained local clusters. 
+ prometheusOverride := map[string]interface{}{ + "kube-prometheus-stack": map[string]interface{}{ + "prometheusOperator": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + "prometheus": map[string]interface{}{ + "prometheusSpec": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + }, + "prometheus-node-exporter": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + "kube-state-metrics": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + }, + } + + // Override cert-manager resource requests to "0" and reduce webhook replicas + // so components can be scheduled on resource-constrained local clusters. + b.Env.InstallConfig.Cluster.CertManager = &files.CertManagerConfig{ + Override: map[string]interface{}{ + "cert-manager": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + "cainjector": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + "webhook": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + "startupapicheck": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + }, + }, + } + b.Env.InstallConfig.Cluster.Monitoring = &files.MonitoringConfig{ Prometheus: &files.PrometheusConfig{ RemoteWrite: &files.RemoteWriteConfig{ Enabled: false, ClusterName: "local-test", }, + Override: prometheusOverride, + }, + BlackboxExporter: &files.BlackboxExporterConfig{ + 
Override: map[string]interface{}{ + "prometheus-blackbox-exporter": map[string]interface{}{ + "replicas": 1, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + }, + }, + PushGateway: &files.PushGatewayConfig{ + Override: map[string]interface{}{ + "prometheus-pushgateway": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, + }, + }, + }, + }, + Loki: &files.LokiConfig{ + Enabled: false, + }, + Grafana: &files.GrafanaConfig{ + Enabled: false, + }, + GrafanaAlloy: &files.GrafanaAlloyConfig{ + Enabled: false, }, } + b.Env.InstallConfig.Cluster.Gateway = files.GatewayConfig{ ServiceType: "LoadBalancer", + Override: map[string]interface{}{ + "ingress-nginx": map[string]interface{}{ + "controller": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "0", + "memory": "0", + }, + }, + }, + }, + }, } b.Env.InstallConfig.Cluster.PublicGateway = files.GatewayConfig{ ServiceType: "LoadBalancer", + Override: map[string]interface{}{ + "ingress-nginx": map[string]interface{}{ + "controller": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "0", + "memory": "0", + }, + }, + }, + }, + "nginx": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "0", + "memory": "0", + }, + }, + }, + }, } // TODO: certificates + b.Env.InstallConfig.Codesphere.CertIssuer = files.CertIssuerConfig{ + Type: "self-signed", + } b.Env.InstallConfig.Codesphere.Domain = b.Env.BaseDomain b.Env.InstallConfig.Codesphere.WorkspaceHostingBaseDomain = "ws." 
+ b.Env.BaseDomain @@ -380,8 +661,11 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { return fmt.Errorf("failed to write config file: %w", err) } - if err := b.icg.EncryptVault(b.Env.SecretsFilePath, true, b.ageRecipient); err != nil { - return fmt.Errorf("failed to write encrypted vault file: %w", err) + if err := b.icg.WriteVault(b.Env.SecretsFilePath, true); err != nil { + return fmt.Errorf("failed to write vault file: %w", err) + } + if err := installer.EncryptFileWithSOPS(b.Env.SecretsFilePath, filepath.Join(b.Env.InstallConfig.Secrets.BaseDir, "prod.vault.yaml"), b.ageRecipient); err != nil { + return fmt.Errorf("failed to encrypt vault file: %w", err) } return nil @@ -397,13 +681,33 @@ func (b *LocalBootstrapper) addCephSecretsToVault(vault *files.InstallVault) { vault.Secrets = append(vault.Secrets, files.SecretEntry{Name: "cephFsId", Fields: &files.SecretFields{Password: creds.FSID}}, - files.SecretEntry{Name: "cephfsAdmin", Fields: &files.SecretFields{Password: creds.CephfsAdmin.Key}}, - files.SecretEntry{Name: "cephfsAdminCodesphere", Fields: &files.SecretFields{Password: creds.CephfsAdminCodesphere.Key}}, - files.SecretEntry{Name: "csiRbdNode", Fields: &files.SecretFields{Password: creds.CSIRBDNode.Key}}, - files.SecretEntry{Name: "csiRbdProvisioner", Fields: &files.SecretFields{Password: creds.CSIRBDProvisioner.Key}}, - files.SecretEntry{Name: "csiCephfsNode", Fields: &files.SecretFields{Password: creds.CSICephFSNode.Key}}, - files.SecretEntry{Name: "csiCephfsProvisioner", Fields: &files.SecretFields{Password: creds.CSICephFSProvisioner.Key}}, + files.SecretEntry{Name: "cephfsAdmin", Fields: &files.SecretFields{Username: creds.CephfsAdmin.Entity, Password: creds.CephfsAdmin.Key}}, + files.SecretEntry{Name: "cephfsAdminCodesphere", Fields: &files.SecretFields{Username: creds.CephfsAdminCodesphere.Entity, Password: creds.CephfsAdminCodesphere.Key}}, + files.SecretEntry{Name: "csiRbdNode", Fields: &files.SecretFields{Username: 
creds.CSIRBDNode.Entity, Password: creds.CSIRBDNode.Key}}, + files.SecretEntry{Name: "csiRbdProvisioner", Fields: &files.SecretFields{Username: creds.CSIRBDProvisioner.Entity, Password: creds.CSIRBDProvisioner.Key}}, + files.SecretEntry{Name: "csiCephfsNode", Fields: &files.SecretFields{Username: creds.CSICephFSNode.Entity, Password: creds.CSICephFSNode.Key}}, + files.SecretEntry{Name: "csiCephfsProvisioner", Fields: &files.SecretFields{Username: creds.CSICephFSProvisioner.Entity, Password: creds.CSICephFSProvisioner.Key}}, // csiOperator is managed by Rook internally; provide a dummy value for vault compatibility. - files.SecretEntry{Name: "csiOperator", Fields: &files.SecretFields{Password: "dummy"}}, + files.SecretEntry{Name: "csiOperator", Fields: &files.SecretFields{Username: "client.csi-rbd-provisioner", Password: "dummy"}}, ) } + +// readKubeConfig reads the active kubeconfig file and returns its content. +// It checks $KUBECONFIG first, falling back to ~/.kube/config. +func (b *LocalBootstrapper) readKubeConfig() (string, error) { + kubeconfigPath := os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("failed to determine home directory: %w", err) + } + kubeconfigPath = filepath.Join(home, ".kube", "config") + } + + data, err := os.ReadFile(kubeconfigPath) + if err != nil { + return "", fmt.Errorf("failed to read kubeconfig from %s: %w", kubeconfigPath, err) + } + + return string(data), nil +} diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go index 647bf561..01a1e53f 100644 --- a/internal/bootstrap/local/rook.go +++ b/internal/bootstrap/local/rook.go @@ -299,7 +299,7 @@ func (b *LocalBootstrapper) WaitForTestCephClusterReady() error { err := b.kubeClient.Get(ctx, clusterKey, cluster) if err != nil { if apierrors.IsNotFound(err) { - return &retryableWaitError{err: fmt.Errorf("Ceph cluster %q not found yet", rookClusterName)} + return 
&retryableWaitError{err: fmt.Errorf("ceph cluster %q not found yet", rookClusterName)} } return err @@ -314,7 +314,7 @@ func (b *LocalBootstrapper) WaitForTestCephClusterReady() error { } return &retryableWaitError{err: fmt.Errorf( - "Ceph cluster is not ready yet (phase=%q, state=%q, message=%q)", + "ceph cluster is not ready yet (phase=%q, state=%q, message=%q)", lastPhase, lastState, lastMessage, diff --git a/internal/installer/config_manager.go b/internal/installer/config_manager.go index 823f6eae..1e6f8b88 100644 --- a/internal/installer/config_manager.go +++ b/internal/installer/config_manager.go @@ -8,7 +8,6 @@ import ( "io" "net" "net/url" - "os" "github.com/codesphere-cloud/oms/internal/installer/files" "github.com/codesphere-cloud/oms/internal/util" @@ -34,7 +33,6 @@ type InstallConfigManager interface { GenerateSecrets() error WriteInstallConfig(configPath string, withComments bool) error WriteVault(vaultPath string, withComments bool) error - EncryptVault(vaultPath string, withComments bool, ageRecipient string) error } type InstallConfig struct { @@ -260,55 +258,6 @@ func (g *InstallConfig) WriteVault(vaultPath string, withComments bool) error { return nil } -func (g *InstallConfig) EncryptVault(vaultPath string, withComments bool, ageRecipient string) error { - if g.Config == nil { - return fmt.Errorf("no configuration provided - config is nil") - } - - vault := g.Config.ExtractVault(g.GetVault()) - vaultYAML, err := vault.Marshal() - if err != nil { - return fmt.Errorf("failed to marshal vault.yaml: %w", err) - } - - if withComments { - vaultYAML = AddVaultComments(vaultYAML) - } - - // Write plaintext vault to a temporary file. 
- tmpFile, err := os.CreateTemp("", "vault-*.yaml") - if err != nil { - return fmt.Errorf("failed to create temp file for vault encryption: %w", err) - } - tmpPath := tmpFile.Name() - defer os.Remove(tmpPath) - - if _, err := tmpFile.Write(vaultYAML); err != nil { - tmpFile.Close() - return fmt.Errorf("failed to write decrypted vault to temp file: %w", err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file: %w", err) - } - - // Encrypt the temp file in-place using SOPS + age. - if err := encryptFileWithSOPS(tmpPath, ageRecipient); err != nil { - return err - } - - // Read the encrypted content and write to the final vault path. - encryptedData, err := os.ReadFile(tmpPath) - if err != nil { - return fmt.Errorf("failed to read encrypted vault: %w", err) - } - - if err := g.fileIO.CreateAndWrite(vaultPath, encryptedData, "Encrypted Secrets"); err != nil { - return err - } - - return nil -} - func AddConfigComments(yamlData []byte) []byte { header := `# Codesphere Installer Configuration # Generated by OMS CLI diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 0ca6c50f..1fec29c6 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -35,6 +35,7 @@ type SecretFile struct { } type SecretFields struct { + Username string `yaml:"username,omitempty"` Password string `yaml:"password"` } @@ -166,10 +167,14 @@ type K8sNode struct { } type ClusterConfig struct { - Certificates ClusterCertificates `yaml:"certificates"` - Monitoring *MonitoringConfig `yaml:"monitoring,omitempty"` - Gateway GatewayConfig `yaml:"gateway"` - PublicGateway GatewayConfig `yaml:"publicGateway"` + Certificates ClusterCertificates `yaml:"certificates"` + CertManager *CertManagerConfig `yaml:"certManager,omitempty"` + Monitoring *MonitoringConfig `yaml:"monitoring,omitempty"` + Gateway GatewayConfig `yaml:"gateway"` + PublicGateway GatewayConfig `yaml:"publicGateway"` 
+ RookExternalCluster *RookExternalClusterConfig `yaml:"rookExternalCluster,omitempty"` + PgOperator *PgOperatorConfig `yaml:"pgOperator,omitempty"` + RgwLoadBalancer *RgwLoadBalancerConfig `yaml:"rgwLoadBalancer,omitempty"` IngressCAKey string `yaml:"-"` } @@ -210,9 +215,26 @@ type ACMEDNS01Solver struct { } type GatewayConfig struct { - ServiceType string `yaml:"serviceType"` - Annotations map[string]string `yaml:"annotations,omitempty"` - IPAddresses []string `yaml:"ipAddresses,omitempty"` + ServiceType string `yaml:"serviceType"` + Annotations map[string]string `yaml:"annotations,omitempty"` + IPAddresses []string `yaml:"ipAddresses,omitempty"` + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type CertManagerConfig struct { + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type RookExternalClusterConfig struct { + Enabled bool `yaml:"enabled"` +} + +type PgOperatorConfig struct { + Enabled bool `yaml:"enabled"` +} + +type RgwLoadBalancerConfig struct { + Enabled bool `yaml:"enabled"` } type MetalLBConfig struct { @@ -462,11 +484,40 @@ type ManagedServiceBackendsConfig struct { } type MonitoringConfig struct { - Prometheus *PrometheusConfig `yaml:"prometheus,omitempty"` + Prometheus *PrometheusConfig `yaml:"prometheus,omitempty"` + BlackboxExporter *BlackboxExporterConfig `yaml:"blackboxExporter,omitempty"` + PushGateway *PushGatewayConfig `yaml:"pushGateway,omitempty"` + Loki *LokiConfig `yaml:"loki,omitempty"` + Grafana *GrafanaConfig `yaml:"grafana,omitempty"` + GrafanaAlloy *GrafanaAlloyConfig `yaml:"grafanaAlloy,omitempty"` } type PrometheusConfig struct { - RemoteWrite *RemoteWriteConfig `yaml:"remoteWrite,omitempty"` + RemoteWrite *RemoteWriteConfig `yaml:"remoteWrite,omitempty"` + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type BlackboxExporterConfig struct { + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type PushGatewayConfig struct { + Override map[string]interface{} 
`yaml:"override,omitempty"` +} + +type LokiConfig struct { + Enabled bool `yaml:"enabled"` + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type GrafanaConfig struct { + Enabled bool `yaml:"enabled"` + Override map[string]interface{} `yaml:"override,omitempty"` +} + +type GrafanaAlloyConfig struct { + Enabled bool `yaml:"enabled"` + Override map[string]interface{} `yaml:"override,omitempty"` } type RemoteWriteConfig struct { diff --git a/internal/installer/vault_encryption.go b/internal/installer/vault_encryption.go index 45415ba7..45031461 100644 --- a/internal/installer/vault_encryption.go +++ b/internal/installer/vault_encryption.go @@ -55,20 +55,28 @@ func ResolveAgeKey(fallbackDir string) (recipient string, keyPath string, err er home, homeErr := os.UserHomeDir() if homeErr == nil { defaultPath := filepath.Join(home, defaultAgeKeyRelPath) - if _, statErr := os.Stat(defaultPath); statErr == nil { - recipient, err = readRecipientFromFile(defaultPath) - if err != nil { - return "", "", fmt.Errorf("failed to read age key from default location %s: %w", defaultPath, err) - } + recipient, err = readRecipientFromFile(defaultPath) + if err == nil { return recipient, defaultPath, nil } + if !os.IsNotExist(err) { + return "", "", fmt.Errorf("failed to read age key from default location %s: %w", defaultPath, err) + } } // 4. Generate a new key. keyPath = filepath.Join(fallbackDir, "age_key.txt") - recipient, err = generateAgeKey(keyPath) + recipient, err = readRecipientFromFile(keyPath) if err != nil { - return "", "", fmt.Errorf("failed to generate age key: %w", err) + if !os.IsNotExist(err) { + return "", "", fmt.Errorf("failed to read age key from fallback location %s: %w", keyPath, err) + } + // File does not exist, will generate a new key. 
+ recipient, err = generateAgeKey(keyPath) + if err != nil { + return "", "", fmt.Errorf("failed to generate age key: %w", err) + } + return recipient, keyPath, nil } return recipient, keyPath, nil } @@ -130,9 +138,9 @@ func generateAgeKey(keyPath string) (string, error) { return recipient, nil } -// encryptFileWithSOPS encrypts a file in-place using SOPS with the given age recipient. -func encryptFileWithSOPS(filePath string, recipient string) error { - cmd := exec.Command("sops", "--encrypt", "--age", recipient, "--in-place", filePath) +// EncryptFileWithSOPS encrypts a file in-place using SOPS with the given age recipient. +func EncryptFileWithSOPS(src, target, recipient string) error { + cmd := exec.Command("sops", "--encrypt", "--age", recipient, "--output", target, src) out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("sops encrypt failed: %w: %s", err, out) diff --git a/internal/installer/vault_encryption_test.go b/internal/installer/vault_encryption_test.go index 3d2652f6..206a681d 100644 --- a/internal/installer/vault_encryption_test.go +++ b/internal/installer/vault_encryption_test.go @@ -12,8 +12,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/codesphere-cloud/oms/internal/installer" - "github.com/codesphere-cloud/oms/internal/installer/files" - "github.com/codesphere-cloud/oms/internal/util" ) func sopsAndAgeAvailable() bool { @@ -159,125 +157,6 @@ var _ = Describe("VaultEncryption", func() { }) }) }) - - Describe("EncryptVault", func() { - It("should return error if config is nil", func() { - configManager := &installer.InstallConfig{ - Config: nil, - } - err := configManager.EncryptVault("/tmp/vault.yaml", false, "age1dummy") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no configuration provided")) - }) - - Context("end-to-end encryption with sops and age", func() { - It("should encrypt the vault file", func() { - if !sopsAndAgeAvailable() { - Skip("sops and/or age not available") - } - - tmpDir, err := os.MkdirTemp("", "encrypt-vault-test-*") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) - - // Generate a real age key. - keyFile := filepath.Join(tmpDir, "test_age_key.txt") - out, keygenErr := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() - Expect(keygenErr).ToNot(HaveOccurred(), string(out)) - - // Read the recipient from generated key. - keyData, err := os.ReadFile(keyFile) - Expect(err).ToNot(HaveOccurred()) - var recipient string - for _, line := range splitLines(string(keyData)) { - if len(line) > len("# public key: ") && line[:14] == "# public key: " { - recipient = line[14:] - break - } - } - Expect(recipient).To(HavePrefix("age1")) - - // Set up a config manager with a real filesystem writer and a simple config. - config := files.NewRootConfig() - configManager := &installer.InstallConfig{ - Config: &config, - Vault: &files.InstallVault{}, - } - configManager.SetFileIO(util.NewFilesystemWriter()) - - vaultPath := filepath.Join(tmpDir, "test.vault.yaml") - err = configManager.EncryptVault(vaultPath, false, recipient) - Expect(err).ToNot(HaveOccurred()) - - // Read the encrypted file from disk. 
- written, err := os.ReadFile(vaultPath) - Expect(err).ToNot(HaveOccurred()) - - // The content should be SOPS-encrypted (contains "sops" marker). - Expect(string(written)).To(ContainSubstring("sops")) - Expect(string(written)).To(ContainSubstring("age")) - - // Verify we can decrypt it. - origKeyFile := os.Getenv("SOPS_AGE_KEY_FILE") - os.Setenv("SOPS_AGE_KEY_FILE", keyFile) - defer os.Setenv("SOPS_AGE_KEY_FILE", origKeyFile) - - decOut, err := exec.Command("sops", "--decrypt", vaultPath).CombinedOutput() - Expect(err).ToNot(HaveOccurred(), string(decOut)) - Expect(string(decOut)).To(ContainSubstring("secrets:")) - }) - - It("should encrypt the vault file with comments", func() { - if !sopsAndAgeAvailable() { - Skip("sops and/or age not available") - } - - tmpDir, err := os.MkdirTemp("", "encrypt-vault-comments-*") - Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) - - // Generate a real age key. - keyFile := filepath.Join(tmpDir, "age_key.txt") - out, keygenErr := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() - Expect(keygenErr).ToNot(HaveOccurred(), string(out)) - - keyData, err := os.ReadFile(keyFile) - Expect(err).ToNot(HaveOccurred()) - var recipient string - for _, line := range splitLines(string(keyData)) { - if len(line) > len("# public key: ") && line[:14] == "# public key: " { - recipient = line[14:] - break - } - } - Expect(recipient).To(HavePrefix("age1")) - - config := files.NewRootConfig() - configManager := &installer.InstallConfig{ - Config: &config, - Vault: &files.InstallVault{}, - } - configManager.SetFileIO(util.NewFilesystemWriter()) - - vaultPath := filepath.Join(tmpDir, "vault-with-comments.yaml") - err = configManager.EncryptVault(vaultPath, true, recipient) - Expect(err).ToNot(HaveOccurred()) - - written, err := os.ReadFile(vaultPath) - Expect(err).ToNot(HaveOccurred()) - Expect(string(written)).To(ContainSubstring("sops")) - - // Decrypt and verify the comments header survived. 
- origKeyFile := os.Getenv("SOPS_AGE_KEY_FILE") - os.Setenv("SOPS_AGE_KEY_FILE", keyFile) - defer os.Setenv("SOPS_AGE_KEY_FILE", origKeyFile) - - decOut, err := exec.Command("sops", "--decrypt", vaultPath).CombinedOutput() - Expect(err).ToNot(HaveOccurred(), string(decOut)) - Expect(string(decOut)).To(ContainSubstring("secrets:")) - }) - }) - }) }) func splitLines(s string) []string { From 17509bb419d9efc58c372661f07dd8c1ef8cca5d Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sun, 8 Mar 2026 21:38:28 +0100 Subject: [PATCH 11/31] feat: support db migrations Signed-off-by: Tim Schrodi --- internal/bootstrap/local/installer.go | 25 ++++++++++++++++++++++++- internal/bootstrap/local/local.go | 18 +++++++++++++++++- internal/installer/files/config_yaml.go | 2 ++ 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index be90fde7..e5b3fe1d 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -297,6 +297,26 @@ func (b *LocalBootstrapper) RunInstaller() error { return fmt.Errorf("failed to resolve absolute key path: %w", err) } + // Start a kubectl port-forward so that install-components.js on the host + // can reach the PostgreSQL service running inside the cluster. The + // codesphere component step needs this for DB migration and setup. + localDbPort := 5432 + pfCmd := exec.Command("kubectl", "port-forward", + "svc/masterdata-rw", "-n", "codesphere-system", + fmt.Sprintf("%d:5432", localDbPort)) + pfCmd.Stdout = os.Stdout + pfCmd.Stderr = os.Stderr + if err := pfCmd.Start(); err != nil { + return fmt.Errorf("failed to start kubectl port-forward for PostgreSQL: %w", err) + } + defer func() { + if pfCmd.Process != nil { + _ = pfCmd.Process.Kill() + _ = pfCmd.Wait() + } + }() + log.Println("kubectl port-forward for PostgreSQL started (localhost:5432 → masterdata-rw:5432)") + // Run each component step locally via install-components.js. 
for _, component := range installerComponentSteps { cmdArgs := []string{ @@ -311,9 +331,12 @@ func (b *LocalBootstrapper) RunInstaller() error { log.Printf("Running install-components.js --component %s", component) log.Printf(" %s %s", nodePath, strings.Join(cmdArgs, " ")) cmd := exec.Command(nodePath, cmdArgs...) + cmd.Env = []string{ + "DB_MIGRATION_HOST=localhost", + fmt.Sprintf("DB_MIGRATION_PORT=%d", localDbPort), + } cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin if err := cmd.Run(); err != nil { return fmt.Errorf("install-components.js --component %s failed: %w", component, err) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 1c8cfc94..4dbb4f9e 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -377,7 +377,14 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { if err != nil { return fmt.Errorf("failed to read PostgreSQL CA: %w", err) } - b.Env.InstallConfig.Postgres.ServerAddress = "masterdata-rw.codesphere-system.svc.cluster.local" + // The installer runs on the host machine but the PostgreSQL service is only + // reachable inside the cluster. We point ServerAddress at localhost so that + // the DB migration (run by install-components.js on the host) connects + // through a kubectl port-forward that RunInstaller starts automatically. + // AltName carries the real in-cluster DNS name so that the TLS servername + // (SNI) matches the certificate issued by CNPG. 
+ b.Env.InstallConfig.Postgres.ServerAddress = "127.0.0.1" + b.Env.InstallConfig.Postgres.AltName = "masterdata-rw.codesphere-system.svc.cluster.local" b.Env.InstallConfig.Postgres.Port = 5432 b.Env.InstallConfig.Postgres.Primary = nil b.Env.InstallConfig.Postgres.Replica = nil @@ -650,6 +657,15 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments b.Env.InstallConfig.Codesphere.Features = b.Env.FeatureFlags + // The Helm values written by install-components.js propagate + // postgres.host from ServerAddress (127.0.0.1). Override it back to the + // in-cluster service name so that the Codesphere pods can reach the DB. + b.Env.InstallConfig.Codesphere.Override = map[string]interface{}{ + "postgres": map[string]interface{}{ + "host": "masterdata-rw.codesphere-system.svc.cluster.local", + }, + } + if !b.Env.ExistingConfigUsed { err := b.icg.GenerateSecrets() if err != nil { diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 1fec29c6..839ee72b 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -80,6 +80,7 @@ type PostgresConfig struct { Primary *PostgresPrimaryConfig `yaml:"primary,omitempty"` Replica *PostgresReplicaConfig `yaml:"replica,omitempty"` ServerAddress string `yaml:"serverAddress,omitempty"` + AltName string `yaml:"altName,omitempty"` Port int `yaml:"port,omitempty"` Database string `yaml:"database,omitempty"` @@ -288,6 +289,7 @@ type CodesphereConfig struct { GitProviders *GitProvidersConfig `yaml:"gitProviders,omitempty"` ManagedServices []ManagedServiceConfig `yaml:"managedServices,omitempty"` OpenBao *OpenBaoConfig `yaml:"openBao,omitempty"` + Override map[string]interface{} `yaml:"override,omitempty"` DomainAuthPrivateKey string `yaml:"-"` DomainAuthPublicKey string `yaml:"-"` From e672df5d5e0a1a1d4d9f3c9e32addd685586e52f Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Mon, 9 
Mar 2026 13:47:29 +0100 Subject: [PATCH 12/31] move db to codesphere namespace Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 28 +++-- internal/bootstrap/local/installer.go | 23 +++-- internal/bootstrap/local/local.go | 130 ++++++++++++++++++------ internal/bootstrap/local/postgres.go | 17 ++-- internal/installer/files/config_yaml.go | 1 - 5 files changed, 139 insertions(+), 60 deletions(-) diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index a384f756..711e84dd 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ctrlconfig "sigs.k8s.io/controller-runtime/pkg/client/config" ) @@ -65,6 +66,10 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallVersion, "install-version", "", "Codesphere version to install (downloaded from the OMS portal)") flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallHash, "install-hash", "", "Codesphere package hash (required when install-version is set)") flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallLocal, "install-local", "", "Path to a local installer package (tar.gz or unpacked directory)") + // Registry + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryUser, "registry-user", "", "Custom Registry username (only for GitHub registry type) (optional)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.GitHubPAT, "github-pat", "", "GitHub Personal Access Token to use for direct image access. 
Scope required: package read (optional)") + // Codesphere Environment flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") @@ -75,6 +80,9 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.SecretsFilePath, "secrets-file", "", "Path to secrets file (default: /prod.vault.yaml)") bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE + util.MarkFlagRequired(bootstrapLocalCmd.cmd, "registry-user") + util.MarkFlagRequired(bootstrapLocalCmd.cmd, "github-pat") + parent.AddCommand(bootstrapLocalCmd.cmd) } @@ -104,12 +112,12 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { stlog := bootstrap.NewStepLogger(false) icg := installer.NewInstallConfigManager() fw := util.NewFilesystemWriter() - kubeClient, err := c.GetKubeClient(ctx) + kubeClient, restConfig, err := c.GetKubeClient(ctx) if err != nil { return fmt.Errorf("failed to initialize Kubernetes client: %w", err) } - bs := local.NewLocalBootstrapper(ctx, stlog, kubeClient, fw, icg, c.CodesphereEnv) + bs := local.NewLocalBootstrapper(ctx, stlog, kubeClient, restConfig, fw, icg, c.CodesphereEnv) return bs.Bootstrap() } @@ -153,30 +161,30 @@ Consult the official rook guide for more details and troubleshooting: https://roo return nil } -func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (ctrlclient.Client, error) { +func (c *BootstrapLocalCmd) GetKubeClient(ctx context.Context) (ctrlclient.Client, *rest.Config, error) { kubeConfig, err := ctrlconfig.GetConfig() if err != nil { - return nil, fmt.Errorf("failed to load Kubernetes config: %w", err) + return nil, nil, fmt.Errorf("failed to load Kubernetes config: %w", err) } scheme := runtime.NewScheme() if err := clientgoscheme.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to 
add Kubernetes core scheme: %w", err) + return nil, nil, fmt.Errorf("failed to add Kubernetes core scheme: %w", err) } if err := cnpgv1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to add CloudNativePG scheme: %w", err) + return nil, nil, fmt.Errorf("failed to add CloudNativePG scheme: %w", err) } if err := rookcephv1.AddToScheme(scheme); err != nil { - return nil, fmt.Errorf("failed to add Rook Ceph scheme: %w", err) + return nil, nil, fmt.Errorf("failed to add Rook Ceph scheme: %w", err) } kubeClient, err := ctrlclient.New(kubeConfig, ctrlclient.Options{Scheme: scheme}) if err != nil { - return nil, fmt.Errorf("failed to initialize Kubernetes client: %w", err) + return nil, nil, fmt.Errorf("failed to initialize Kubernetes client: %w", err) } - return kubeClient, nil + return kubeClient, kubeConfig, nil } func (c *BootstrapLocalCmd) ValidatePrerequisites(ctx context.Context) error { @@ -196,7 +204,7 @@ func (c *BootstrapLocalCmd) ValidatePrerequisites(ctx context.Context) error { } func (c *BootstrapLocalCmd) ValidateKubernetesCluster(ctx context.Context) error { - kubeClient, err := c.GetKubeClient(ctx) + kubeClient, _, err := c.GetKubeClient(ctx) if err != nil { return err } diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index e5b3fe1d..c5030e8c 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -302,20 +302,29 @@ func (b *LocalBootstrapper) RunInstaller() error { // codesphere component step needs this for DB migration and setup. 
localDbPort := 5432 pfCmd := exec.Command("kubectl", "port-forward", - "svc/masterdata-rw", "-n", "codesphere-system", + "svc/masterdata-rw", "-n", "codesphere", fmt.Sprintf("%d:5432", localDbPort)) pfCmd.Stdout = os.Stdout pfCmd.Stderr = os.Stderr - if err := pfCmd.Start(); err != nil { - return fmt.Errorf("failed to start kubectl port-forward for PostgreSQL: %w", err) - } + pfErrCh := make(chan error, 1) + go func() { + pfErrCh <- pfCmd.Run() + }() defer func() { if pfCmd.Process != nil { _ = pfCmd.Process.Kill() - _ = pfCmd.Wait() } + // Drain the goroutine so it doesn't leak. + <-pfErrCh }() - log.Println("kubectl port-forward for PostgreSQL started (localhost:5432 → masterdata-rw:5432)") + + // Check that the port-forward didn't exit immediately. + select { + case err := <-pfErrCh: + return fmt.Errorf("kubectl port-forward for PostgreSQL exited early: %w", err) + default: + } + log.Println("kubectl port-forward for PostgreSQL started (127.0.0.1:5432 → masterdata-rw:5432)") // Run each component step locally via install-components.js. for _, component := range installerComponentSteps { @@ -332,7 +341,7 @@ func (b *LocalBootstrapper) RunInstaller() error { log.Printf(" %s %s", nodePath, strings.Join(cmdArgs, " ")) cmd := exec.Command(nodePath, cmdArgs...) 
cmd.Env = []string{ - "DB_MIGRATION_HOST=localhost", + "DB_MIGRATION_HOST=127.0.0.1", fmt.Sprintf("DB_MIGRATION_PORT=%d", localDbPort), } cmd.Stdout = os.Stdout diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 4dbb4f9e..23fb2f10 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -16,6 +16,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -47,6 +50,7 @@ type LocalBootstrapper struct { ctx context.Context stlog *bootstrap.StepLogger kubeClient client.Client + restConfig *rest.Config fw util.FileIO icg installer.InstallConfigManager // Environment @@ -67,6 +71,9 @@ type CodesphereEnvironment struct { InstallVersion string `json:"install_version"` InstallHash string `json:"install_hash"` InstallLocal string `json:"install_local"` + // Registry + GitHubPAT string `json:"-"` + RegistryUser string `json:"-"` // Config InstallDir string `json:"-"` ExistingConfigUsed bool `json:"-"` @@ -76,11 +83,12 @@ type CodesphereEnvironment struct { Vault *files.InstallVault `json:"-"` } -func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, fw util.FileIO, icg installer.InstallConfigManager, env *CodesphereEnvironment) *LocalBootstrapper { +func NewLocalBootstrapper(ctx context.Context, stlog *bootstrap.StepLogger, kubeClient client.Client, restConfig *rest.Config, fw util.FileIO, icg installer.InstallConfigManager, env *CodesphereEnvironment) *LocalBootstrapper { return &LocalBootstrapper{ ctx: ctx, stlog: stlog, kubeClient: kubeClient, + restConfig: restConfig, fw: fw, icg: icg, Env: env, @@ -156,6 +164,11 @@ func (b *LocalBootstrapper) 
Bootstrap() error { return err } + err = b.stlog.Substep("Sync ceph-mon-endpoints ConfigMap", b.SyncCephMonEndpoints) + if err != nil { + return err + } + return nil }) if err != nil { @@ -270,6 +283,35 @@ func (b *LocalBootstrapper) CreateCephAdminSecrets() error { return nil } +// SyncCephMonEndpoints copies the rook-ceph-mon-endpoints ConfigMap from the +// rook-ceph namespace into the codesphere and workspaces namespaces so that +// CSI plugins and other consumers can discover the Ceph monitor addresses. +func (b *LocalBootstrapper) SyncCephMonEndpoints() error { + source := &corev1.ConfigMap{} + key := client.ObjectKey{Namespace: "rook-ceph", Name: "rook-ceph-mon-endpoints"} + if err := b.kubeClient.Get(b.ctx, key, source); err != nil { + return fmt.Errorf("failed to read rook-ceph-mon-endpoints ConfigMap from rook-ceph namespace: %w", err) + } + + for _, ns := range []string{codesphereNamespace, workspacesNamespace} { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ceph-mon-endpoints", + Namespace: ns, + }, + } + _, err := controllerutil.CreateOrUpdate(b.ctx, b.kubeClient, cm, func() error { + cm.Data = source.Data + return nil + }) + if err != nil { + return fmt.Errorf("failed to sync ceph-mon-endpoints ConfigMap to namespace %q: %w", ns, err) + } + } + + return nil +} + // ReadClusterCIDRs reads the pod and service CIDRs from the running Kubernetes cluster. // Pod CIDR is read from the first node's spec.podCIDR. // Service CIDR is read from the kube-apiserver pod's --service-cluster-ip-range flag. 
@@ -368,8 +410,9 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { if err := os.MkdirAll(b.Env.InstallConfig.Secrets.BaseDir, 0700); err != nil { return fmt.Errorf("failed to create secrets base directory: %w", err) } - b.Env.InstallConfig.Registry.ReplaceImagesInBom = false - b.Env.InstallConfig.Registry.LoadContainerImages = false + if err := b.EnsureGitHubAccessConfigured(); err != nil { + return fmt.Errorf("failed to ensure GitHub access is configured: %w", err) + } b.Env.InstallConfig.Postgres.Mode = "external" b.Env.InstallConfig.Postgres.Database = cnpgDatabaseName @@ -377,14 +420,8 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { if err != nil { return fmt.Errorf("failed to read PostgreSQL CA: %w", err) } - // The installer runs on the host machine but the PostgreSQL service is only - // reachable inside the cluster. We point ServerAddress at localhost so that - // the DB migration (run by install-components.js on the host) connects - // through a kubectl port-forward that RunInstaller starts automatically. - // AltName carries the real in-cluster DNS name so that the TLS servername - // (SNI) matches the certificate issued by CNPG. - b.Env.InstallConfig.Postgres.ServerAddress = "127.0.0.1" - b.Env.InstallConfig.Postgres.AltName = "masterdata-rw.codesphere-system.svc.cluster.local" + + b.Env.InstallConfig.Postgres.ServerAddress = "masterdata-rw.codesphere.svc.cluster.local" b.Env.InstallConfig.Postgres.Port = 5432 b.Env.InstallConfig.Postgres.Primary = nil b.Env.InstallConfig.Postgres.Replica = nil @@ -657,15 +694,6 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments b.Env.InstallConfig.Codesphere.Features = b.Env.FeatureFlags - // The Helm values written by install-components.js propagate - // postgres.host from ServerAddress (127.0.0.1). Override it back to the - // in-cluster service name so that the Codesphere pods can reach the DB. 
- b.Env.InstallConfig.Codesphere.Override = map[string]interface{}{ - "postgres": map[string]interface{}{ - "host": "masterdata-rw.codesphere-system.svc.cluster.local", - }, - } - if !b.Env.ExistingConfigUsed { err := b.icg.GenerateSecrets() if err != nil { @@ -687,6 +715,18 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { return nil } +func (b *LocalBootstrapper) EnsureGitHubAccessConfigured() error { + if b.Env.GitHubPAT == "" { + return fmt.Errorf("GitHub PAT is not set") + } + b.Env.InstallConfig.Registry.Server = "ghcr.io" + b.Env.InstallConfig.Registry.Username = b.Env.RegistryUser + b.Env.InstallConfig.Registry.Password = b.Env.GitHubPAT + b.Env.InstallConfig.Registry.ReplaceImagesInBom = false + b.Env.InstallConfig.Registry.LoadContainerImages = false + return nil +} + // addCephSecretsToVault appends Ceph credentials to the vault as SecretEntry items. // These mirror the secrets that the JS installer stores via SecretManagerSops: // - cephFsId (password = FSID) @@ -708,21 +748,49 @@ func (b *LocalBootstrapper) addCephSecretsToVault(vault *files.InstallVault) { ) } -// readKubeConfig reads the active kubeconfig file and returns its content. -// It checks $KUBECONFIG first, falling back to ~/.kube/config. +// readKubeConfig builds a kubeconfig YAML from the in-memory rest.Config so +// that install-components.js can use it to talk to the cluster. 
func (b *LocalBootstrapper) readKubeConfig() (string, error) { - kubeconfigPath := os.Getenv("KUBECONFIG") - if kubeconfigPath == "" { - home, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("failed to determine home directory: %w", err) - } - kubeconfigPath = filepath.Join(home, ".kube", "config") + cfg := b.restConfig + + cluster := clientcmdapi.NewCluster() + cluster.Server = cfg.Host + cluster.CertificateAuthorityData = cfg.CAData + if cfg.CAFile != "" && len(cluster.CertificateAuthorityData) == 0 { + cluster.CertificateAuthority = cfg.CAFile + } + cluster.InsecureSkipTLSVerify = cfg.Insecure + + authInfo := clientcmdapi.NewAuthInfo() + authInfo.ClientCertificateData = cfg.CertData + if cfg.CertFile != "" && len(authInfo.ClientCertificateData) == 0 { + authInfo.ClientCertificate = cfg.CertFile + } + authInfo.ClientKeyData = cfg.KeyData + if cfg.KeyFile != "" && len(authInfo.ClientKeyData) == 0 { + authInfo.ClientKey = cfg.KeyFile + } + authInfo.Token = cfg.BearerToken + if cfg.BearerTokenFile != "" && authInfo.Token == "" { + authInfo.TokenFile = cfg.BearerTokenFile + } + if cfg.Username != "" { + authInfo.Username = cfg.Username + authInfo.Password = cfg.Password + } + + kubeConfig := clientcmdapi.NewConfig() + kubeConfig.Clusters["default"] = cluster + kubeConfig.AuthInfos["default"] = authInfo + kubeConfig.Contexts["default"] = &clientcmdapi.Context{ + Cluster: "default", + AuthInfo: "default", } + kubeConfig.CurrentContext = "default" - data, err := os.ReadFile(kubeconfigPath) + data, err := clientcmd.Write(*kubeConfig) if err != nil { - return "", fmt.Errorf("failed to read kubeconfig from %s: %w", kubeconfigPath, err) + return "", fmt.Errorf("failed to marshal kubeconfig from rest.Config: %w", err) } return string(data), nil diff --git a/internal/bootstrap/local/postgres.go b/internal/bootstrap/local/postgres.go index eab3cdb4..706386ce 100644 --- a/internal/bootstrap/local/postgres.go +++ b/internal/bootstrap/local/postgres.go @@ -56,7 
+56,7 @@ func (b *LocalBootstrapper) InstallCloudNativePGHelmChart() error { cnpgReleaseName, cnpgChartRef, "--namespace", - codesphereSystemNamespace, + codesphereNamespace, "--create-namespace", "--set", "config.clusterWide=false", "--set", "resources.requests.cpu=0", @@ -74,7 +74,7 @@ func (b *LocalBootstrapper) DeployPostgresDatabase() error { postgresCluster := &cnpgv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, + Namespace: codesphereNamespace, }, } @@ -86,11 +86,6 @@ func (b *LocalBootstrapper) DeployPostgresDatabase() error { StorageClass: ptr.To(cephStorageClassName), Size: cnpgDatabaseStorageSize, }, - Bootstrap: &cnpgv1.BootstrapConfiguration{ - InitDB: &cnpgv1.BootstrapInitDB{ - Database: cnpgDatabaseName, - }, - }, EnableSuperuserAccess: ptr.To(true), } return nil @@ -112,7 +107,7 @@ func (b *LocalBootstrapper) WaitForPostgresDatabaseReady() error { clusterKey := client.ObjectKey{ Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, + Namespace: codesphereNamespace, } steps := int(cnpgReadyTimeout / cnpgReadyPollInterval) @@ -182,7 +177,7 @@ func (b *LocalBootstrapper) WaitForPostgresDatabaseReady() error { func (b *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { clusterKey := client.ObjectKey{ Name: cnpgDatabaseClusterName, - Namespace: codesphereSystemNamespace, + Namespace: codesphereNamespace, } cluster := &cnpgv1.Cluster{} @@ -193,7 +188,7 @@ func (b *LocalBootstrapper) ReadPostgresSuperuserPassword() (string, error) { secretName := cluster.GetSuperuserSecretName() secretKey := client.ObjectKey{ Name: secretName, - Namespace: codesphereSystemNamespace, + Namespace: codesphereNamespace, } secret := &corev1.Secret{} @@ -216,7 +211,7 @@ func (b *LocalBootstrapper) ReadPostgresCA() (string, error) { secretName := cnpgDatabaseClusterName + "-ca" secretKey := client.ObjectKey{ Name: secretName, - Namespace: codesphereSystemNamespace, + Namespace: 
codesphereNamespace, } secret := &corev1.Secret{} diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 839ee72b..001688f8 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -289,7 +289,6 @@ type CodesphereConfig struct { GitProviders *GitProvidersConfig `yaml:"gitProviders,omitempty"` ManagedServices []ManagedServiceConfig `yaml:"managedServices,omitempty"` OpenBao *OpenBaoConfig `yaml:"openBao,omitempty"` - Override map[string]interface{} `yaml:"override,omitempty"` DomainAuthPrivateKey string `yaml:"-"` DomainAuthPublicKey string `yaml:"-"` From 561d0bd1c4ff77e13d6861eaa26510e75c94ca1d Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Tue, 10 Mar 2026 08:35:31 +0100 Subject: [PATCH 13/31] fix codepshere deployment and set no requests Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 6 +- internal/bootstrap/local/installer.go | 174 ++++++++++++++++++++---- internal/bootstrap/local/local.go | 70 ++++++++-- internal/installer/files/config_yaml.go | 1 + 4 files changed, 211 insertions(+), 40 deletions(-) diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 711e84dd..4959a0be 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -67,8 +67,8 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallHash, "install-hash", "", "Codesphere package hash (required when install-version is set)") flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallLocal, "install-local", "", "Path to a local installer package (tar.gz or unpacked directory)") // Registry - flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryUser, "registry-user", "", "Custom Registry username (only for GitHub registry type) (optional)") - flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.GitHubPAT, "github-pat", "", "GitHub Personal Access Token to use for direct image access. 
Scope required: package read (optional)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryUser, "registry-user", "", "Custom Registry username (optional)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryPassword, "registry-password", "", "Registry password to use for direct image access (for ghcr.io, use a GitHub Personal Access Token with package read scope)") // Codesphere Environment flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") @@ -81,7 +81,7 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE util.MarkFlagRequired(bootstrapLocalCmd.cmd, "registry-user") - util.MarkFlagRequired(bootstrapLocalCmd.cmd, "github-pat") + util.MarkFlagRequired(bootstrapLocalCmd.cmd, "registry-password") parent.AddCommand(bootstrapLocalCmd.cmd) } diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index c5030e8c..c6958d9e 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -11,9 +11,15 @@ import ( "path/filepath" "runtime" "strings" + "time" "github.com/codesphere-cloud/oms/internal/portal" "github.com/codesphere-cloud/oms/internal/util" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" ) // installerComponentSteps lists the install-components.js steps executed @@ -23,6 +29,8 @@ var installerComponentSteps = []string{"setUpCluster", "codesphere"} // installerArtifactFilename is the artifact to download from the OMS portal. const installerArtifactFilename = "installer-lite.tar.gz" +const temporaryPostgresNodePortPrefix = "oms-masterdata-nodeport" + // DownloadInstallerPackage downloads the Codesphere installer package from the // OMS portal, similar to how the GCP bootstrapper fetches it onto a jumpbox. 
// The package is downloaded into the directory that contains the config/secrets @@ -232,6 +240,132 @@ func symlinkBinary(name, target string) error { return nil } +func (b *LocalBootstrapper) createTemporaryPostgresNodePortEndpoint() (string, int32, func(), error) { + masterdataSvc := &corev1.Service{} + masterdataSvcKey := types.NamespacedName{Name: "masterdata-rw", Namespace: codesphereNamespace} + if err := b.kubeClient.Get(b.ctx, masterdataSvcKey, masterdataSvc); err != nil { + return "", 0, nil, fmt.Errorf("failed to get PostgreSQL service %s/%s: %w", codesphereNamespace, "masterdata-rw", err) + } + + if len(masterdataSvc.Spec.Selector) == 0 { + return "", 0, nil, fmt.Errorf("service %s/%s has no selector; cannot create NodePort proxy", codesphereNamespace, "masterdata-rw") + } + + postgresPort, err := getPostgresServicePort(masterdataSvc) + if err != nil { + return "", 0, nil, err + } + + nowNanos := time.Now().UnixNano() + tmpServiceName := fmt.Sprintf("%s-%d", temporaryPostgresNodePortPrefix, nowNanos) + + selector := make(map[string]string, len(masterdataSvc.Spec.Selector)) + for k, v := range masterdataSvc.Spec.Selector { + selector[k] = v + } + + tmpService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: tmpServiceName, + Namespace: codesphereNamespace, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: selector, + Ports: []corev1.ServicePort{{ + Name: "postgres", + Protocol: corev1.ProtocolTCP, + Port: postgresPort.Port, + TargetPort: postgresPort.TargetPort, + }}, + }, + } + + if err := b.kubeClient.Create(b.ctx, tmpService); err != nil { + return "", 0, nil, fmt.Errorf("failed to create temporary PostgreSQL NodePort service %s/%s: %w", codesphereNamespace, tmpServiceName, err) + } + + cleanup := func() { + if err := b.kubeClient.Delete(b.ctx, tmpService); err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete temporary PostgreSQL NodePort service %s/%s: %v", codesphereNamespace, 
tmpServiceName, err) + } + } + + if len(tmpService.Spec.Ports) == 0 || tmpService.Spec.Ports[0].NodePort == 0 { + if err := b.kubeClient.Get(b.ctx, types.NamespacedName{Name: tmpServiceName, Namespace: codesphereNamespace}, tmpService); err != nil { + cleanup() + return "", 0, nil, fmt.Errorf("failed to read temporary PostgreSQL NodePort service %s/%s: %w", codesphereNamespace, tmpServiceName, err) + } + } + + if len(tmpService.Spec.Ports) == 0 || tmpService.Spec.Ports[0].NodePort == 0 { + cleanup() + return "", 0, nil, fmt.Errorf("temporary PostgreSQL NodePort service %s/%s has no allocated nodePort", codesphereNamespace, tmpServiceName) + } + + nodeIP, err := b.resolveNodeIPForNodePort() + if err != nil { + cleanup() + return "", 0, nil, err + } + + return nodeIP, tmpService.Spec.Ports[0].NodePort, cleanup, nil +} + +func getPostgresServicePort(svc *corev1.Service) (corev1.ServicePort, error) { + for _, port := range svc.Spec.Ports { + if port.Port == 5432 { + if port.TargetPort.Type == intstr.Int && port.TargetPort.IntValue() == 0 { + port.TargetPort = intstr.FromInt(5432) + } + if port.TargetPort.Type == intstr.String && port.TargetPort.String() == "" { + port.TargetPort = intstr.FromInt(5432) + } + return port, nil + } + } + + if len(svc.Spec.Ports) == 0 { + return corev1.ServicePort{}, fmt.Errorf("service %s/%s has no ports", svc.Namespace, svc.Name) + } + + port := svc.Spec.Ports[0] + if port.TargetPort.Type == intstr.Int && port.TargetPort.IntValue() == 0 { + port.TargetPort = intstr.FromInt(int(port.Port)) + } + if port.TargetPort.Type == intstr.String && port.TargetPort.String() == "" { + port.TargetPort = intstr.FromInt(int(port.Port)) + } + + return port, nil +} + +func (b *LocalBootstrapper) resolveNodeIPForNodePort() (string, error) { + nodeList := &corev1.NodeList{} + if err := b.kubeClient.List(b.ctx, nodeList); err != nil { + return "", fmt.Errorf("failed to list cluster nodes for NodePort endpoint: %w", err) + } + + if len(nodeList.Items) == 0 { + 
return "", fmt.Errorf("connected to Kubernetes cluster but no nodes are available") + } + + for _, node := range nodeList.Items { + for _, addr := range node.Status.Addresses { + if addr.Type == corev1.NodeInternalIP && addr.Address != "" { + return addr.Address, nil + } + } + for _, addr := range node.Status.Addresses { + if addr.Type == corev1.NodeExternalIP && addr.Address != "" { + return addr.Address, nil + } + } + } + + return "", fmt.Errorf("failed to resolve node IP address for NodePort endpoint") +} + // RunInstaller extracts the deps.tar.gz archive locally and then runs the // install-components.js script directly on the local machine for each // required component step (setUpCluster, codesphere), instead of running @@ -297,34 +431,16 @@ func (b *LocalBootstrapper) RunInstaller() error { return fmt.Errorf("failed to resolve absolute key path: %w", err) } - // Start a kubectl port-forward so that install-components.js on the host - // can reach the PostgreSQL service running inside the cluster. The - // codesphere component step needs this for DB migration and setup. - localDbPort := 5432 - pfCmd := exec.Command("kubectl", "port-forward", - "svc/masterdata-rw", "-n", "codesphere", - fmt.Sprintf("%d:5432", localDbPort)) - pfCmd.Stdout = os.Stdout - pfCmd.Stderr = os.Stderr - pfErrCh := make(chan error, 1) - go func() { - pfErrCh <- pfCmd.Run() - }() - defer func() { - if pfCmd.Process != nil { - _ = pfCmd.Process.Kill() - } - // Drain the goroutine so it doesn't leak. - <-pfErrCh - }() - - // Check that the port-forward didn't exit immediately. - select { - case err := <-pfErrCh: - return fmt.Errorf("kubectl port-forward for PostgreSQL exited early: %w", err) - default: + // Create a temporary NodePort service for PostgreSQL so that + // install-components.js can reach the database without a long-lived + // kubectl port-forward session. 
+ dbHost, dbPort, cleanupNodePortSvc, err := b.createTemporaryPostgresNodePortEndpoint() + if err != nil { + return err } - log.Println("kubectl port-forward for PostgreSQL started (127.0.0.1:5432 → masterdata-rw:5432)") + defer cleanupNodePortSvc() + + log.Printf("Temporary PostgreSQL NodePort service ready (%s:%d)", dbHost, dbPort) // Run each component step locally via install-components.js. for _, component := range installerComponentSteps { @@ -341,8 +457,8 @@ func (b *LocalBootstrapper) RunInstaller() error { log.Printf(" %s %s", nodePath, strings.Join(cmdArgs, " ")) cmd := exec.Command(nodePath, cmdArgs...) cmd.Env = []string{ - "DB_MIGRATION_HOST=127.0.0.1", - fmt.Sprintf("DB_MIGRATION_PORT=%d", localDbPort), + fmt.Sprintf("DB_MIGRATION_HOST=%s", dbHost), + fmt.Sprintf("DB_MIGRATION_PORT=%d", dbPort), } cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 23fb2f10..4b3f9509 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -15,7 +15,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -72,8 +71,8 @@ type CodesphereEnvironment struct { InstallHash string `json:"install_hash"` InstallLocal string `json:"install_local"` // Registry - GitHubPAT string `json:"-"` - RegistryUser string `json:"-"` + RegistryUser string `json:"-"` + RegistryPassword string `json:"-"` // Config InstallDir string `json:"-"` ExistingConfigUsed bool `json:"-"` @@ -237,12 +236,22 @@ func (b *LocalBootstrapper) EnsureNamespaces() error { ObjectMeta: metav1.ObjectMeta{ Name: "error-page-server", Namespace: codesphereNamespace, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "Helm", + "meta.helm.sh/release-name": 
"codesphere", + "meta.helm.sh/release-namespace": codesphereNamespace, + }, + Annotations: map[string]string{ + "meta.helm.sh/release-name": "codesphere", + "meta.helm.sh/release-namespace": codesphereNamespace, + }, }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, - Ports: []corev1.ServicePort{ - {Port: 8080, TargetPort: intstr.FromInt32(8080)}, - }, + Ports: []corev1.ServicePort{{ + Name: "web", + Port: 8083, + }}, Selector: map[string]string{"app": "error-page-server"}, }, } @@ -625,6 +634,51 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { CNameBaseDomain: "ws." + b.Env.BaseDomain, } b.Env.InstallConfig.Codesphere.DNSServers = []string{"8.8.8.8"} + + // Override resource requests to "0" for Codesphere services so they can + // be scheduled on resource-constrained local clusters. + codesphereOverride := b.Env.InstallConfig.Codesphere.Override + if codesphereOverride == nil { + codesphereOverride = map[string]interface{}{} + } + + globalOverride, ok := codesphereOverride["global"].(map[string]interface{}) + if !ok || globalOverride == nil { + globalOverride = map[string]interface{}{} + } + + servicesOverride, ok := globalOverride["services"].(map[string]interface{}) + if !ok || servicesOverride == nil { + servicesOverride = map[string]interface{}{} + } + + for _, serviceName := range []string{ + "auth_service", + "deployment_service", + "error_page_server", + "ide_frontend", + "ide_service", + "marketplace", + "payment_service", + "public_api_service", + "team_service", + "workspace_proxy", + "workspace_service", + } { + servicesOverride[serviceName] = map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "0", + "memory": "0", + }, + }, + } + } + + globalOverride["services"] = servicesOverride + codesphereOverride["global"] = globalOverride + b.Env.InstallConfig.Codesphere.Override = codesphereOverride + b.Env.InstallConfig.Codesphere.DeployConfig = files.DeployConfig{ 
Images: map[string]files.ImageConfig{ "ubuntu-24.04": { @@ -716,12 +770,12 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } func (b *LocalBootstrapper) EnsureGitHubAccessConfigured() error { - if b.Env.GitHubPAT == "" { + if b.Env.RegistryPassword == "" { return fmt.Errorf("GitHub PAT is not set") } b.Env.InstallConfig.Registry.Server = "ghcr.io" b.Env.InstallConfig.Registry.Username = b.Env.RegistryUser - b.Env.InstallConfig.Registry.Password = b.Env.GitHubPAT + b.Env.InstallConfig.Registry.Password = b.Env.RegistryPassword b.Env.InstallConfig.Registry.ReplaceImagesInBom = false b.Env.InstallConfig.Registry.LoadContainerImages = false return nil diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 001688f8..39372ea1 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -279,6 +279,7 @@ type CodesphereConfig struct { DNSServers []string `yaml:"dnsServers"` Experiments []string `yaml:"experiments"` Features []string `yaml:"features"` + Override map[string]interface{} `yaml:"override,omitempty"` ExtraCAPem string `yaml:"extraCaPem,omitempty"` ExtraWorkspaceEnvVars map[string]string `yaml:"extraWorkspaceEnvVars,omitempty"` ExtraWorkspaceFiles []ExtraWorkspaceFile `yaml:"extraWorkspaceFiles,omitempty"` From b0918cdd6acbd38d349ab13356836f8eb4c9fe7a Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Tue, 10 Mar 2026 14:15:13 +0100 Subject: [PATCH 14/31] share common cs config and add resource profiles Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 33 +-- internal/bootstrap/bootstrap_suite_test.go | 13 + internal/bootstrap/codesphere_defaults.go | 81 ++++++ internal/bootstrap/gcp/gcp.go | 67 +---- internal/bootstrap/local/local.go | 252 ++----------------- internal/bootstrap/resource_profiles.go | 208 +++++++++++++++ internal/bootstrap/resource_profiles_test.go | 92 +++++++ 7 files changed, 429 insertions(+), 317 deletions(-) create mode 100644 
internal/bootstrap/bootstrap_suite_test.go create mode 100644 internal/bootstrap/codesphere_defaults.go create mode 100644 internal/bootstrap/resource_profiles.go create mode 100644 internal/bootstrap/resource_profiles_test.go diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 4959a0be..be84abce 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -52,9 +52,9 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { cmd: &cobra.Command{ Use: "bootstrap-local", Short: "Bootstrap a local Codesphere environment", - Long: csio.Long(`Bootstraps a local Codesphere environment using only a single Kubernetes cluster. + Long: csio.Long(`Bootstraps a local Codesphere environment using a single Linux x86_64 Kubernetes cluster. Rook is used to install Ceph, and CNPG is used for the PostgreSQL database. - Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. + For local setups, use Minikube with a virtual machine on Linux. 
Not for production use.`), }, CodesphereEnv: &local.CodesphereEnvironment{}, @@ -74,6 +74,7 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.FeatureFlags, "feature-flags", []string{}, "Feature flags to enable in Codesphere installation (optional)") + flags.StringVar((*string)(&bootstrapLocalCmd.CodesphereEnv.ResourceProfile), "resource-profile", string(bootstrap.ResourceProfileNoRequests), "Resource profile to apply to the install config (supported: noRequests)") // Config flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallDir, "install-dir", ".installer", "Directory for config, secrets, and bundle files") flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallConfigPath, "install-config", "", "Path to install config file (default: /config.yaml)") @@ -122,25 +123,27 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { } func (c *BootstrapLocalCmd) ConfirmLocalBootstrapWarning() error { - fmt.Println(csio.Long(`Codesphere uses Ceph for storage. For local Codesphere bootstrapping, Ceph is installed via Rook, and Minikube is the recommended way to quickly get Rook up and running. Minikube should not be used for production. While other tools such as k3d/kind are great, users have faced issues deploying Rook. + fmt.Println(csio.Long(`############################################################ +# Local Bootstrap Warning # +############################################################ -Always use a virtual machine when testing Rook. Never use your host system where local devices may mistakenly be consumed. +Codesphere local bootstrap is for testing only. -To install Minikube follow the official guide. 
It is recommended to use the qemu2 driver when running on a Linux machine and the hyperkit driver when running on a MacOS. Both allow to create and attach additional disks to the virtual machine. This is required for the Ceph OSD to consume one drive. We don't recommend any other drivers for Rook. You will need a Minikube version 1.23 or higher. +Currently supported: +- One Kubernetes cluster with Linux x86_64 nodes only +- Minikube on Linux with a VM and an extra disk for Rook/Ceph +- Minikube 1.23 or newer -Starting the cluster on Minikube is as simple as running: +Not supported: +- Minikube on macOS -# On Linux -minikube start --disk-size=40g --extra-disks=1 --driver qemu2 +Never run Rook directly on your host system; local disks may be consumed. -# On MacOS with Intel processor -minikube start --disk-size=40g --extra-disks=1 --driver hyperkit +Recommended command: + minikube start --disk-size=40g --extra-disks=1 --driver kvm2 -# On MacOS with Apple silicon -minikube start --disk-size=40g --extra-disks 1 --driver qemu - - -Consult the offical rook guide for more details and troubleshooting: https://rook.io/docs/rook/latest-release/Contributing/development-environment/#minikube +Rook guide: + https://rook.io/docs/rook/latest-release/Contributing/development-environment/#minikube `)) if c.Yes { diff --git a/internal/bootstrap/bootstrap_suite_test.go b/internal/bootstrap/bootstrap_suite_test.go new file mode 100644 index 00000000..6670d071 --- /dev/null +++ b/internal/bootstrap/bootstrap_suite_test.go @@ -0,0 +1,13 @@ +package bootstrap_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestBootstrap(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Bootstrap Suite") +} diff --git a/internal/bootstrap/codesphere_defaults.go b/internal/bootstrap/codesphere_defaults.go new file mode 100644 index 00000000..09fe511b --- /dev/null +++ b/internal/bootstrap/codesphere_defaults.go @@ -0,0 +1,81 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + +package bootstrap + +import "github.com/codesphere-cloud/oms/internal/installer/files" + +// DefaultCodesphereDeployConfig returns a fresh copy of the default Codesphere +// deployConfig used by the bootstrap flows. +func DefaultCodesphereDeployConfig() files.DeployConfig { + return files.DeployConfig{ + Images: map[string]files.ImageConfig{ + "ubuntu-24.04": { + Name: "Ubuntu 24.04", + SupportedUntil: "2028-05-31", + Flavors: map[string]files.FlavorConfig{ + "default": { + Image: files.ImageRef{ + BomRef: "workspace-agent-24.04", + }, + Pool: map[int]int{ + 1: 1, + 2: 1, + 3: 0, + }, + }, + }, + }, + }, + } +} + +// DefaultCodespherePlans returns a fresh copy of the default Codesphere plans +// used by the bootstrap flows. 
+func DefaultCodespherePlans() files.PlansConfig { + return files.PlansConfig{ + HostingPlans: map[int]files.HostingPlan{ + 1: { + CPUTenth: 20, + GPUParts: 0, + MemoryMb: 4096, + StorageMb: 20480, + TempStorageMb: 1024, + }, + 2: { + CPUTenth: 40, + GPUParts: 0, + MemoryMb: 8192, + StorageMb: 40960, + TempStorageMb: 1024, + }, + 3: { + CPUTenth: 80, + GPUParts: 0, + MemoryMb: 16384, + StorageMb: 40960, + TempStorageMb: 1024, + }, + }, + WorkspacePlans: map[int]files.WorkspacePlan{ + 1: { + Name: "Standard", + HostingPlanID: 1, + MaxReplicas: 3, + OnDemand: true, + }, + 2: { + Name: "Big", + HostingPlanID: 2, + MaxReplicas: 3, + OnDemand: true, + }, + 3: { + Name: "Pro", + HostingPlanID: 3, + MaxReplicas: 3, + OnDemand: true, + }, + }, + } +} diff --git a/internal/bootstrap/gcp/gcp.go b/internal/bootstrap/gcp/gcp.go index 49de45ca..c1c8fb83 100644 --- a/internal/bootstrap/gcp/gcp.go +++ b/internal/bootstrap/gcp/gcp.go @@ -1190,71 +1190,8 @@ func (b *GCPBootstrapper) UpdateInstallConfig() error { CNameBaseDomain: "ws." 
+ b.Env.BaseDomain, } b.Env.InstallConfig.Codesphere.DNSServers = []string{"8.8.8.8"} - b.Env.InstallConfig.Codesphere.DeployConfig = files.DeployConfig{ - Images: map[string]files.ImageConfig{ - "ubuntu-24.04": { - Name: "Ubuntu 24.04", - SupportedUntil: "2028-05-31", - Flavors: map[string]files.FlavorConfig{ - "default": { - Image: files.ImageRef{ - BomRef: "workspace-agent-24.04", - }, - Pool: map[int]int{ - 1: 1, - 2: 1, - 3: 0, - }, - }, - }, - }, - }, - } - b.Env.InstallConfig.Codesphere.Plans = files.PlansConfig{ - HostingPlans: map[int]files.HostingPlan{ - 1: { - CPUTenth: 20, - GPUParts: 0, - MemoryMb: 4096, - StorageMb: 20480, - TempStorageMb: 1024, - }, - 2: { - CPUTenth: 40, - GPUParts: 0, - MemoryMb: 8192, - StorageMb: 40960, - TempStorageMb: 1024, - }, - 3: { - CPUTenth: 80, - GPUParts: 0, - MemoryMb: 16384, - StorageMb: 40960, - TempStorageMb: 1024, - }, - }, - WorkspacePlans: map[int]files.WorkspacePlan{ - 1: { - Name: "Standard", - HostingPlanID: 1, - MaxReplicas: 3, - OnDemand: true, - }, - 2: { - Name: "Big", - HostingPlanID: 2, - MaxReplicas: 3, - OnDemand: true, - }, - 3: { - Name: "Pro", - HostingPlanID: 3, - MaxReplicas: 3, - OnDemand: true, - }, - }, - } + b.Env.InstallConfig.Codesphere.DeployConfig = bootstrap.DefaultCodesphereDeployConfig() + b.Env.InstallConfig.Codesphere.Plans = bootstrap.DefaultCodespherePlans() b.Env.InstallConfig.Codesphere.GitProviders = &files.GitProvidersConfig{} if b.Env.GitHubAppName != "" && b.Env.GithubAppClientID != "" && b.Env.GithubAppClientSecret != "" { diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 4b3f9509..d542480b 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -63,9 +63,10 @@ type LocalBootstrapper struct { } type CodesphereEnvironment struct { - BaseDomain string `json:"base_domain"` - Experiments []string `json:"experiments"` - FeatureFlags []string `json:"feature_flags"` + BaseDomain string `json:"base_domain"` + 
Experiments []string `json:"experiments"` + FeatureFlags []string `json:"feature_flags"` + ResourceProfile bootstrap.ResourceProfile `json:"resource_profile"` // Installer InstallVersion string `json:"install_version"` InstallHash string `json:"install_hash"` @@ -447,7 +448,7 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { // Store the active kubeconfig in the vault so that install-components.js // can retrieve it via SecretManagerSops when deploying Helm charts. - kubeConfigContent, err := b.readKubeConfig() + kubeConfigContent, err := b.getKubeConfig() if err != nil { return fmt.Errorf("failed to read kubeconfig: %w", err) } @@ -483,142 +484,22 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } b.Env.InstallConfig.Kubernetes.PodCIDR = podCIDR b.Env.InstallConfig.Kubernetes.ServiceCIDR = serviceCIDR - - // Override resource requests to "0" for all monitoring components so they - // can be scheduled on resource-constrained local clusters. - prometheusOverride := map[string]interface{}{ - "kube-prometheus-stack": map[string]interface{}{ - "prometheusOperator": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - "prometheus": map[string]interface{}{ - "prometheusSpec": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - }, - "prometheus-node-exporter": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - "kube-state-metrics": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - }, - } - - // Override cert-manager resource requests to "0" and reduce webhook replicas - // so components can be scheduled on resource-constrained local clusters. 
- b.Env.InstallConfig.Cluster.CertManager = &files.CertManagerConfig{ - Override: map[string]interface{}{ - "cert-manager": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - "cainjector": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - "webhook": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - "startupapicheck": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - }, - }, - } - b.Env.InstallConfig.Cluster.Monitoring = &files.MonitoringConfig{ Prometheus: &files.PrometheusConfig{ RemoteWrite: &files.RemoteWriteConfig{ Enabled: false, ClusterName: "local-test", }, - Override: prometheusOverride, - }, - BlackboxExporter: &files.BlackboxExporterConfig{ - Override: map[string]interface{}{ - "prometheus-blackbox-exporter": map[string]interface{}{ - "replicas": 1, - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - }, - }, - PushGateway: &files.PushGatewayConfig{ - Override: map[string]interface{}{ - "prometheus-pushgateway": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{"cpu": "0", "memory": "0"}, - }, - }, - }, - }, - Loki: &files.LokiConfig{ - Enabled: false, - }, - Grafana: &files.GrafanaConfig{ - Enabled: false, - }, - GrafanaAlloy: &files.GrafanaAlloyConfig{ - Enabled: false, }, + Loki: &files.LokiConfig{Enabled: false}, + Grafana: &files.GrafanaConfig{Enabled: false}, + GrafanaAlloy: &files.GrafanaAlloyConfig{Enabled: false}, } - b.Env.InstallConfig.Cluster.Gateway = files.GatewayConfig{ ServiceType: "LoadBalancer", - Override: map[string]interface{}{ - "ingress-nginx": 
map[string]interface{}{ - "controller": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": map[string]interface{}{ - "cpu": "0", - "memory": "0", - }, - }, - }, - }, - }, } b.Env.InstallConfig.Cluster.PublicGateway = files.GatewayConfig{ ServiceType: "LoadBalancer", - Override: map[string]interface{}{ - "ingress-nginx": map[string]interface{}{ - "controller": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": map[string]interface{}{ - "cpu": "0", - "memory": "0", - }, - }, - }, - }, - "nginx": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": map[string]interface{}{ - "cpu": "0", - "memory": "0", - }, - }, - }, - }, } // TODO: certificates @@ -635,114 +516,11 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } b.Env.InstallConfig.Codesphere.DNSServers = []string{"8.8.8.8"} - // Override resource requests to "0" for Codesphere services so they can - // be scheduled on resource-constrained local clusters. 
- codesphereOverride := b.Env.InstallConfig.Codesphere.Override - if codesphereOverride == nil { - codesphereOverride = map[string]interface{}{} - } - - globalOverride, ok := codesphereOverride["global"].(map[string]interface{}) - if !ok || globalOverride == nil { - globalOverride = map[string]interface{}{} - } - - servicesOverride, ok := globalOverride["services"].(map[string]interface{}) - if !ok || servicesOverride == nil { - servicesOverride = map[string]interface{}{} - } - - for _, serviceName := range []string{ - "auth_service", - "deployment_service", - "error_page_server", - "ide_frontend", - "ide_service", - "marketplace", - "payment_service", - "public_api_service", - "team_service", - "workspace_proxy", - "workspace_service", - } { - servicesOverride[serviceName] = map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": map[string]interface{}{ - "cpu": "0", - "memory": "0", - }, - }, - } - } + b.Env.InstallConfig.Codesphere.DeployConfig = bootstrap.DefaultCodesphereDeployConfig() + b.Env.InstallConfig.Codesphere.Plans = bootstrap.DefaultCodespherePlans() - globalOverride["services"] = servicesOverride - codesphereOverride["global"] = globalOverride - b.Env.InstallConfig.Codesphere.Override = codesphereOverride - - b.Env.InstallConfig.Codesphere.DeployConfig = files.DeployConfig{ - Images: map[string]files.ImageConfig{ - "ubuntu-24.04": { - Name: "Ubuntu 24.04", - SupportedUntil: "2028-05-31", - Flavors: map[string]files.FlavorConfig{ - "default": { - Image: files.ImageRef{ - BomRef: "workspace-agent-24.04", - }, - Pool: map[int]int{ - 1: 1, - 2: 1, - 3: 0, - }, - }, - }, - }, - }, - } - b.Env.InstallConfig.Codesphere.Plans = files.PlansConfig{ - HostingPlans: map[int]files.HostingPlan{ - 1: { - CPUTenth: 20, - GPUParts: 0, - MemoryMb: 4096, - StorageMb: 20480, - TempStorageMb: 1024, - }, - 2: { - CPUTenth: 40, - GPUParts: 0, - MemoryMb: 8192, - StorageMb: 40960, - TempStorageMb: 1024, - }, - 3: { - CPUTenth: 80, - GPUParts: 0, - 
MemoryMb: 16384, - StorageMb: 40960, - TempStorageMb: 1024, - }, - }, - WorkspacePlans: map[int]files.WorkspacePlan{ - 1: { - Name: "Standard", - HostingPlanID: 1, - MaxReplicas: 3, - OnDemand: true, - }, - 2: { - Name: "Big", - HostingPlanID: 2, - MaxReplicas: 3, - OnDemand: true, - }, - 3: { - Name: "Pro", - HostingPlanID: 3, - MaxReplicas: 3, - OnDemand: true, - }, - }, + if err := bootstrap.ApplyResourceProfile(b.Env.InstallConfig, b.Env.ResourceProfile); err != nil { + return fmt.Errorf("failed to apply resource profile %q: %w", b.Env.ResourceProfile, err) } b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments @@ -771,7 +549,7 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { func (b *LocalBootstrapper) EnsureGitHubAccessConfigured() error { if b.Env.RegistryPassword == "" { - return fmt.Errorf("GitHub PAT is not set") + return fmt.Errorf("Registry password is not set") } b.Env.InstallConfig.Registry.Server = "ghcr.io" b.Env.InstallConfig.Registry.Username = b.Env.RegistryUser @@ -802,9 +580,9 @@ func (b *LocalBootstrapper) addCephSecretsToVault(vault *files.InstallVault) { ) } -// readKubeConfig builds a kubeconfig YAML from the in-memory rest.Config so +// getKubeConfig builds a kubeconfig YAML from the in-memory rest.Config so // that install-components.js can use it to talk to the cluster. -func (b *LocalBootstrapper) readKubeConfig() (string, error) { +func (b *LocalBootstrapper) getKubeConfig() (string, error) { cfg := b.restConfig cluster := clientcmdapi.NewCluster() diff --git a/internal/bootstrap/resource_profiles.go b/internal/bootstrap/resource_profiles.go new file mode 100644 index 00000000..b6a6d986 --- /dev/null +++ b/internal/bootstrap/resource_profiles.go @@ -0,0 +1,208 @@ +// Copyright (c) Codesphere Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package bootstrap + +import ( + "fmt" + + "github.com/codesphere-cloud/oms/internal/installer/files" +) + +type ResourceProfile string + +const ( + ResourceProfileNoRequests ResourceProfile = "noRequests" +) + +// ApplyResourceProfile mutates a RootConfig in-place to apply the requested +// resource profile overrides. +func ApplyResourceProfile(config *files.RootConfig, profile ResourceProfile) error { + if config == nil { + return fmt.Errorf("root config is nil") + } + + switch profile { + case "": + return nil + case ResourceProfileNoRequests: + applyNoRequestsProfile(config) + return nil + default: + return fmt.Errorf("unsupported resource profile %q", profile) + } +} + +func applyNoRequestsProfile(config *files.RootConfig) { + if config.Cluster.CertManager == nil { + config.Cluster.CertManager = &files.CertManagerConfig{} + } + config.Cluster.CertManager.Override = deepMergeMaps(config.Cluster.CertManager.Override, map[string]interface{}{ + "cert-manager": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + "cainjector": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + "webhook": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + "startupapicheck": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }, + }) + + if config.Cluster.Monitoring == nil { + config.Cluster.Monitoring = &files.MonitoringConfig{} + } + if config.Cluster.Monitoring.Prometheus == nil { + config.Cluster.Monitoring.Prometheus = &files.PrometheusConfig{} + } + config.Cluster.Monitoring.Prometheus.Override = deepMergeMaps(config.Cluster.Monitoring.Prometheus.Override, map[string]interface{}{ + "kube-prometheus-stack": map[string]interface{}{ + "prometheusOperator": map[string]interface{}{ + "resources": 
map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + "prometheus": map[string]interface{}{ + "prometheusSpec": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }, + "prometheus-node-exporter": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + "kube-state-metrics": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }, + }) + + if config.Cluster.Monitoring.BlackboxExporter == nil { + config.Cluster.Monitoring.BlackboxExporter = &files.BlackboxExporterConfig{} + } + config.Cluster.Monitoring.BlackboxExporter.Override = deepMergeMaps(config.Cluster.Monitoring.BlackboxExporter.Override, map[string]interface{}{ + "prometheus-blackbox-exporter": map[string]interface{}{ + "replicas": 1, + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }) + + if config.Cluster.Monitoring.PushGateway == nil { + config.Cluster.Monitoring.PushGateway = &files.PushGatewayConfig{} + } + config.Cluster.Monitoring.PushGateway.Override = deepMergeMaps(config.Cluster.Monitoring.PushGateway.Override, map[string]interface{}{ + "prometheus-pushgateway": map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }) + + config.Cluster.Gateway.Override = deepMergeMaps(config.Cluster.Gateway.Override, map[string]interface{}{ + "ingress-nginx": map[string]interface{}{ + "controller": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }, + }) + + config.Cluster.PublicGateway.Override = deepMergeMaps(config.Cluster.PublicGateway.Override, map[string]interface{}{ + "ingress-nginx": map[string]interface{}{ + "controller": map[string]interface{}{ + "replicaCount": 1, + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }, + "nginx": map[string]interface{}{ + 
"replicaCount": 1, + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + }, + }) + + if config.Codesphere.Override == nil { + config.Codesphere.Override = map[string]interface{}{} + } + + serviceProfiles := map[string]interface{}{} + for _, serviceName := range []string{ + "auth_service", + "deployment_service", + "error_page_server", + "ide_frontend", + "ide_service", + "marketplace", + "payment_service", + "public_api_service", + "team_service", + "workspace_proxy", + "workspace_service", + } { + serviceProfiles[serviceName] = map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": zeroRequests(), + }, + } + } + + config.Codesphere.Override = deepMergeMaps(config.Codesphere.Override, map[string]interface{}{ + "global": map[string]interface{}{ + "services": serviceProfiles, + }, + }) +} + +func zeroRequests() map[string]interface{} { + return map[string]interface{}{ + "cpu": "0", + "memory": "0", + } +} + +func deepMergeMaps(dst, src map[string]interface{}) map[string]interface{} { + if dst == nil { + dst = map[string]interface{}{} + } + + for key, srcVal := range src { + srcMap, srcIsMap := srcVal.(map[string]interface{}) + if !srcIsMap { + dst[key] = srcVal + continue + } + + dstMap, dstIsMap := dst[key].(map[string]interface{}) + if !dstIsMap || dstMap == nil { + dstMap = map[string]interface{}{} + } + + dst[key] = deepMergeMaps(dstMap, srcMap) + } + + return dst +} diff --git a/internal/bootstrap/resource_profiles_test.go b/internal/bootstrap/resource_profiles_test.go new file mode 100644 index 00000000..6120bf9b --- /dev/null +++ b/internal/bootstrap/resource_profiles_test.go @@ -0,0 +1,92 @@ +package bootstrap_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/codesphere-cloud/oms/internal/bootstrap" + "github.com/codesphere-cloud/oms/internal/installer/files" +) + +var _ = Describe("ApplyResourceProfile", func() { + Describe("noRequests", func() { + It("preserves existing config while adding zero requests", func() { + config := &files.RootConfig{ + Cluster: files.ClusterConfig{ + Monitoring: &files.MonitoringConfig{ + Prometheus: &files.PrometheusConfig{ + RemoteWrite: &files.RemoteWriteConfig{ + Enabled: true, + ClusterName: "existing-cluster", + }, + }, + }, + Gateway: files.GatewayConfig{ + ServiceType: "LoadBalancer", + Override: map[string]interface{}{ + "ingress-nginx": map[string]interface{}{ + "controller": map[string]interface{}{ + "existing": "value", + }, + }, + }, + }, + }, + Codesphere: files.CodesphereConfig{ + Override: map[string]interface{}{ + "global": map[string]interface{}{ + "services": map[string]interface{}{ + "auth_service": map[string]interface{}{ + "existing": "value", + }, + }, + }, + }, + }, + } + + Expect(bootstrap.ApplyResourceProfile(config, bootstrap.ResourceProfileNoRequests)).To(Succeed()) + + Expect(config.Cluster.Monitoring).NotTo(BeNil()) + Expect(config.Cluster.Monitoring.Prometheus).NotTo(BeNil()) + Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite).NotTo(BeNil()) + Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite.Enabled).To(BeTrue()) + Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite.ClusterName).To(Equal("existing-cluster")) + + controller := mustMap(mustMap(config.Cluster.Gateway.Override["ingress-nginx"])["controller"]) + Expect(controller).To(HaveKeyWithValue("existing", "value")) + assertZeroRequests(mustMap(controller["resources"])["requests"]) + + authService := mustMap(mustMap(mustMap(config.Codesphere.Override["global"])["services"])["auth_service"]) + Expect(authService).To(HaveKeyWithValue("existing", "value")) + assertZeroRequests(mustMap(authService["resources"])["requests"]) + + 
Expect(config.Cluster.CertManager).NotTo(BeNil()) + Expect(config.Cluster.CertManager.Override).NotTo(BeNil()) + Expect(config.Cluster.Monitoring.BlackboxExporter).NotTo(BeNil()) + Expect(config.Cluster.Monitoring.PushGateway).NotTo(BeNil()) + Expect(config.Cluster.PublicGateway.Override).NotTo(BeNil()) + }) + }) + + It("returns an error for an invalid profile", func() { + config := &files.RootConfig{} + Expect(bootstrap.ApplyResourceProfile(config, bootstrap.ResourceProfile("invalid"))).To(MatchError(ContainSubstring("unsupported resource profile"))) + }) + + It("returns an error for a nil config", func() { + Expect(bootstrap.ApplyResourceProfile(nil, bootstrap.ResourceProfileNoRequests)).To(MatchError("root config is nil")) + }) +}) + +func mustMap(value interface{}) map[string]interface{} { + result, ok := value.(map[string]interface{}) + Expect(ok).To(BeTrue(), "expected map[string]interface{}, got %T", value) + return result +} + +func assertZeroRequests(value interface{}) { + requests := mustMap(value) + Expect(requests).To(HaveKeyWithValue("cpu", "0")) + Expect(requests).To(HaveKeyWithValue("memory", "0")) +} From 53bc0098382713791ca1f5df6f401709951bd334 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Tue, 10 Mar 2026 14:39:28 +0100 Subject: [PATCH 15/31] properly handle errors Signed-off-by: Tim Schrodi --- internal/bootstrap/local/local.go | 2 +- internal/bootstrap/local/rook.go | 17 +++++++++++++-- internal/installer/vault_encryption_test.go | 24 ++++++++++----------- 3 files changed, 28 insertions(+), 15 deletions(-) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index d542480b..624af6c3 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -549,7 +549,7 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { func (b *LocalBootstrapper) EnsureGitHubAccessConfigured() error { if b.Env.RegistryPassword == "" { - return fmt.Errorf("Registry password is not set") + return 
fmt.Errorf("registry password is not set") } b.Env.InstallConfig.Registry.Server = "ghcr.io" b.Env.InstallConfig.Registry.Username = b.Env.RegistryUser diff --git a/internal/bootstrap/local/rook.go b/internal/bootstrap/local/rook.go index 01a1e53f..79267c7b 100644 --- a/internal/bootstrap/local/rook.go +++ b/internal/bootstrap/local/rook.go @@ -131,7 +131,7 @@ func buildRookHelmValues() ([]byte, error) { return json.MarshalIndent(values, "", " ") } -func (b *LocalBootstrapper) InstallRookHelmChart() error { +func (b *LocalBootstrapper) InstallRookHelmChart() (err error) { repoAddOut, err := exec.CommandContext(b.ctx, "helm", "repo", "add", rookRepoName, rookRepoURL).CombinedOutput() if err != nil && !strings.Contains(string(repoAddOut), "already exists") { return fmt.Errorf("failed to add helm repo %q: %w (%s)", rookRepoName, err, strings.TrimSpace(string(repoAddOut))) @@ -151,7 +151,20 @@ func (b *LocalBootstrapper) InstallRookHelmChart() error { if err != nil { return fmt.Errorf("failed to create temp directory for Helm values: %w", err) } - defer os.RemoveAll(tmpDir) + defer func() { + cleanupErr := os.RemoveAll(tmpDir) + if cleanupErr == nil { + return + } + + cleanupErr = fmt.Errorf("failed to remove temporary Helm values directory %q: %w", tmpDir, cleanupErr) + if err != nil { + err = errors.Join(err, cleanupErr) + return + } + + err = cleanupErr + }() valuesPath := filepath.Join(tmpDir, "values.json") if err := os.WriteFile(valuesPath, helmValues, 0600); err != nil { diff --git a/internal/installer/vault_encryption_test.go b/internal/installer/vault_encryption_test.go index 206a681d..e931a710 100644 --- a/internal/installer/vault_encryption_test.go +++ b/internal/installer/vault_encryption_test.go @@ -42,29 +42,29 @@ var _ = Describe("VaultEncryption", func() { // Save and clear env vars to isolate tests. 
origAgeKey, hasOrigAgeKey = os.LookupEnv("SOPS_AGE_KEY") origAgeKeyFile, hasOrigKeyFile = os.LookupEnv("SOPS_AGE_KEY_FILE") - os.Unsetenv("SOPS_AGE_KEY") - os.Unsetenv("SOPS_AGE_KEY_FILE") + Expect(os.Unsetenv("SOPS_AGE_KEY")).To(Succeed()) + Expect(os.Unsetenv("SOPS_AGE_KEY_FILE")).To(Succeed()) }) AfterEach(func() { - os.RemoveAll(tmpDir) + Expect(os.RemoveAll(tmpDir)).To(Succeed()) // Restore env vars. if hasOrigAgeKey { - os.Setenv("SOPS_AGE_KEY", origAgeKey) + Expect(os.Setenv("SOPS_AGE_KEY", origAgeKey)).To(Succeed()) } else { - os.Unsetenv("SOPS_AGE_KEY") + Expect(os.Unsetenv("SOPS_AGE_KEY")).To(Succeed()) } if hasOrigKeyFile { - os.Setenv("SOPS_AGE_KEY_FILE", origAgeKeyFile) + Expect(os.Setenv("SOPS_AGE_KEY_FILE", origAgeKeyFile)).To(Succeed()) } else { - os.Unsetenv("SOPS_AGE_KEY_FILE") + Expect(os.Unsetenv("SOPS_AGE_KEY_FILE")).To(Succeed()) } }) Context("with SOPS_AGE_KEY env var containing key file content", func() { It("should extract the recipient from the public key comment", func() { keyContent := "# created: 2024-01-01T00:00:00Z\n# public key: age1testrecipient123\nAGE-SECRET-KEY-FAKE\n" - os.Setenv("SOPS_AGE_KEY", keyContent) + Expect(os.Setenv("SOPS_AGE_KEY", keyContent)).To(Succeed()) recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) Expect(err).ToNot(HaveOccurred()) @@ -96,7 +96,7 @@ var _ = Describe("VaultEncryption", func() { } Expect(privKeyLine).ToNot(BeEmpty()) - os.Setenv("SOPS_AGE_KEY", privKeyLine) + Expect(os.Setenv("SOPS_AGE_KEY", privKeyLine)).To(Succeed()) recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) Expect(err).ToNot(HaveOccurred()) @@ -112,7 +112,7 @@ var _ = Describe("VaultEncryption", func() { err := os.WriteFile(keyFile, []byte(keyContent), 0600) Expect(err).ToNot(HaveOccurred()) - os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + Expect(os.Setenv("SOPS_AGE_KEY_FILE", keyFile)).To(Succeed()) recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) Expect(err).ToNot(HaveOccurred()) @@ -121,7 +121,7 @@ var _ = 
Describe("VaultEncryption", func() { }) It("should return error if the file does not exist", func() { - os.Setenv("SOPS_AGE_KEY_FILE", filepath.Join(tmpDir, "nonexistent.txt")) + Expect(os.Setenv("SOPS_AGE_KEY_FILE", filepath.Join(tmpDir, "nonexistent.txt"))).To(Succeed()) _, _, err := installer.ResolveAgeKey(tmpDir) Expect(err).To(HaveOccurred()) @@ -133,7 +133,7 @@ var _ = Describe("VaultEncryption", func() { err := os.WriteFile(keyFile, []byte("AGE-SECRET-KEY-NOPUBLICCOMMENT\n"), 0600) Expect(err).ToNot(HaveOccurred()) - os.Setenv("SOPS_AGE_KEY_FILE", keyFile) + Expect(os.Setenv("SOPS_AGE_KEY_FILE", keyFile)).To(Succeed()) _, _, err = installer.ResolveAgeKey(tmpDir) Expect(err).To(HaveOccurred()) From 8591c5a205899cca3c8f30b2d8a63da943463d99 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Thu, 12 Mar 2026 19:29:58 +0100 Subject: [PATCH 16/31] fix local profiles Signed-off-by: Tim Schrodi --- internal/bootstrap/resource_profiles.go | 4 +--- internal/bootstrap/resource_profiles_test.go | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/internal/bootstrap/resource_profiles.go b/internal/bootstrap/resource_profiles.go index b6a6d986..1f1c7516 100644 --- a/internal/bootstrap/resource_profiles.go +++ b/internal/bootstrap/resource_profiles.go @@ -164,9 +164,7 @@ func applyNoRequestsProfile(config *files.RootConfig) { "workspace_service", } { serviceProfiles[serviceName] = map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, + "requests": zeroRequests(), } } diff --git a/internal/bootstrap/resource_profiles_test.go b/internal/bootstrap/resource_profiles_test.go index 6120bf9b..b436e129 100644 --- a/internal/bootstrap/resource_profiles_test.go +++ b/internal/bootstrap/resource_profiles_test.go @@ -59,7 +59,7 @@ var _ = Describe("ApplyResourceProfile", func() { authService := mustMap(mustMap(mustMap(config.Codesphere.Override["global"])["services"])["auth_service"]) 
Expect(authService).To(HaveKeyWithValue("existing", "value")) - assertZeroRequests(mustMap(authService["resources"])["requests"]) + assertZeroRequests(authService["requests"]) Expect(config.Cluster.CertManager).NotTo(BeNil()) Expect(config.Cluster.CertManager.Override).NotTo(BeNil()) From 23aaa83048c0f2dad5f9f59c5abdbcc573801fc1 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Fri, 13 Mar 2026 15:34:28 +0100 Subject: [PATCH 17/31] properly set secrets and use new profiles Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 2 +- internal/bootstrap/local/local.go | 49 ++--- internal/bootstrap/resource_profiles.go | 206 ------------------- internal/bootstrap/resource_profiles_test.go | 92 --------- internal/installer/files/config_yaml.go | 10 + 5 files changed, 27 insertions(+), 332 deletions(-) delete mode 100644 internal/bootstrap/resource_profiles.go delete mode 100644 internal/bootstrap/resource_profiles_test.go diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index be84abce..032aa7a9 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -74,7 +74,7 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.Experiments, "experiments", []string{}, "Experiments to enable in Codesphere installation (optional)") flags.StringArrayVar(&bootstrapLocalCmd.CodesphereEnv.FeatureFlags, "feature-flags", []string{}, "Feature flags to enable in Codesphere installation (optional)") - flags.StringVar((*string)(&bootstrapLocalCmd.CodesphereEnv.ResourceProfile), "resource-profile", string(bootstrap.ResourceProfileNoRequests), "Resource profile to apply to the install config (supported: noRequests)") + flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.Profile, "profile", installer.PROFILE_DEV, "Profile to apply to the install config like resources (supported: dev, 
minimal, prod)") // Config flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallDir, "install-dir", ".installer", "Directory for config, secrets, and bundle files") flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallConfigPath, "install-config", "", "Path to install config file (default: /config.yaml)") diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 624af6c3..19e068b1 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -63,10 +63,10 @@ type LocalBootstrapper struct { } type CodesphereEnvironment struct { - BaseDomain string `json:"base_domain"` - Experiments []string `json:"experiments"` - FeatureFlags []string `json:"feature_flags"` - ResourceProfile bootstrap.ResourceProfile `json:"resource_profile"` + BaseDomain string `json:"base_domain"` + Experiments []string `json:"experiments"` + FeatureFlags []string `json:"feature_flags"` + Profile string `json:"profile"` // Installer InstallVersion string `json:"install_version"` InstallHash string `json:"install_hash"` @@ -374,7 +374,7 @@ func (b *LocalBootstrapper) EnsureInstallConfig() error { b.Env.ExistingConfigUsed = true } else { - err := b.icg.ApplyProfile("dev") + err := b.icg.ApplyProfile(b.Env.Profile) if err != nil { return fmt.Errorf("failed to apply profile: %w", err) } @@ -439,7 +439,7 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { if err != nil { return fmt.Errorf("failed to read PostgreSQL superuser password: %w", err) } - b.Env.Vault.Secrets = append(b.Env.Vault.Secrets, files.SecretEntry{ + b.Env.Vault.SetSecret(files.SecretEntry{ Name: "postgresPassword", Fields: &files.SecretFields{ Password: pgPassword, @@ -452,7 +452,7 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { if err != nil { return fmt.Errorf("failed to read kubeconfig: %w", err) } - b.Env.Vault.Secrets = append(b.Env.Vault.Secrets, files.SecretEntry{ + b.Env.Vault.SetSecret(files.SecretEntry{ Name: "kubeConfig", 
File: &files.SecretFile{ Name: "kubeConfig", @@ -484,17 +484,6 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } b.Env.InstallConfig.Kubernetes.PodCIDR = podCIDR b.Env.InstallConfig.Kubernetes.ServiceCIDR = serviceCIDR - b.Env.InstallConfig.Cluster.Monitoring = &files.MonitoringConfig{ - Prometheus: &files.PrometheusConfig{ - RemoteWrite: &files.RemoteWriteConfig{ - Enabled: false, - ClusterName: "local-test", - }, - }, - Loki: &files.LokiConfig{Enabled: false}, - Grafana: &files.GrafanaConfig{Enabled: false}, - GrafanaAlloy: &files.GrafanaAlloyConfig{Enabled: false}, - } b.Env.InstallConfig.Cluster.Gateway = files.GatewayConfig{ ServiceType: "LoadBalancer", } @@ -519,10 +508,6 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { b.Env.InstallConfig.Codesphere.DeployConfig = bootstrap.DefaultCodesphereDeployConfig() b.Env.InstallConfig.Codesphere.Plans = bootstrap.DefaultCodespherePlans() - if err := bootstrap.ApplyResourceProfile(b.Env.InstallConfig, b.Env.ResourceProfile); err != nil { - return fmt.Errorf("failed to apply resource profile %q: %w", b.Env.ResourceProfile, err) - } - b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments b.Env.InstallConfig.Codesphere.Features = b.Env.FeatureFlags @@ -567,17 +552,15 @@ func (b *LocalBootstrapper) EnsureGitHubAccessConfigured() error { func (b *LocalBootstrapper) addCephSecretsToVault(vault *files.InstallVault) { creds := b.cephCredentials - vault.Secrets = append(vault.Secrets, - files.SecretEntry{Name: "cephFsId", Fields: &files.SecretFields{Password: creds.FSID}}, - files.SecretEntry{Name: "cephfsAdmin", Fields: &files.SecretFields{Username: creds.CephfsAdmin.Entity, Password: creds.CephfsAdmin.Key}}, - files.SecretEntry{Name: "cephfsAdminCodesphere", Fields: &files.SecretFields{Username: creds.CephfsAdminCodesphere.Entity, Password: creds.CephfsAdminCodesphere.Key}}, - files.SecretEntry{Name: "csiRbdNode", Fields: &files.SecretFields{Username: creds.CSIRBDNode.Entity, 
Password: creds.CSIRBDNode.Key}}, - files.SecretEntry{Name: "csiRbdProvisioner", Fields: &files.SecretFields{Username: creds.CSIRBDProvisioner.Entity, Password: creds.CSIRBDProvisioner.Key}}, - files.SecretEntry{Name: "csiCephfsNode", Fields: &files.SecretFields{Username: creds.CSICephFSNode.Entity, Password: creds.CSICephFSNode.Key}}, - files.SecretEntry{Name: "csiCephfsProvisioner", Fields: &files.SecretFields{Username: creds.CSICephFSProvisioner.Entity, Password: creds.CSICephFSProvisioner.Key}}, - // csiOperator is managed by Rook internally; provide a dummy value for vault compatibility. - files.SecretEntry{Name: "csiOperator", Fields: &files.SecretFields{Username: "client.csi-rbd-provisioner", Password: "dummy"}}, - ) + vault.SetSecret(files.SecretEntry{Name: "cephFsId", Fields: &files.SecretFields{Password: creds.FSID}}) + vault.SetSecret(files.SecretEntry{Name: "cephfsAdmin", Fields: &files.SecretFields{Username: creds.CephfsAdmin.Entity, Password: creds.CephfsAdmin.Key}}) + vault.SetSecret(files.SecretEntry{Name: "cephfsAdminCodesphere", Fields: &files.SecretFields{Username: creds.CephfsAdminCodesphere.Entity, Password: creds.CephfsAdminCodesphere.Key}}) + vault.SetSecret(files.SecretEntry{Name: "csiRbdNode", Fields: &files.SecretFields{Username: creds.CSIRBDNode.Entity, Password: creds.CSIRBDNode.Key}}) + vault.SetSecret(files.SecretEntry{Name: "csiRbdProvisioner", Fields: &files.SecretFields{Username: creds.CSIRBDProvisioner.Entity, Password: creds.CSIRBDProvisioner.Key}}) + vault.SetSecret(files.SecretEntry{Name: "csiCephfsNode", Fields: &files.SecretFields{Username: creds.CSICephFSNode.Entity, Password: creds.CSICephFSNode.Key}}) + vault.SetSecret(files.SecretEntry{Name: "csiCephfsProvisioner", Fields: &files.SecretFields{Username: creds.CSICephFSProvisioner.Entity, Password: creds.CSICephFSProvisioner.Key}}) + // csiOperator is managed by Rook internally; provide a dummy value for vault compatibility. 
+ vault.SetSecret(files.SecretEntry{Name: "csiOperator", Fields: &files.SecretFields{Username: "client.csi-rbd-provisioner", Password: "dummy"}}) } // getKubeConfig builds a kubeconfig YAML from the in-memory rest.Config so diff --git a/internal/bootstrap/resource_profiles.go b/internal/bootstrap/resource_profiles.go deleted file mode 100644 index 1f1c7516..00000000 --- a/internal/bootstrap/resource_profiles.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) Codesphere Inc. -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap - -import ( - "fmt" - - "github.com/codesphere-cloud/oms/internal/installer/files" -) - -type ResourceProfile string - -const ( - ResourceProfileNoRequests ResourceProfile = "noRequests" -) - -// ApplyResourceProfile mutates a RootConfig in-place to apply the requested -// resource profile overrides. -func ApplyResourceProfile(config *files.RootConfig, profile ResourceProfile) error { - if config == nil { - return fmt.Errorf("root config is nil") - } - - switch profile { - case "": - return nil - case ResourceProfileNoRequests: - applyNoRequestsProfile(config) - return nil - default: - return fmt.Errorf("unsupported resource profile %q", profile) - } -} - -func applyNoRequestsProfile(config *files.RootConfig) { - if config.Cluster.CertManager == nil { - config.Cluster.CertManager = &files.CertManagerConfig{} - } - config.Cluster.CertManager.Override = deepMergeMaps(config.Cluster.CertManager.Override, map[string]interface{}{ - "cert-manager": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - "cainjector": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - "webhook": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - "startupapicheck": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }, - }) - - if 
config.Cluster.Monitoring == nil { - config.Cluster.Monitoring = &files.MonitoringConfig{} - } - if config.Cluster.Monitoring.Prometheus == nil { - config.Cluster.Monitoring.Prometheus = &files.PrometheusConfig{} - } - config.Cluster.Monitoring.Prometheus.Override = deepMergeMaps(config.Cluster.Monitoring.Prometheus.Override, map[string]interface{}{ - "kube-prometheus-stack": map[string]interface{}{ - "prometheusOperator": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - "prometheus": map[string]interface{}{ - "prometheusSpec": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }, - "prometheus-node-exporter": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - "kube-state-metrics": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }, - }) - - if config.Cluster.Monitoring.BlackboxExporter == nil { - config.Cluster.Monitoring.BlackboxExporter = &files.BlackboxExporterConfig{} - } - config.Cluster.Monitoring.BlackboxExporter.Override = deepMergeMaps(config.Cluster.Monitoring.BlackboxExporter.Override, map[string]interface{}{ - "prometheus-blackbox-exporter": map[string]interface{}{ - "replicas": 1, - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }) - - if config.Cluster.Monitoring.PushGateway == nil { - config.Cluster.Monitoring.PushGateway = &files.PushGatewayConfig{} - } - config.Cluster.Monitoring.PushGateway.Override = deepMergeMaps(config.Cluster.Monitoring.PushGateway.Override, map[string]interface{}{ - "prometheus-pushgateway": map[string]interface{}{ - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }) - - config.Cluster.Gateway.Override = deepMergeMaps(config.Cluster.Gateway.Override, map[string]interface{}{ - "ingress-nginx": map[string]interface{}{ - "controller": 
map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }, - }) - - config.Cluster.PublicGateway.Override = deepMergeMaps(config.Cluster.PublicGateway.Override, map[string]interface{}{ - "ingress-nginx": map[string]interface{}{ - "controller": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }, - "nginx": map[string]interface{}{ - "replicaCount": 1, - "resources": map[string]interface{}{ - "requests": zeroRequests(), - }, - }, - }) - - if config.Codesphere.Override == nil { - config.Codesphere.Override = map[string]interface{}{} - } - - serviceProfiles := map[string]interface{}{} - for _, serviceName := range []string{ - "auth_service", - "deployment_service", - "error_page_server", - "ide_frontend", - "ide_service", - "marketplace", - "payment_service", - "public_api_service", - "team_service", - "workspace_proxy", - "workspace_service", - } { - serviceProfiles[serviceName] = map[string]interface{}{ - "requests": zeroRequests(), - } - } - - config.Codesphere.Override = deepMergeMaps(config.Codesphere.Override, map[string]interface{}{ - "global": map[string]interface{}{ - "services": serviceProfiles, - }, - }) -} - -func zeroRequests() map[string]interface{} { - return map[string]interface{}{ - "cpu": "0", - "memory": "0", - } -} - -func deepMergeMaps(dst, src map[string]interface{}) map[string]interface{} { - if dst == nil { - dst = map[string]interface{}{} - } - - for key, srcVal := range src { - srcMap, srcIsMap := srcVal.(map[string]interface{}) - if !srcIsMap { - dst[key] = srcVal - continue - } - - dstMap, dstIsMap := dst[key].(map[string]interface{}) - if !dstIsMap || dstMap == nil { - dstMap = map[string]interface{}{} - } - - dst[key] = deepMergeMaps(dstMap, srcMap) - } - - return dst -} diff --git a/internal/bootstrap/resource_profiles_test.go b/internal/bootstrap/resource_profiles_test.go deleted file mode 
100644 index b436e129..00000000 --- a/internal/bootstrap/resource_profiles_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package bootstrap_test - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/codesphere-cloud/oms/internal/bootstrap" - "github.com/codesphere-cloud/oms/internal/installer/files" -) - -var _ = Describe("ApplyResourceProfile", func() { - Describe("noRequests", func() { - It("preserves existing config while adding zero requests", func() { - config := &files.RootConfig{ - Cluster: files.ClusterConfig{ - Monitoring: &files.MonitoringConfig{ - Prometheus: &files.PrometheusConfig{ - RemoteWrite: &files.RemoteWriteConfig{ - Enabled: true, - ClusterName: "existing-cluster", - }, - }, - }, - Gateway: files.GatewayConfig{ - ServiceType: "LoadBalancer", - Override: map[string]interface{}{ - "ingress-nginx": map[string]interface{}{ - "controller": map[string]interface{}{ - "existing": "value", - }, - }, - }, - }, - }, - Codesphere: files.CodesphereConfig{ - Override: map[string]interface{}{ - "global": map[string]interface{}{ - "services": map[string]interface{}{ - "auth_service": map[string]interface{}{ - "existing": "value", - }, - }, - }, - }, - }, - } - - Expect(bootstrap.ApplyResourceProfile(config, bootstrap.ResourceProfileNoRequests)).To(Succeed()) - - Expect(config.Cluster.Monitoring).NotTo(BeNil()) - Expect(config.Cluster.Monitoring.Prometheus).NotTo(BeNil()) - Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite).NotTo(BeNil()) - Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite.Enabled).To(BeTrue()) - Expect(config.Cluster.Monitoring.Prometheus.RemoteWrite.ClusterName).To(Equal("existing-cluster")) - - controller := mustMap(mustMap(config.Cluster.Gateway.Override["ingress-nginx"])["controller"]) - Expect(controller).To(HaveKeyWithValue("existing", "value")) - assertZeroRequests(mustMap(controller["resources"])["requests"]) - - authService := 
mustMap(mustMap(mustMap(config.Codesphere.Override["global"])["services"])["auth_service"]) - Expect(authService).To(HaveKeyWithValue("existing", "value")) - assertZeroRequests(authService["requests"]) - - Expect(config.Cluster.CertManager).NotTo(BeNil()) - Expect(config.Cluster.CertManager.Override).NotTo(BeNil()) - Expect(config.Cluster.Monitoring.BlackboxExporter).NotTo(BeNil()) - Expect(config.Cluster.Monitoring.PushGateway).NotTo(BeNil()) - Expect(config.Cluster.PublicGateway.Override).NotTo(BeNil()) - }) - }) - - It("returns an error for an invalid profile", func() { - config := &files.RootConfig{} - Expect(bootstrap.ApplyResourceProfile(config, bootstrap.ResourceProfile("invalid"))).To(MatchError(ContainSubstring("unsupported resource profile"))) - }) - - It("returns an error for a nil config", func() { - Expect(bootstrap.ApplyResourceProfile(nil, bootstrap.ResourceProfileNoRequests)).To(MatchError("root config is nil")) - }) -}) - -func mustMap(value interface{}) map[string]interface{} { - result, ok := value.(map[string]interface{}) - Expect(ok).To(BeTrue(), "expected map[string]interface{}, got %T", value) - return result -} - -func assertZeroRequests(value interface{}) { - requests := mustMap(value) - Expect(requests).To(HaveKeyWithValue("cpu", "0")) - Expect(requests).To(HaveKeyWithValue("memory", "0")) -} diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 39372ea1..957a97ca 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -19,6 +19,16 @@ func (v *InstallVault) Marshal() ([]byte, error) { return yaml.Marshal(v) } +func (v *InstallVault) SetSecret(entry SecretEntry) { + for i, s := range v.Secrets { + if s.Name == entry.Name { + v.Secrets[i] = entry + return + } + } + v.Secrets = append(v.Secrets, entry) +} + func (v *InstallVault) Unmarshal(data []byte) error { return yaml.Unmarshal(data, v) } From e608b2e056fba0ac4b4d01953d775290d508944f Mon Sep 17 
00:00:00 2001 From: Tim Schrodi Date: Fri, 13 Mar 2026 15:58:30 +0100 Subject: [PATCH 18/31] set underprovisioning factor Signed-off-by: Tim Schrodi --- internal/installer/resource_profiles.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/installer/resource_profiles.go b/internal/installer/resource_profiles.go index e6e1a7a1..49a2f020 100644 --- a/internal/installer/resource_profiles.go +++ b/internal/installer/resource_profiles.go @@ -170,6 +170,10 @@ func applyNoRequestsProfile(config *files.RootConfig) { config.Codesphere.Override = util.DeepMergeMaps(config.Codesphere.Override, map[string]any{ "global": map[string]any{ "services": serviceProfiles, + "underprovisionFactors": map[string]any{ + "cpu": 0.25, + "memory": 0.25, + }, }, }) } From dc49de4af48570c57a65e1b2507ed233a11d0fbd Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Fri, 13 Mar 2026 16:09:53 +0100 Subject: [PATCH 19/31] remove registry pw from flag and read from stdin or env var Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 26 +++++++++++++++++++++++-- internal/installer/files/config_yaml.go | 1 - 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index 032aa7a9..ef503d8b 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -14,6 +14,8 @@ import ( "path/filepath" "strings" + "golang.org/x/term" + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" csio "github.com/codesphere-cloud/cs-go/pkg/io" rookcephv1 "github.com/codesphere-cloud/oms/external/ceph.rook.io/v1" @@ -68,7 +70,6 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.InstallLocal, "install-local", "", "Path to a local installer package (tar.gz or unpacked directory)") // Registry flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryUser, "registry-user", "", "Custom Registry username (optional)") - 
flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.RegistryPassword, "registry-password", "", "Registry password to use for direct image access (for ghcr.io, use a GitHub Personal Access Token with package read scope)") // Codesphere Environment flags.StringVar(&bootstrapLocalCmd.CodesphereEnv.BaseDomain, "base-domain", "cs.local", "Base domain for Codesphere") @@ -82,7 +83,6 @@ func AddBootstrapLocalCmd(parent *cobra.Command) { bootstrapLocalCmd.cmd.RunE = bootstrapLocalCmd.RunE util.MarkFlagRequired(bootstrapLocalCmd.cmd, "registry-user") - util.MarkFlagRequired(bootstrapLocalCmd.cmd, "registry-password") parent.AddCommand(bootstrapLocalCmd.cmd) } @@ -93,6 +93,10 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { return err } + if err := c.resolveRegistryPassword(); err != nil { + return err + } + // Resolve install-config and secrets-file defaults from install-dir. if c.CodesphereEnv.InstallConfigPath == "" { c.CodesphereEnv.InstallConfigPath = filepath.Join(c.CodesphereEnv.InstallDir, "config.yaml") @@ -122,6 +126,24 @@ func (c *BootstrapLocalCmd) BootstrapLocal() error { return bs.Bootstrap() } +func (c *BootstrapLocalCmd) resolveRegistryPassword() error { + if pw := os.Getenv("OMS_REGISTRY_PASSWORD"); len(pw) != 0 { + c.CodesphereEnv.RegistryPassword = pw + return nil + } + fmt.Print("Registry password: ") + pw, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Println() + if err != nil { + return fmt.Errorf("failed to read registry password: %w", err) + } + if len(pw) == 0 { + return fmt.Errorf("registry password is required; set OMS_REGISTRY_PASSWORD or enter it when prompted") + } + c.CodesphereEnv.RegistryPassword = string(pw) + return nil +} + func (c *BootstrapLocalCmd) ConfirmLocalBootstrapWarning() error { fmt.Println(csio.Long(`############################################################ # Local Bootstrap Warning # diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 98120a0c..14c41e1c 100644 --- 
a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -289,7 +289,6 @@ type CodesphereConfig struct { DNSServers []string `yaml:"dnsServers"` Experiments []string `yaml:"experiments"` Features []string `yaml:"features"` - Override map[string]interface{} `yaml:"override,omitempty"` ExtraCAPem string `yaml:"extraCaPem,omitempty"` ExtraWorkspaceEnvVars map[string]string `yaml:"extraWorkspaceEnvVars,omitempty"` ExtraWorkspaceFiles []ExtraWorkspaceFile `yaml:"extraWorkspaceFiles,omitempty"` From 53766ee548d3943dcf2d9ca2024fab4aa3923c5a Mon Sep 17 00:00:00 2001 From: schrodit <7979201+schrodit@users.noreply.github.com> Date: Fri, 13 Mar 2026 15:20:54 +0000 Subject: [PATCH 20/31] chore(docs): Auto-update docs and licenses Signed-off-by: schrodit <7979201+schrodit@users.noreply.github.com> --- .golangci.yml | 3 + NOTICE | 174 +++++++++++++++++++-- docs/oms_beta_bootstrap-local.md | 19 ++- external/ceph.rook.io/doc.go | 3 + external/ceph.rook.io/register.go | 3 + internal/bootstrap/bootstrap_suite_test.go | 3 + internal/bootstrap/local/local.go | 3 + internal/tmpl/NOTICE | 174 +++++++++++++++++++-- 8 files changed, 354 insertions(+), 28 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f2d393e5..4d022960 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,3 +1,6 @@ +# Copyright (c) Codesphere Inc. 
+# SPDX-License-Identifier: Apache-2.0 + version: "2" linters: exclusions: diff --git a/NOTICE b/NOTICE index 6d4dba00..4d109efb 100644 --- a/NOTICE +++ b/NOTICE @@ -123,6 +123,12 @@ Version: v0.0.0-20230301143203-a9d515a09cc2 License: MIT License URL: https://github.com/asaskevich/govalidator/blob/a9d515a09cc2/LICENSE +---------- +Module: github.com/beorn7/perks/quantile +Version: v1.0.1 +License: MIT +License URL: https://github.com/beorn7/perks/blob/v1.0.1/LICENSE + ---------- Module: github.com/blang/semver/v4 Version: v4.0.0 @@ -153,6 +159,30 @@ Version: v1.6.3 License: BSD-3-Clause License URL: https://github.com/cloudflare/circl/blob/v1.6.3/LICENSE +---------- +Module: github.com/cloudnative-pg/barman-cloud/pkg/api +Version: v0.4.1-0.20260108104508-ced266c145f5 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/barman-cloud/blob/ced266c145f5/LICENSE + +---------- +Module: github.com/cloudnative-pg/cloudnative-pg +Version: v1.28.1 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.28.1/LICENSE + +---------- +Module: github.com/cloudnative-pg/cnpg-i/pkg/identity +Version: v0.3.1 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/cnpg-i/blob/v0.3.1/LICENSE + +---------- +Module: github.com/cloudnative-pg/machinery/pkg +Version: v0.3.3 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/machinery/blob/v0.3.3/LICENSE + ---------- Module: github.com/codesphere-cloud/cs-go Version: v0.19.4 @@ -165,6 +195,12 @@ Version: Unknown License: Apache-2.0 License URL: https://github.com/codesphere-cloud/oms/blob/HEAD/internal/tmpl/LICENSE +---------- +Module: github.com/containernetworking/cni +Version: v1.2.0-rc1 +License: Apache-2.0 +License URL: https://github.com/containernetworking/cni/blob/v1.2.0-rc1/LICENSE + ---------- Module: github.com/cpuguy83/go-md2man/v2/md2man Version: v2.0.7 @@ -203,9 +239,9 @@ License URL: 
https://github.com/dylibso/observe-sdk/blob/2d926c5d788a/go/LICENSE ---------- Module: github.com/emicklei/go-restful/v3 -Version: v3.12.2 +Version: v3.13.0 License: MIT -License URL: https://github.com/emicklei/go-restful/blob/v3.12.2/LICENSE +License URL: https://github.com/emicklei/go-restful/blob/v3.13.0/LICENSE ---------- Module: github.com/evanphx/json-patch/v5 @@ -243,6 +279,12 @@ Version: v0.37.2-flux.1 License: Apache-2.0 License URL: https://github.com/fluxcd/cli-utils/blob/v0.37.2-flux.1/LICENSE +---------- +Module: github.com/fsnotify/fsnotify +Version: v1.9.0 +License: BSD-3-Clause +License URL: https://github.com/fsnotify/fsnotify/blob/v1.9.0/LICENSE + ---------- Module: github.com/fxamacker/cbor/v2 Version: v2.9.0 @@ -279,6 +321,12 @@ Version: v1.2.2 License: Apache-2.0 License URL: https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE +---------- +Module: github.com/go-logr/zapr +Version: v1.3.0 +License: Apache-2.0 +License URL: https://github.com/go-logr/zapr/blob/v1.3.0/LICENSE + ---------- Module: github.com/go-openapi/jsonpointer Version: v0.22.4 @@ -377,9 +425,9 @@ License URL: https://github.com/google/btree/blob/v1.1.3/LICENSE ---------- Module: github.com/google/gnostic-models -Version: v0.7.0 +Version: v0.7.1 License: Apache-2.0 -License URL: https://github.com/google/gnostic-models/blob/v0.7.0/LICENSE +License URL: https://github.com/google/gnostic-models/blob/v0.7.1/LICENSE ---------- Module: github.com/google/go-cmp/cmp @@ -423,6 +471,12 @@ Version: v2.17.0 License: BSD-3-Clause License URL: https://github.com/googleapis/gax-go/blob/v2.17.0/v2/LICENSE +---------- +Module: github.com/gorilla/websocket +Version: v1.5.4-0.20250319132907-e064f32e3674 +License: BSD-2-Clause +License URL: https://github.com/gorilla/websocket/blob/e064f32e3674/LICENSE + ---------- Module: github.com/gosuri/uitable Version: v0.0.4 @@ -489,12 +543,24 @@ Version: v1.1.12 License: MIT License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE +---------- 
+Module: github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg +Version: v1.7.7 +License: Apache-2.0 +License URL: https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/v1.7.7/LICENSE + ---------- Module: github.com/kr/fs Version: v0.1.0 License: BSD-3-Clause License URL: https://github.com/kr/fs/blob/v0.1.0/LICENSE +---------- +Module: github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1 +Version: v8.4.0 +License: Apache-2.0 +License URL: https://github.com/kubernetes-csi/external-snapshotter/blob/client/v8.4.0/client/LICENSE + ---------- Module: github.com/lann/builder Version: v0.0.0-20180802200727-47ae307949d0 @@ -561,6 +627,12 @@ Version: v1.0.2 License: MIT License URL: https://github.com/mitchellh/reflectwalk/blob/v1.0.2/LICENSE +---------- +Module: github.com/moby/spdystream +Version: v0.5.0 +License: Apache-2.0 +License URL: https://github.com/moby/spdystream/blob/v0.5.0/LICENSE + ---------- Module: github.com/moby/term Version: v0.5.2 @@ -591,6 +663,12 @@ Version: v0.0.0-20191010083416-a7dc8b61c822 License: BSD-3-Clause License URL: https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE +---------- +Module: github.com/mxk/go-flowrate/flowrate +Version: v0.0.0-20140419014527-cca7078d478f +License: BSD-3-Clause +License URL: https://github.com/mxk/go-flowrate/blob/cca7078d478f/LICENSE + ---------- Module: github.com/onsi/gomega Version: v1.39.1 @@ -609,12 +687,24 @@ Version: v1.1.1 License: Apache-2.0 License URL: https://github.com/opencontainers/image-spec/blob/v1.1.1/LICENSE +---------- +Module: github.com/openshift/api/security/v1 +Version: v0.0.0-20260306105915-ec7ab20aa8c4 +License: Apache-2.0 +License URL: https://github.com/openshift/api/blob/ec7ab20aa8c4/LICENSE + ---------- Module: github.com/peterbourgon/diskv Version: v2.0.1 License: MIT License URL: https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE +---------- +Module: github.com/pkg/errors +Version: 
v0.9.1 +License: BSD-2-Clause +License URL: https://github.com/pkg/errors/blob/v0.9.1/LICENSE + ---------- Module: github.com/pkg/sftp Version: v1.13.10 @@ -627,6 +717,42 @@ Version: v1.0.1-0.20181226105442-5d4384ee4fb2 License: BSD-3-Clause License URL: https://github.com/pmezard/go-difflib/blob/5d4384ee4fb2/LICENSE +---------- +Module: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring +Version: v0.87.1 +License: Apache-2.0 +License URL: https://github.com/prometheus-operator/prometheus-operator/blob/pkg/apis/monitoring/v0.87.1/pkg/apis/monitoring/LICENSE + +---------- +Module: github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +Version: v1.23.2 +License: BSD-3-Clause +License URL: https://github.com/prometheus/client_golang/blob/v1.23.2/internal/github.com/golang/gddo/LICENSE + +---------- +Module: github.com/prometheus/client_golang/prometheus +Version: v1.23.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE + +---------- +Module: github.com/prometheus/client_model/go +Version: v0.6.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE + +---------- +Module: github.com/prometheus/common +Version: v0.67.5 +License: Apache-2.0 +License URL: https://github.com/prometheus/common/blob/v0.67.5/LICENSE + +---------- +Module: github.com/prometheus/procfs +Version: v0.19.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/procfs/blob/v0.19.2/LICENSE + ---------- Module: github.com/rubenv/sql-migrate Version: v1.8.1 @@ -795,6 +921,18 @@ Version: v1.9.0 License: Apache-2.0 License URL: https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.9.0/otlp/LICENSE +---------- +Module: go.uber.org/multierr +Version: v1.11.0 +License: MIT +License URL: https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt + +---------- +Module: go.uber.org/zap +Version: v1.27.1 +License: MIT +License URL: 
https://github.com/uber-go/zap/blob/v1.27.1/LICENSE + ---------- Module: go.yaml.in/yaml/v2 Version: v2.4.3 @@ -813,6 +951,12 @@ Version: v0.49.0 License: BSD-3-Clause License URL: https://cs.opensource.google/go/x/crypto/+/v0.49.0:LICENSE +---------- +Module: golang.org/x/mod/semver +Version: v0.33.0 +License: BSD-3-Clause +License URL: https://cs.opensource.google/go/x/mod/+/v0.33.0:LICENSE + ---------- Module: golang.org/x/net Version: v0.51.0 @@ -855,6 +999,12 @@ Version: v0.15.0 License: BSD-3-Clause License URL: https://cs.opensource.google/go/x/time/+/v0.15.0:LICENSE +---------- +Module: gomodules.xyz/jsonpatch/v2 +Version: v2.5.0 +License: Apache-2.0 +License URL: https://github.com/gomodules/jsonpatch/blob/v2.5.0/v2/LICENSE + ---------- Module: google.golang.org/api Version: v0.271.0 @@ -1013,15 +1163,15 @@ License URL: https://github.com/kubernetes/kubectl/blob/v0.35.1/LICENSE ---------- Module: k8s.io/utils -Version: v0.0.0-20260106112306-0fe9cd71b2f8 +Version: v0.0.0-20260108192941-914a6e750570 License: Apache-2.0 -License URL: https://github.com/kubernetes/utils/blob/0fe9cd71b2f8/LICENSE +License URL: https://github.com/kubernetes/utils/blob/914a6e750570/LICENSE ---------- -Module: k8s.io/utils/internal/third_party/forked/golang/net -Version: v0.0.0-20260106112306-0fe9cd71b2f8 +Module: k8s.io/utils/internal/third_party/forked/golang +Version: v0.0.0-20260108192941-914a6e750570 License: BSD-3-Clause -License URL: https://github.com/kubernetes/utils/blob/0fe9cd71b2f8/internal/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/utils/blob/914a6e750570/internal/third_party/forked/golang/LICENSE ---------- Module: oras.land/oras-go/v2 @@ -1030,10 +1180,10 @@ License: Apache-2.0 License URL: https://github.com/oras-project/oras-go/blob/v2.6.0/LICENSE ---------- -Module: sigs.k8s.io/controller-runtime/pkg -Version: v0.23.1 +Module: sigs.k8s.io/controller-runtime +Version: v0.23.2 License: Apache-2.0 -License URL: 
https://github.com/kubernetes-sigs/controller-runtime/blob/v0.23.1/LICENSE +License URL: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.23.2/LICENSE ---------- Module: sigs.k8s.io/json diff --git a/docs/oms_beta_bootstrap-local.md b/docs/oms_beta_bootstrap-local.md index 052e1a80..23bba49c 100644 --- a/docs/oms_beta_bootstrap-local.md +++ b/docs/oms_beta_bootstrap-local.md @@ -4,9 +4,9 @@ Bootstrap a local Codesphere environment ### Synopsis -Bootstraps a local Codesphere environment using only a single Kubernetes cluster. +Bootstraps a local Codesphere environment using a single Linux x86_64 Kubernetes cluster. Rook is used to install Ceph, and CNPG is used for the PostgreSQL database. -Warning: For local setups, it is highly recommended to use Minikube with a virtual machine, similar to the Rook Minikube guide. +For local setups, use Minikube with a virtual machine on Linux. Not for production use. ``` @@ -16,8 +16,19 @@ oms beta bootstrap-local [flags] ### Options ``` - -h, --help help for bootstrap-local - -y, --yes Auto-approve the local bootstrapping warning prompt + --base-domain string Base domain for Codesphere (default "cs.local") + --experiments stringArray Experiments to enable in Codesphere installation (optional) + --feature-flags stringArray Feature flags to enable in Codesphere installation (optional) + -h, --help help for bootstrap-local + --install-config string Path to install config file (default: /config.yaml) + --install-dir string Directory for config, secrets, and bundle files (default ".installer") + --install-hash string Codesphere package hash (required when install-version is set) + --install-local string Path to a local installer package (tar.gz or unpacked directory) + --install-version string Codesphere version to install (downloaded from the OMS portal) + --profile string Profile to apply to the install config like resources (supported: dev, minimal, prod) (default "dev") + --registry-user string Custom Registry 
username (optional) + --secrets-file string Path to secrets file (default: /prod.vault.yaml) + -y, --yes Auto-approve the local bootstrapping warning prompt ``` ### SEE ALSO diff --git a/external/ceph.rook.io/doc.go b/external/ceph.rook.io/doc.go index b03c6f15..1a2d5a05 100644 --- a/external/ceph.rook.io/doc.go +++ b/external/ceph.rook.io/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + // Copy of github.com/rook/rook/pkg/apis/ceph.rook.io/v1, due to issues // with the package dependencies: // diff --git a/external/ceph.rook.io/register.go b/external/ceph.rook.io/register.go index d722c383..87ee3962 100644 --- a/external/ceph.rook.io/register.go +++ b/external/ceph.rook.io/register.go @@ -1,3 +1,6 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + package cephrookio const ( diff --git a/internal/bootstrap/bootstrap_suite_test.go b/internal/bootstrap/bootstrap_suite_test.go index 6670d071..8b287edb 100644 --- a/internal/bootstrap/bootstrap_suite_test.go +++ b/internal/bootstrap/bootstrap_suite_test.go @@ -1,3 +1,6 @@ +// Copyright (c) Codesphere Inc. +// SPDX-License-Identifier: Apache-2.0 + package bootstrap_test import ( diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 19e068b1..6b7610cc 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -1,3 +1,6 @@ +// Copyright (c) Codesphere Inc. 
+// SPDX-License-Identifier: Apache-2.0 + package local import ( diff --git a/internal/tmpl/NOTICE b/internal/tmpl/NOTICE index 6d4dba00..4d109efb 100644 --- a/internal/tmpl/NOTICE +++ b/internal/tmpl/NOTICE @@ -123,6 +123,12 @@ Version: v0.0.0-20230301143203-a9d515a09cc2 License: MIT License URL: https://github.com/asaskevich/govalidator/blob/a9d515a09cc2/LICENSE +---------- +Module: github.com/beorn7/perks/quantile +Version: v1.0.1 +License: MIT +License URL: https://github.com/beorn7/perks/blob/v1.0.1/LICENSE + ---------- Module: github.com/blang/semver/v4 Version: v4.0.0 @@ -153,6 +159,30 @@ Version: v1.6.3 License: BSD-3-Clause License URL: https://github.com/cloudflare/circl/blob/v1.6.3/LICENSE +---------- +Module: github.com/cloudnative-pg/barman-cloud/pkg/api +Version: v0.4.1-0.20260108104508-ced266c145f5 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/barman-cloud/blob/ced266c145f5/LICENSE + +---------- +Module: github.com/cloudnative-pg/cloudnative-pg +Version: v1.28.1 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/cloudnative-pg/blob/v1.28.1/LICENSE + +---------- +Module: github.com/cloudnative-pg/cnpg-i/pkg/identity +Version: v0.3.1 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/cnpg-i/blob/v0.3.1/LICENSE + +---------- +Module: github.com/cloudnative-pg/machinery/pkg +Version: v0.3.3 +License: Apache-2.0 +License URL: https://github.com/cloudnative-pg/machinery/blob/v0.3.3/LICENSE + ---------- Module: github.com/codesphere-cloud/cs-go Version: v0.19.4 @@ -165,6 +195,12 @@ Version: Unknown License: Apache-2.0 License URL: https://github.com/codesphere-cloud/oms/blob/HEAD/internal/tmpl/LICENSE +---------- +Module: github.com/containernetworking/cni +Version: v1.2.0-rc1 +License: Apache-2.0 +License URL: https://github.com/containernetworking/cni/blob/v1.2.0-rc1/LICENSE + ---------- Module: github.com/cpuguy83/go-md2man/v2/md2man Version: v2.0.7 @@ -203,9 +239,9 @@ License URL: 
https://github.com/dylibso/observe-sdk/blob/2d926c5d788a/go/LICENSE ---------- Module: github.com/emicklei/go-restful/v3 -Version: v3.12.2 +Version: v3.13.0 License: MIT -License URL: https://github.com/emicklei/go-restful/blob/v3.12.2/LICENSE +License URL: https://github.com/emicklei/go-restful/blob/v3.13.0/LICENSE ---------- Module: github.com/evanphx/json-patch/v5 @@ -243,6 +279,12 @@ Version: v0.37.2-flux.1 License: Apache-2.0 License URL: https://github.com/fluxcd/cli-utils/blob/v0.37.2-flux.1/LICENSE +---------- +Module: github.com/fsnotify/fsnotify +Version: v1.9.0 +License: BSD-3-Clause +License URL: https://github.com/fsnotify/fsnotify/blob/v1.9.0/LICENSE + ---------- Module: github.com/fxamacker/cbor/v2 Version: v2.9.0 @@ -279,6 +321,12 @@ Version: v1.2.2 License: Apache-2.0 License URL: https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE +---------- +Module: github.com/go-logr/zapr +Version: v1.3.0 +License: Apache-2.0 +License URL: https://github.com/go-logr/zapr/blob/v1.3.0/LICENSE + ---------- Module: github.com/go-openapi/jsonpointer Version: v0.22.4 @@ -377,9 +425,9 @@ License URL: https://github.com/google/btree/blob/v1.1.3/LICENSE ---------- Module: github.com/google/gnostic-models -Version: v0.7.0 +Version: v0.7.1 License: Apache-2.0 -License URL: https://github.com/google/gnostic-models/blob/v0.7.0/LICENSE +License URL: https://github.com/google/gnostic-models/blob/v0.7.1/LICENSE ---------- Module: github.com/google/go-cmp/cmp @@ -423,6 +471,12 @@ Version: v2.17.0 License: BSD-3-Clause License URL: https://github.com/googleapis/gax-go/blob/v2.17.0/v2/LICENSE +---------- +Module: github.com/gorilla/websocket +Version: v1.5.4-0.20250319132907-e064f32e3674 +License: BSD-2-Clause +License URL: https://github.com/gorilla/websocket/blob/e064f32e3674/LICENSE + ---------- Module: github.com/gosuri/uitable Version: v0.0.4 @@ -489,12 +543,24 @@ Version: v1.1.12 License: MIT License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE +---------- 
+Module: github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg +Version: v1.7.7 +License: Apache-2.0 +License URL: https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/v1.7.7/LICENSE + ---------- Module: github.com/kr/fs Version: v0.1.0 License: BSD-3-Clause License URL: https://github.com/kr/fs/blob/v0.1.0/LICENSE +---------- +Module: github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1 +Version: v8.4.0 +License: Apache-2.0 +License URL: https://github.com/kubernetes-csi/external-snapshotter/blob/client/v8.4.0/client/LICENSE + ---------- Module: github.com/lann/builder Version: v0.0.0-20180802200727-47ae307949d0 @@ -561,6 +627,12 @@ Version: v1.0.2 License: MIT License URL: https://github.com/mitchellh/reflectwalk/blob/v1.0.2/LICENSE +---------- +Module: github.com/moby/spdystream +Version: v0.5.0 +License: Apache-2.0 +License URL: https://github.com/moby/spdystream/blob/v0.5.0/LICENSE + ---------- Module: github.com/moby/term Version: v0.5.2 @@ -591,6 +663,12 @@ Version: v0.0.0-20191010083416-a7dc8b61c822 License: BSD-3-Clause License URL: https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE +---------- +Module: github.com/mxk/go-flowrate/flowrate +Version: v0.0.0-20140419014527-cca7078d478f +License: BSD-3-Clause +License URL: https://github.com/mxk/go-flowrate/blob/cca7078d478f/LICENSE + ---------- Module: github.com/onsi/gomega Version: v1.39.1 @@ -609,12 +687,24 @@ Version: v1.1.1 License: Apache-2.0 License URL: https://github.com/opencontainers/image-spec/blob/v1.1.1/LICENSE +---------- +Module: github.com/openshift/api/security/v1 +Version: v0.0.0-20260306105915-ec7ab20aa8c4 +License: Apache-2.0 +License URL: https://github.com/openshift/api/blob/ec7ab20aa8c4/LICENSE + ---------- Module: github.com/peterbourgon/diskv Version: v2.0.1 License: MIT License URL: https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE +---------- +Module: github.com/pkg/errors +Version: 
v0.9.1 +License: BSD-2-Clause +License URL: https://github.com/pkg/errors/blob/v0.9.1/LICENSE + ---------- Module: github.com/pkg/sftp Version: v1.13.10 @@ -627,6 +717,42 @@ Version: v1.0.1-0.20181226105442-5d4384ee4fb2 License: BSD-3-Clause License URL: https://github.com/pmezard/go-difflib/blob/5d4384ee4fb2/LICENSE +---------- +Module: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring +Version: v0.87.1 +License: Apache-2.0 +License URL: https://github.com/prometheus-operator/prometheus-operator/blob/pkg/apis/monitoring/v0.87.1/pkg/apis/monitoring/LICENSE + +---------- +Module: github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +Version: v1.23.2 +License: BSD-3-Clause +License URL: https://github.com/prometheus/client_golang/blob/v1.23.2/internal/github.com/golang/gddo/LICENSE + +---------- +Module: github.com/prometheus/client_golang/prometheus +Version: v1.23.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE + +---------- +Module: github.com/prometheus/client_model/go +Version: v0.6.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE + +---------- +Module: github.com/prometheus/common +Version: v0.67.5 +License: Apache-2.0 +License URL: https://github.com/prometheus/common/blob/v0.67.5/LICENSE + +---------- +Module: github.com/prometheus/procfs +Version: v0.19.2 +License: Apache-2.0 +License URL: https://github.com/prometheus/procfs/blob/v0.19.2/LICENSE + ---------- Module: github.com/rubenv/sql-migrate Version: v1.8.1 @@ -795,6 +921,18 @@ Version: v1.9.0 License: Apache-2.0 License URL: https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.9.0/otlp/LICENSE +---------- +Module: go.uber.org/multierr +Version: v1.11.0 +License: MIT +License URL: https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt + +---------- +Module: go.uber.org/zap +Version: v1.27.1 +License: MIT +License URL: 
https://github.com/uber-go/zap/blob/v1.27.1/LICENSE + ---------- Module: go.yaml.in/yaml/v2 Version: v2.4.3 @@ -813,6 +951,12 @@ Version: v0.49.0 License: BSD-3-Clause License URL: https://cs.opensource.google/go/x/crypto/+/v0.49.0:LICENSE +---------- +Module: golang.org/x/mod/semver +Version: v0.33.0 +License: BSD-3-Clause +License URL: https://cs.opensource.google/go/x/mod/+/v0.33.0:LICENSE + ---------- Module: golang.org/x/net Version: v0.51.0 @@ -855,6 +999,12 @@ Version: v0.15.0 License: BSD-3-Clause License URL: https://cs.opensource.google/go/x/time/+/v0.15.0:LICENSE +---------- +Module: gomodules.xyz/jsonpatch/v2 +Version: v2.5.0 +License: Apache-2.0 +License URL: https://github.com/gomodules/jsonpatch/blob/v2.5.0/v2/LICENSE + ---------- Module: google.golang.org/api Version: v0.271.0 @@ -1013,15 +1163,15 @@ License URL: https://github.com/kubernetes/kubectl/blob/v0.35.1/LICENSE ---------- Module: k8s.io/utils -Version: v0.0.0-20260106112306-0fe9cd71b2f8 +Version: v0.0.0-20260108192941-914a6e750570 License: Apache-2.0 -License URL: https://github.com/kubernetes/utils/blob/0fe9cd71b2f8/LICENSE +License URL: https://github.com/kubernetes/utils/blob/914a6e750570/LICENSE ---------- -Module: k8s.io/utils/internal/third_party/forked/golang/net -Version: v0.0.0-20260106112306-0fe9cd71b2f8 +Module: k8s.io/utils/internal/third_party/forked/golang +Version: v0.0.0-20260108192941-914a6e750570 License: BSD-3-Clause -License URL: https://github.com/kubernetes/utils/blob/0fe9cd71b2f8/internal/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/utils/blob/914a6e750570/internal/third_party/forked/golang/LICENSE ---------- Module: oras.land/oras-go/v2 @@ -1030,10 +1180,10 @@ License: Apache-2.0 License URL: https://github.com/oras-project/oras-go/blob/v2.6.0/LICENSE ---------- -Module: sigs.k8s.io/controller-runtime/pkg -Version: v0.23.1 +Module: sigs.k8s.io/controller-runtime +Version: v0.23.2 License: Apache-2.0 -License URL: 
https://github.com/kubernetes-sigs/controller-runtime/blob/v0.23.1/LICENSE +License URL: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.23.2/LICENSE ---------- Module: sigs.k8s.io/json From 50d182440029923ad6716721a0f3bdffd76f9b6f Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Fri, 13 Mar 2026 16:20:50 +0100 Subject: [PATCH 21/31] adjust bootrapping message Signed-off-by: Tim Schrodi --- cli/cmd/bootstrap_local.go | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/cli/cmd/bootstrap_local.go b/cli/cmd/bootstrap_local.go index ef503d8b..a13578fb 100644 --- a/cli/cmd/bootstrap_local.go +++ b/cli/cmd/bootstrap_local.go @@ -148,24 +148,21 @@ func (c *BootstrapLocalCmd) ConfirmLocalBootstrapWarning() error { fmt.Println(csio.Long(`############################################################ # Local Bootstrap Warning # ############################################################ - -Codesphere local bootstrap is for testing only. - -Currently supported: -- One Kubernetes cluster with Linux x86_64 nodes only -- Minikube on Linux with a VM and an extra disk for Rook/Ceph -- Minikube 1.23 or newer - -Not supported: -- Minikube on macOS - -Never run Rook directly on your host system; local disks may be consumed. - -Recommended command: - minikube start --disk-size=40g --extra-disks=1 --driver kvm2 - -Rook guide: - https://rook.io/docs/rook/latest-release/Contributing/development-environment/#minikube +# +# Codesphere local bootstrap is for testing only. +# +# Currently supported: +# - One Kubernetes cluster with Linux x86_64 nodes only +# - Kubernetes Cluster on Linux with a VM and an extra disk for Rook/Ceph +# +# Not supported: +# - Minikube on macOS +# +# Never run Rook directly on your host system; local disks may be consumed. 
+# +# Recommended command: +# minikube start --disk-size=40g --extra-disks=1 --driver kvm2 +############################################################ `)) if c.Yes { From 04c23bbd9eda734f689aa405185010bd2d93a8f3 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sat, 14 Mar 2026 15:18:19 +0100 Subject: [PATCH 22/31] add addtional component with zero requests make profiel being applied conditionally Signed-off-by: Tim Schrodi --- internal/bootstrap/local/local.go | 17 +- internal/installer/config_manager_profile.go | 286 +++++++++++-------- internal/installer/resource_profiles.go | 3 + 3 files changed, 179 insertions(+), 127 deletions(-) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index 6b7610cc..dfb2d93c 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -376,11 +376,10 @@ func (b *LocalBootstrapper) EnsureInstallConfig() error { } b.Env.ExistingConfigUsed = true - } else { - err := b.icg.ApplyProfile(b.Env.Profile) - if err != nil { - return fmt.Errorf("failed to apply profile: %w", err) - } + } + err := b.icg.ApplyProfile(b.Env.Profile) + if err != nil { + return fmt.Errorf("failed to apply profile: %w", err) } b.Env.InstallConfig = b.icg.GetInstallConfig() @@ -487,12 +486,8 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } b.Env.InstallConfig.Kubernetes.PodCIDR = podCIDR b.Env.InstallConfig.Kubernetes.ServiceCIDR = serviceCIDR - b.Env.InstallConfig.Cluster.Gateway = files.GatewayConfig{ - ServiceType: "LoadBalancer", - } - b.Env.InstallConfig.Cluster.PublicGateway = files.GatewayConfig{ - ServiceType: "LoadBalancer", - } + b.Env.InstallConfig.Cluster.Gateway.ServiceType = "LoadBalancer" + b.Env.InstallConfig.Cluster.PublicGateway.ServiceType = "LoadBalancer" // TODO: certificates b.Env.InstallConfig.Codesphere.CertIssuer = files.CertIssuerConfig{ diff --git a/internal/installer/config_manager_profile.go b/internal/installer/config_manager_profile.go index 
dbd79791..8b588813 100644 --- a/internal/installer/config_manager_profile.go +++ b/internal/installer/config_manager_profile.go @@ -22,83 +22,129 @@ func (g *InstallConfig) ApplyProfile(profile string) error { g.Config = &files.RootConfig{} } - g.Config.Ceph.NodesSubnet = "127.0.0.1/32" - g.Config.Ceph.Hosts = []files.CephHost{{Hostname: "localhost", IPAddress: "127.0.0.1", IsMaster: true}} - g.Config.Ceph.OSDs = []files.CephOSD{ - { - SpecID: "default", - Placement: files.CephPlacement{ - HostPattern: "*", - }, - DataDevices: files.CephDataDevices{ - Size: "240G:300G", - Limit: 1, - }, - DBDevices: files.CephDBDevices{ - Size: "120G:150G", - Limit: 1, + if g.Config.Ceph.NodesSubnet == "" { + g.Config.Ceph.NodesSubnet = "127.0.0.1/32" + } + if g.Config.Ceph.Hosts == nil { + g.Config.Ceph.Hosts = []files.CephHost{{Hostname: "localhost", IPAddress: "127.0.0.1", IsMaster: true}} + } + if g.Config.Ceph.OSDs == nil { + g.Config.Ceph.OSDs = []files.CephOSD{ + { + SpecID: "default", + Placement: files.CephPlacement{ + HostPattern: "*", + }, + DataDevices: files.CephDataDevices{ + Size: "240G:300G", + Limit: 1, + }, + DBDevices: files.CephDBDevices{ + Size: "120G:150G", + Limit: 1, + }, }, - }, + } } - g.Config.Datacenter.ID = 1 - g.Config.Datacenter.City = "Karlsruhe" - g.Config.Datacenter.CountryCode = "DE" + if g.Config.Datacenter.ID == 0 { + g.Config.Datacenter.ID = 1 + } + if g.Config.Datacenter.City == "" { + g.Config.Datacenter.City = "Karlsruhe" + } + if g.Config.Datacenter.CountryCode == "" { + g.Config.Datacenter.CountryCode = "DE" + } - g.Config.Postgres.Mode = "install" - g.Config.Postgres.Primary = &files.PostgresPrimaryConfig{ - IP: "127.0.0.1", - Hostname: "localhost", + if g.Config.Postgres.Mode == "" { + g.Config.Postgres.Mode = "install" + } + if g.Config.Postgres.Primary == nil { + g.Config.Postgres.Primary = &files.PostgresPrimaryConfig{ + IP: "127.0.0.1", + Hostname: "localhost", + } } g.Config.Kubernetes.ManagedByCodesphere = true 
g.Config.Kubernetes.NeedsKubeConfig = false - g.Config.Kubernetes.APIServerHost = "127.0.0.1" - g.Config.Kubernetes.ControlPlanes = []files.K8sNode{{IPAddress: "127.0.0.1"}} - g.Config.Kubernetes.Workers = []files.K8sNode{{IPAddress: "127.0.0.1"}} - - g.Config.Cluster.Certificates = files.ClusterCertificates{ - CA: files.CAConfig{ - Algorithm: "RSA", - KeySizeBits: 2048, - }, - } - g.Config.Cluster.Gateway = files.GatewayConfig{ServiceType: "LoadBalancer"} - g.Config.Cluster.PublicGateway = files.GatewayConfig{ServiceType: "LoadBalancer"} - g.Config.MetalLB = &files.MetalLBConfig{ - Enabled: false, - Pools: []files.MetalLBPoolDef{}, - } - g.Config.Registry = &files.RegistryConfig{} - - g.Config.Codesphere.Domain = "codesphere.local" - g.Config.Codesphere.WorkspaceHostingBaseDomain = "ws.local" - g.Config.Codesphere.CustomDomains.CNameBaseDomain = "custom.local" - g.Config.Codesphere.DNSServers = []string{"8.8.8.8", "1.1.1.1"} - g.Config.Codesphere.Experiments = []string{} - g.Config.Codesphere.WorkspaceImages = &files.WorkspaceImagesConfig{ - Agent: &files.ImageRef{ - BomRef: "workspace-agent-24.04", - }, - } - g.Config.Codesphere.DeployConfig = files.DeployConfig{ - Images: map[string]files.ImageConfig{ - "ubuntu-24.04": { - Name: "Ubuntu 24.04", - SupportedUntil: "2028-05-31", - Flavors: map[string]files.FlavorConfig{ - "default": { - Image: files.ImageRef{ - BomRef: "workspace-agent-24.04", + if g.Config.Kubernetes.APIServerHost == "" { + g.Config.Kubernetes.APIServerHost = "127.0.0.1" + } + if g.Config.Kubernetes.ControlPlanes == nil { + g.Config.Kubernetes.ControlPlanes = []files.K8sNode{{IPAddress: "127.0.0.1"}} + } + if g.Config.Kubernetes.Workers == nil { + g.Config.Kubernetes.Workers = []files.K8sNode{{IPAddress: "127.0.0.1"}} + } + + if g.Config.Cluster.Certificates.CA.Algorithm == "" { + g.Config.Cluster.Certificates = files.ClusterCertificates{ + CA: files.CAConfig{ + Algorithm: "RSA", + KeySizeBits: 2048, + }, + } + } + if 
g.Config.Cluster.Gateway.ServiceType == "" { + g.Config.Cluster.Gateway = files.GatewayConfig{ServiceType: "LoadBalancer"} + } + if g.Config.Cluster.PublicGateway.ServiceType == "" { + g.Config.Cluster.PublicGateway = files.GatewayConfig{ServiceType: "LoadBalancer"} + } + if g.Config.MetalLB == nil { + g.Config.MetalLB = &files.MetalLBConfig{ + Enabled: false, + Pools: []files.MetalLBPoolDef{}, + } + } + if g.Config.Registry == nil { + g.Config.Registry = &files.RegistryConfig{} + } + + if g.Config.Codesphere.Domain == "" { + g.Config.Codesphere.Domain = "codesphere.local" + } + if g.Config.Codesphere.WorkspaceHostingBaseDomain == "" { + g.Config.Codesphere.WorkspaceHostingBaseDomain = "ws.local" + } + if g.Config.Codesphere.CustomDomains.CNameBaseDomain == "" { + g.Config.Codesphere.CustomDomains.CNameBaseDomain = "custom.local" + } + if g.Config.Codesphere.DNSServers == nil { + g.Config.Codesphere.DNSServers = []string{"8.8.8.8", "1.1.1.1"} + } + if g.Config.Codesphere.Experiments == nil { + g.Config.Codesphere.Experiments = []string{} + } + if g.Config.Codesphere.WorkspaceImages == nil { + g.Config.Codesphere.WorkspaceImages = &files.WorkspaceImagesConfig{ + Agent: &files.ImageRef{ + BomRef: "workspace-agent-24.04", + }, + } + } + if g.Config.Codesphere.DeployConfig.Images == nil { + g.Config.Codesphere.DeployConfig = files.DeployConfig{ + Images: map[string]files.ImageConfig{ + "ubuntu-24.04": { + Name: "Ubuntu 24.04", + SupportedUntil: "2028-05-31", + Flavors: map[string]files.FlavorConfig{ + "default": { + Image: files.ImageRef{ + BomRef: "workspace-agent-24.04", + }, + Pool: map[int]int{1: 1}, }, - Pool: map[int]int{1: 1}, }, }, }, - }, + } } - g.Config.Codesphere.Plans = files.PlansConfig{ - HostingPlans: map[int]files.HostingPlan{ + if g.Config.Codesphere.Plans.HostingPlans == nil { + g.Config.Codesphere.Plans.HostingPlans = map[int]files.HostingPlan{ 1: { CPUTenth: 10, GPUParts: 0, @@ -106,80 +152,88 @@ func (g *InstallConfig) ApplyProfile(profile string) 
error { StorageMb: 20480, TempStorageMb: 1024, }, - }, - WorkspacePlans: map[int]files.WorkspacePlan{ + } + } + if g.Config.Codesphere.Plans.WorkspacePlans == nil { + g.Config.Codesphere.Plans.WorkspacePlans = map[int]files.WorkspacePlan{ 1: { Name: "Standard", HostingPlanID: 1, MaxReplicas: 3, OnDemand: true, }, - }, - } - g.Config.ManagedServiceBackends = &files.ManagedServiceBackendsConfig{ - Postgres: make(map[string]interface{}), - } - g.Config.Codesphere.ManagedServices = []files.ManagedServiceConfig{ - { - Name: "postgres", - Version: "v1", - }, - { - Name: "babelfish", - Version: "v1", - }, - { - Name: "s3", - Version: "v1", - }, - { - Name: "virtual-k8s", - Version: "v1", - }, - } - g.Config.Secrets.BaseDir = "/root/secrets" + } + } + if g.Config.ManagedServiceBackends == nil { + g.Config.ManagedServiceBackends = &files.ManagedServiceBackendsConfig{ + Postgres: make(map[string]interface{}), + } + } + if g.Config.Codesphere.ManagedServices == nil { + g.Config.Codesphere.ManagedServices = []files.ManagedServiceConfig{ + {Name: "postgres", Version: "v1"}, + {Name: "babelfish", Version: "v1"}, + {Name: "s3", Version: "v1"}, + {Name: "virtual-k8s", Version: "v1"}, + } + } + if g.Config.Secrets.BaseDir == "" { + g.Config.Secrets.BaseDir = "/root/secrets" + } switch profile { case PROFILE_DEV, PROFILE_DEVELOPMENT: - g.Config.Datacenter.Name = "dev" + if g.Config.Datacenter.Name == "" { + g.Config.Datacenter.Name = "dev" + } if err := ApplyResourceProfile(g.Config, ResourceProfileNoRequests); err != nil { return fmt.Errorf("applying resource profile: %w", err) } - g.Config.Cluster.Monitoring = &files.MonitoringConfig{ - Prometheus: &files.PrometheusConfig{ - RemoteWrite: &files.RemoteWriteConfig{ - Enabled: false, - ClusterName: "local-test", + if g.Config.Cluster.Monitoring == nil { + g.Config.Cluster.Monitoring = &files.MonitoringConfig{ + Prometheus: &files.PrometheusConfig{ + RemoteWrite: &files.RemoteWriteConfig{ + Enabled: false, + ClusterName: "local-test", 
+ }, }, - }, - Loki: &files.LokiConfig{Enabled: false}, - Grafana: &files.GrafanaConfig{Enabled: false}, - GrafanaAlloy: &files.GrafanaAlloyConfig{Enabled: false}, + Loki: &files.LokiConfig{Enabled: false}, + Grafana: &files.GrafanaConfig{Enabled: false}, + GrafanaAlloy: &files.GrafanaAlloyConfig{Enabled: false}, + } } case PROFILE_PROD, PROFILE_PRODUCTION: - g.Config.Datacenter.Name = "production" - g.Config.Codesphere.Plans.WorkspacePlans = map[int]files.WorkspacePlan{ - 1: { - Name: "Standard Developer", - HostingPlanID: 1, - MaxReplicas: 3, - OnDemand: true, - }, + if g.Config.Datacenter.Name == "" { + g.Config.Datacenter.Name = "production" + } + if g.Config.Codesphere.Plans.WorkspacePlans == nil { + g.Config.Codesphere.Plans.WorkspacePlans = map[int]files.WorkspacePlan{ + 1: { + Name: "Standard Developer", + HostingPlanID: 1, + MaxReplicas: 3, + OnDemand: true, + }, + } } case PROFILE_MINIMAL: - g.Config.Datacenter.Name = "minimal" + if g.Config.Datacenter.Name == "" { + g.Config.Datacenter.Name = "minimal" + } if err := ApplyResourceProfile(g.Config, ResourceProfileNoRequests); err != nil { return fmt.Errorf("applying resource profile: %w", err) } - g.Config.Codesphere.Plans.WorkspacePlans = map[int]files.WorkspacePlan{ - 1: { - Name: "Standard Developer", - HostingPlanID: 1, - MaxReplicas: 1, - OnDemand: true, - }, + if g.Config.Codesphere.Plans.WorkspacePlans == nil { + g.Config.Codesphere.Plans.WorkspacePlans = map[int]files.WorkspacePlan{ + 1: { + Name: "Standard Developer", + HostingPlanID: 1, + MaxReplicas: 1, + OnDemand: true, + }, + } } default: diff --git a/internal/installer/resource_profiles.go b/internal/installer/resource_profiles.go index 49a2f020..42c0ae74 100644 --- a/internal/installer/resource_profiles.go +++ b/internal/installer/resource_profiles.go @@ -170,6 +170,9 @@ func applyNoRequestsProfile(config *files.RootConfig) { config.Codesphere.Override = util.DeepMergeMaps(config.Codesphere.Override, map[string]any{ "global": map[string]any{ 
"services": serviceProfiles, + "frontendGateway": map[string]any{ + "requests": zeroRequests(), + }, "underprovisionFactors": map[string]any{ "cpu": 0.25, "memory": 0.25, From cde57f2db95fa3a7c8d8f5122751944395f760d5 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Fri, 20 Mar 2026 13:11:21 +0100 Subject: [PATCH 23/31] make images configurable Signed-off-by: Tim Schrodi --- internal/bootstrap/local/local.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/internal/bootstrap/local/local.go b/internal/bootstrap/local/local.go index dfb2d93c..a6908d1a 100644 --- a/internal/bootstrap/local/local.go +++ b/internal/bootstrap/local/local.go @@ -503,7 +503,16 @@ func (b *LocalBootstrapper) UpdateInstallConfig() (err error) { } b.Env.InstallConfig.Codesphere.DNSServers = []string{"8.8.8.8"} - b.Env.InstallConfig.Codesphere.DeployConfig = bootstrap.DefaultCodesphereDeployConfig() + defaultImages := bootstrap.DefaultCodesphereDeployConfig().Images + if b.Env.InstallConfig.Codesphere.DeployConfig.Images == nil { + b.Env.InstallConfig.Codesphere.DeployConfig.Images = defaultImages + } else { + for imageName, defaultImage := range defaultImages { + if _, exists := b.Env.InstallConfig.Codesphere.DeployConfig.Images[imageName]; !exists { + b.Env.InstallConfig.Codesphere.DeployConfig.Images[imageName] = defaultImage + } + } + } b.Env.InstallConfig.Codesphere.Plans = bootstrap.DefaultCodespherePlans() b.Env.InstallConfig.Codesphere.Experiments = b.Env.Experiments From 3ece58a9420f2b3126f8974b187869e3bb16ae3b Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sat, 21 Mar 2026 13:27:18 +0100 Subject: [PATCH 24/31] fix config manager profile test Signed-off-by: Tim Schrodi --- internal/installer/config_manager_profile.go | 34 ++++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/internal/installer/config_manager_profile.go b/internal/installer/config_manager_profile.go index 8b588813..f26f64fd 100644 --- 
a/internal/installer/config_manager_profile.go +++ b/internal/installer/config_manager_profile.go @@ -124,6 +124,10 @@ func (g *InstallConfig) ApplyProfile(profile string) error { BomRef: "workspace-agent-24.04", }, } + } else if g.Config.Codesphere.WorkspaceImages.Agent == nil { + g.Config.Codesphere.WorkspaceImages.Agent = &files.ImageRef{ + BomRef: "workspace-agent-24.04", + } } if g.Config.Codesphere.DeployConfig.Images == nil { g.Config.Codesphere.DeployConfig = files.DeployConfig{ @@ -168,6 +172,8 @@ func (g *InstallConfig) ApplyProfile(profile string) error { g.Config.ManagedServiceBackends = &files.ManagedServiceBackendsConfig{ Postgres: make(map[string]interface{}), } + } else if g.Config.ManagedServiceBackends.Postgres == nil { + g.Config.ManagedServiceBackends.Postgres = make(map[string]interface{}) } if g.Config.Codesphere.ManagedServices == nil { g.Config.Codesphere.ManagedServices = []files.ManagedServiceConfig{ @@ -190,18 +196,26 @@ func (g *InstallConfig) ApplyProfile(profile string) error { return fmt.Errorf("applying resource profile: %w", err) } if g.Config.Cluster.Monitoring == nil { - g.Config.Cluster.Monitoring = &files.MonitoringConfig{ - Prometheus: &files.PrometheusConfig{ - RemoteWrite: &files.RemoteWriteConfig{ - Enabled: false, - ClusterName: "local-test", - }, - }, - Loki: &files.LokiConfig{Enabled: false}, - Grafana: &files.GrafanaConfig{Enabled: false}, - GrafanaAlloy: &files.GrafanaAlloyConfig{Enabled: false}, + g.Config.Cluster.Monitoring = &files.MonitoringConfig{} + } + if g.Config.Cluster.Monitoring.Prometheus == nil { + g.Config.Cluster.Monitoring.Prometheus = &files.PrometheusConfig{} + } + if g.Config.Cluster.Monitoring.Prometheus.RemoteWrite == nil { + g.Config.Cluster.Monitoring.Prometheus.RemoteWrite = &files.RemoteWriteConfig{ + Enabled: false, + ClusterName: "local-test", } } + if g.Config.Cluster.Monitoring.Loki == nil { + g.Config.Cluster.Monitoring.Loki = &files.LokiConfig{Enabled: false} + } + if 
g.Config.Cluster.Monitoring.Grafana == nil { + g.Config.Cluster.Monitoring.Grafana = &files.GrafanaConfig{Enabled: false} + } + if g.Config.Cluster.Monitoring.GrafanaAlloy == nil { + g.Config.Cluster.Monitoring.GrafanaAlloy = &files.GrafanaAlloyConfig{Enabled: false} + } case PROFILE_PROD, PROFILE_PRODUCTION: if g.Config.Datacenter.Name == "" { From a71f89482ce27192a8047c8523d04f3d66613fd3 Mon Sep 17 00:00:00 2001 From: schrodit <7979201+schrodit@users.noreply.github.com> Date: Sat, 21 Mar 2026 12:30:11 +0000 Subject: [PATCH 25/31] chore(docs): Auto-update docs and licenses Signed-off-by: schrodit <7979201+schrodit@users.noreply.github.com> --- NOTICE | 32 ++++++++++++++++---------------- internal/tmpl/NOTICE | 32 ++++++++++++++++---------------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/NOTICE b/NOTICE index 321d5086..c9784406 100644 --- a/NOTICE +++ b/NOTICE @@ -185,9 +185,9 @@ License URL: https://github.com/cloudnative-pg/machinery/blob/v0.3.3/LICENSE ---------- Module: github.com/codesphere-cloud/cs-go -Version: v0.21.1 +Version: v0.22.0 License: Apache-2.0 -License URL: https://github.com/codesphere-cloud/cs-go/blob/v0.21.1/LICENSE +License URL: https://github.com/codesphere-cloud/cs-go/blob/v0.22.0/LICENSE ---------- Module: github.com/codesphere-cloud/oms/internal/tmpl @@ -935,9 +935,9 @@ License URL: https://github.com/uber-go/zap/blob/v1.27.1/LICENSE ---------- Module: go.yaml.in/yaml/v2 -Version: v2.4.3 +Version: v2.4.4 License: Apache-2.0 -License URL: https://github.com/yaml/go-yaml/blob/v2.4.3/LICENSE +License URL: https://github.com/yaml/go-yaml/blob/v2.4.4/LICENSE ---------- Module: go.yaml.in/yaml/v3 @@ -1079,9 +1079,9 @@ License URL: https://github.com/helm/helm/blob/v4.1.3/LICENSE ---------- Module: k8s.io/api -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/api/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/api/blob/v0.35.3/LICENSE ---------- Module: 
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1091,15 +1091,15 @@ License URL: https://github.com/kubernetes/apiextensions-apiserver/blob/v0.35.1/ ---------- Module: k8s.io/apimachinery/pkg -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.3/LICENSE ---------- Module: k8s.io/apimachinery/third_party/forked/golang -Version: v0.35.2 +Version: v0.35.3 License: BSD-3-Clause -License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.2/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.3/third_party/forked/golang/LICENSE ---------- Module: k8s.io/apiserver/pkg/endpoints/deprecation @@ -1109,21 +1109,21 @@ License URL: https://github.com/kubernetes/apiserver/blob/v0.35.1/LICENSE ---------- Module: k8s.io/cli-runtime/pkg -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/cli-runtime/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/cli-runtime/blob/v0.35.3/LICENSE ---------- Module: k8s.io/client-go -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/client-go/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/client-go/blob/v0.35.3/LICENSE ---------- Module: k8s.io/client-go/third_party/forked/golang/template -Version: v0.35.2 +Version: v0.35.3 License: BSD-3-Clause -License URL: https://github.com/kubernetes/client-go/blob/v0.35.2/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/client-go/blob/v0.35.3/third_party/forked/golang/LICENSE ---------- Module: k8s.io/component-base/version diff --git a/internal/tmpl/NOTICE b/internal/tmpl/NOTICE index 321d5086..c9784406 100644 --- a/internal/tmpl/NOTICE +++ b/internal/tmpl/NOTICE @@ -185,9 +185,9 @@ License URL: 
https://github.com/cloudnative-pg/machinery/blob/v0.3.3/LICENSE ---------- Module: github.com/codesphere-cloud/cs-go -Version: v0.21.1 +Version: v0.22.0 License: Apache-2.0 -License URL: https://github.com/codesphere-cloud/cs-go/blob/v0.21.1/LICENSE +License URL: https://github.com/codesphere-cloud/cs-go/blob/v0.22.0/LICENSE ---------- Module: github.com/codesphere-cloud/oms/internal/tmpl @@ -935,9 +935,9 @@ License URL: https://github.com/uber-go/zap/blob/v1.27.1/LICENSE ---------- Module: go.yaml.in/yaml/v2 -Version: v2.4.3 +Version: v2.4.4 License: Apache-2.0 -License URL: https://github.com/yaml/go-yaml/blob/v2.4.3/LICENSE +License URL: https://github.com/yaml/go-yaml/blob/v2.4.4/LICENSE ---------- Module: go.yaml.in/yaml/v3 @@ -1079,9 +1079,9 @@ License URL: https://github.com/helm/helm/blob/v4.1.3/LICENSE ---------- Module: k8s.io/api -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/api/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/api/blob/v0.35.3/LICENSE ---------- Module: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1091,15 +1091,15 @@ License URL: https://github.com/kubernetes/apiextensions-apiserver/blob/v0.35.1/ ---------- Module: k8s.io/apimachinery/pkg -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.3/LICENSE ---------- Module: k8s.io/apimachinery/third_party/forked/golang -Version: v0.35.2 +Version: v0.35.3 License: BSD-3-Clause -License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.2/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/apimachinery/blob/v0.35.3/third_party/forked/golang/LICENSE ---------- Module: k8s.io/apiserver/pkg/endpoints/deprecation @@ -1109,21 +1109,21 @@ License URL: https://github.com/kubernetes/apiserver/blob/v0.35.1/LICENSE ---------- Module: 
k8s.io/cli-runtime/pkg -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/cli-runtime/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/cli-runtime/blob/v0.35.3/LICENSE ---------- Module: k8s.io/client-go -Version: v0.35.2 +Version: v0.35.3 License: Apache-2.0 -License URL: https://github.com/kubernetes/client-go/blob/v0.35.2/LICENSE +License URL: https://github.com/kubernetes/client-go/blob/v0.35.3/LICENSE ---------- Module: k8s.io/client-go/third_party/forked/golang/template -Version: v0.35.2 +Version: v0.35.3 License: BSD-3-Clause -License URL: https://github.com/kubernetes/client-go/blob/v0.35.2/third_party/forked/golang/LICENSE +License URL: https://github.com/kubernetes/client-go/blob/v0.35.3/third_party/forked/golang/LICENSE ---------- Module: k8s.io/component-base/version From d5f7f05ce77592f3a39b6c258946257f7719ebfc Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sun, 22 Mar 2026 13:39:28 +0100 Subject: [PATCH 26/31] remove unncecessary ceph files Signed-off-by: Tim Schrodi --- external/ceph.rook.io/doc.go | 12 - external/ceph.rook.io/v1/annotations_test.go | 154 ------ external/ceph.rook.io/v1/cleanup.go | 47 -- external/ceph.rook.io/v1/cluster.go | 57 --- external/ceph.rook.io/v1/doc.go | 21 - external/ceph.rook.io/v1/filesystem.go | 21 - external/ceph.rook.io/v1/labels_test.go | 329 ------------- external/ceph.rook.io/v1/mirror.go | 26 - external/ceph.rook.io/v1/namespace.go | 31 -- external/ceph.rook.io/v1/network.go | 196 -------- external/ceph.rook.io/v1/network_test.go | 444 ------------------ external/ceph.rook.io/v1/nfs.go | 105 ----- external/ceph.rook.io/v1/nfs_test.go | 224 --------- external/ceph.rook.io/v1/nvmeof.go | 26 - external/ceph.rook.io/v1/object.go | 203 -------- external/ceph.rook.io/v1/object_test.go | 366 --------------- external/ceph.rook.io/v1/placement.go | 146 ------ external/ceph.rook.io/v1/placement_test.go | 360 -------------- external/ceph.rook.io/v1/pool.go 
| 97 ---- external/ceph.rook.io/v1/pool_test.go | 74 --- external/ceph.rook.io/v1/priorityclasses.go | 73 --- .../ceph.rook.io/v1/priorityclasses_test.go | 63 --- external/ceph.rook.io/v1/resources.go | 123 ----- external/ceph.rook.io/v1/scc.go | 81 ---- external/ceph.rook.io/v1/scc_test.go | 30 -- external/ceph.rook.io/v1/spec_test.go | 169 ------- external/ceph.rook.io/v1/status.go | 74 --- external/ceph.rook.io/v1/status_test.go | 135 ------ external/ceph.rook.io/v1/storage_test.go | 291 ------------ external/ceph.rook.io/v1/topic.go | 84 ---- external/ceph.rook.io/v1/topic_test.go | 170 ------- external/ceph.rook.io/v1/volume.go | 84 ---- external/ceph.rook.io/v1/volume_test.go | 110 ----- 33 files changed, 4426 deletions(-) delete mode 100644 external/ceph.rook.io/doc.go delete mode 100644 external/ceph.rook.io/v1/annotations_test.go delete mode 100644 external/ceph.rook.io/v1/cleanup.go delete mode 100644 external/ceph.rook.io/v1/cluster.go delete mode 100644 external/ceph.rook.io/v1/doc.go delete mode 100644 external/ceph.rook.io/v1/filesystem.go delete mode 100644 external/ceph.rook.io/v1/labels_test.go delete mode 100644 external/ceph.rook.io/v1/mirror.go delete mode 100644 external/ceph.rook.io/v1/namespace.go delete mode 100644 external/ceph.rook.io/v1/network.go delete mode 100644 external/ceph.rook.io/v1/network_test.go delete mode 100644 external/ceph.rook.io/v1/nfs.go delete mode 100644 external/ceph.rook.io/v1/nfs_test.go delete mode 100644 external/ceph.rook.io/v1/nvmeof.go delete mode 100644 external/ceph.rook.io/v1/object.go delete mode 100644 external/ceph.rook.io/v1/object_test.go delete mode 100644 external/ceph.rook.io/v1/placement.go delete mode 100644 external/ceph.rook.io/v1/placement_test.go delete mode 100644 external/ceph.rook.io/v1/pool.go delete mode 100644 external/ceph.rook.io/v1/pool_test.go delete mode 100644 external/ceph.rook.io/v1/priorityclasses.go delete mode 100644 external/ceph.rook.io/v1/priorityclasses_test.go delete mode 
100644 external/ceph.rook.io/v1/resources.go delete mode 100644 external/ceph.rook.io/v1/scc.go delete mode 100644 external/ceph.rook.io/v1/scc_test.go delete mode 100644 external/ceph.rook.io/v1/spec_test.go delete mode 100644 external/ceph.rook.io/v1/status.go delete mode 100644 external/ceph.rook.io/v1/status_test.go delete mode 100644 external/ceph.rook.io/v1/storage_test.go delete mode 100644 external/ceph.rook.io/v1/topic.go delete mode 100644 external/ceph.rook.io/v1/topic_test.go delete mode 100644 external/ceph.rook.io/v1/volume.go delete mode 100644 external/ceph.rook.io/v1/volume_test.go diff --git a/external/ceph.rook.io/doc.go b/external/ceph.rook.io/doc.go deleted file mode 100644 index 1a2d5a05..00000000 --- a/external/ceph.rook.io/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Codesphere Inc. -// SPDX-License-Identifier: Apache-2.0 - -// Copy of github.com/rook/rook/pkg/apis/ceph.rook.io/v1, due to issues -// with the package dependencies: -// -// go: github.com/libopenstorage/secrets@v0.0.0-00010101000000-000000000000 requires -// -// github.com/portworx/sched-ops@v1.20.4-rc1 requires -// github.com/kubernetes-incubator/external-storage@v0.20.4-openstorage-rc2 requires -// k8s.io/apiextensions-apiserver@v0.0.0: reading k8s.io/apiextensions-apiserver/go.mod at revision v0.0.0: unknown revision v0.0.0 -package cephrookio diff --git a/external/ceph.rook.io/v1/annotations_test.go b/external/ceph.rook.io/v1/annotations_test.go deleted file mode 100644 index 465ccbd2..00000000 --- a/external/ceph.rook.io/v1/annotations_test.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestCephAnnotationsMerge(t *testing.T) { - // No annotations defined - testAnnotations := AnnotationsSpec{} - a := GetOSDAnnotations(testAnnotations) - assert.Nil(t, a) - - // Only a specific component annotations without "all" - testAnnotations = AnnotationsSpec{ - "mgr": {"mgrkey": "mgrval"}, - "mon": {"monkey": "monval"}, - "osd": {"osdkey": "osdval"}, - "rgw": {"rgwkey": "rgwval"}, - "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, - } - a = GetMgrAnnotations(testAnnotations) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, 1, len(a)) - a = GetMonAnnotations(testAnnotations) - assert.Equal(t, "monval", a["monkey"]) - assert.Equal(t, 1, len(a)) - a = GetOSDAnnotations(testAnnotations) - assert.Equal(t, "osdval", a["osdkey"]) - assert.Equal(t, 1, len(a)) - - // No annotations matching the component - testAnnotations = AnnotationsSpec{ - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonAnnotations(testAnnotations) - assert.Nil(t, a) - - // Merge with "all" - testAnnotations = AnnotationsSpec{ - "all": {"allkey1": "allval1", "allkey2": "allval2"}, - "mgr": {"mgrkey": "mgrval"}, - "cmdreporter": {"myversions": "detect"}, - "crashcollector": {"crash": "crashval"}, - "osd": {"osdkey": "osdval"}, - } - a = GetMonAnnotations(testAnnotations) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 2, len(a)) - a = 
GetMgrAnnotations(testAnnotations) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 3, len(a)) - b := GetCmdReporterAnnotations(testAnnotations) - assert.Equal(t, "detect", b["myversions"]) - assert.Equal(t, "allval1", b["allkey1"]) - assert.Equal(t, "allval2", b["allkey2"]) - c := GetCrashCollectorAnnotations(testAnnotations) - assert.Equal(t, "crashval", c["crash"]) - assert.Equal(t, "allval1", c["allkey1"]) - assert.Equal(t, "allval2", c["allkey2"]) - d := GetOSDAnnotations(testAnnotations) - assert.Equal(t, "allval1", d["allkey1"]) - assert.Equal(t, "allval2", d["allkey2"]) - assert.Equal(t, "osdval", d["osdkey"]) -} - -func TestAnnotationsSpec(t *testing.T) { - specYaml := []byte(` -mgr: - foo: bar - hello: world -mon: -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed annotations spec object - var annotations AnnotationsSpec - err = json.Unmarshal(rawJSON, &annotations) - assert.Nil(t, err) - - // the unmarshalled annotations spec should equal the expected spec below - expected := AnnotationsSpec{ - "mgr": map[string]string{ - "foo": "bar", - "hello": "world", - }, - "mon": nil, - } - assert.Equal(t, expected, annotations) -} - -func TestAnnotationsApply(t *testing.T) { - objMeta := &metav1.ObjectMeta{} - testAnnotations := Annotations{ - "foo": "bar", - "hello": "world", - } - testAnnotations.ApplyToObjectMeta(objMeta) - assert.Equal(t, testAnnotations, Annotations(objMeta.Annotations)) - - testAnnotations["isthisatest"] = "test" - testAnnotations.ApplyToObjectMeta(objMeta) - assert.Equal(t, testAnnotations, Annotations(objMeta.Annotations)) -} - -func TestAnnotationsMerge(t *testing.T) { - testAnnotationsPart1 := Annotations{ - "foo": "bar", - "hello": "world", - } - testAnnotationsPart2 := Annotations{ - "bar": "foo", - "hello": "earth", - } - expected := 
map[string]string{ - "foo": "bar", - "bar": "foo", - "hello": "world", - } - assert.Equal(t, expected, map[string]string(testAnnotationsPart1.Merge(testAnnotationsPart2))) - - // Test that nil annotations can still be appended to - testAnnotationsPart3 := Annotations{ - "hello": "world", - } - var empty Annotations - assert.Equal(t, map[string]string(testAnnotationsPart3), map[string]string(empty.Merge(testAnnotationsPart3))) -} diff --git a/external/ceph.rook.io/v1/cleanup.go b/external/ceph.rook.io/v1/cleanup.go deleted file mode 100644 index e17e82ae..00000000 --- a/external/ceph.rook.io/v1/cleanup.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -const ( - // SanitizeDataSourceZero uses /dev/zero as sanitize source - SanitizeDataSourceZero SanitizeDataSourceProperty = "zero" - - // SanitizeDataSourceRandom uses `shred's default entropy source - SanitizeDataSourceRandom SanitizeDataSourceProperty = "random" - - // SanitizeMethodComplete will sanitize everything on the disk - SanitizeMethodComplete SanitizeMethodProperty = "complete" - - // SanitizeMethodQuick will sanitize metadata only on the disk - SanitizeMethodQuick SanitizeMethodProperty = "quick" - - // DeleteDataDirOnHostsConfirmation represents the validation to destroy dataDirHostPath - DeleteDataDirOnHostsConfirmation CleanupConfirmationProperty = "yes-really-destroy-data" -) - -// HasDataDirCleanPolicy returns whether the cluster has a data dir policy -func (c *CleanupPolicySpec) HasDataDirCleanPolicy() bool { - return c.Confirmation == DeleteDataDirOnHostsConfirmation -} - -func (c *SanitizeMethodProperty) String() string { - return string(*c) -} - -func (c *SanitizeDataSourceProperty) String() string { - return string(*c) -} diff --git a/external/ceph.rook.io/v1/cluster.go b/external/ceph.rook.io/v1/cluster.go deleted file mode 100644 index 36e800e0..00000000 --- a/external/ceph.rook.io/v1/cluster.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -// RequireMsgr2 checks if the network settings require the msgr2 protocol -func (c *ClusterSpec) RequireMsgr2() bool { - if c.Network.Connections == nil { - return false - } - if c.Network.Connections.RequireMsgr2 { - return true - } - if c.Network.Connections.Compression != nil && c.Network.Connections.Compression.Enabled { - return true - } - if c.Network.Connections.Encryption != nil && c.Network.Connections.Encryption.Enabled { - return true - } - return false -} - -// RequireMsgr2 checks if the network settings require the msgr2 protocol -func (c *ClusterSpec) NetworkEncryptionEnabled() bool { - if c.Network.Connections == nil { - return false - } - if c.Network.Connections.Encryption == nil { - return false - } - return c.Network.Connections.Encryption.Enabled -} - -func (c *ClusterSpec) IsStretchCluster() bool { - return c.Mon.StretchCluster != nil && len(c.Mon.StretchCluster.Zones) > 0 -} - -func (c *ClusterSpec) ZonesRequired() bool { - return c.IsStretchCluster() || len(c.Mon.Zones) > 0 -} - -func (c *CephCluster) GetStatusConditions() *[]Condition { - return &c.Status.Conditions -} diff --git a/external/ceph.rook.io/v1/doc.go b/external/ceph.rook.io/v1/doc.go deleted file mode 100644 index b8774f97..00000000 --- a/external/ceph.rook.io/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package,register - -// Package v1 is the v1 version of the API. -// +groupName=ceph.rook.io -package v1 diff --git a/external/ceph.rook.io/v1/filesystem.go b/external/ceph.rook.io/v1/filesystem.go deleted file mode 100644 index d5128ec1..00000000 --- a/external/ceph.rook.io/v1/filesystem.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -func (c *CephFilesystem) GetStatusConditions() *[]Condition { - return &c.Status.Conditions -} diff --git a/external/ceph.rook.io/v1/labels_test.go b/external/ceph.rook.io/v1/labels_test.go deleted file mode 100644 index 71cd1a73..00000000 --- a/external/ceph.rook.io/v1/labels_test.go +++ /dev/null @@ -1,329 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestCephLabelsMerge(t *testing.T) { - // No Labels defined - testLabels := LabelsSpec{} - a := GetOSDLabels(testLabels) - assert.Nil(t, a) - - // Only a specific component labels without "all" - testLabels = LabelsSpec{ - "mgr": {"mgrkey": "mgrval"}, - "mon": {"monkey": "monval"}, - "osd": {"osdkey": "osdval"}, - "rgw": {"rgwkey": "rgwval"}, - "rbdmirror": {"rbdmirrorkey": "rbdmirrorval"}, - } - a = GetMgrLabels(testLabels) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, 1, len(a)) - a = GetMonLabels(testLabels) - assert.Equal(t, "monval", a["monkey"]) - assert.Equal(t, 1, len(a)) - a = GetOSDLabels(testLabels) - assert.Equal(t, "osdval", a["osdkey"]) - assert.Equal(t, 1, len(a)) - - // No Labels matching the component - testLabels = LabelsSpec{ - "mgr": {"mgrkey": "mgrval"}, - } - a = GetMonLabels(testLabels) - assert.Nil(t, a) - - // Merge with "all" - testLabels = LabelsSpec{ - "all": {"allkey1": "allval1", "allkey2": "allval2"}, - "mgr": {"mgrkey": "mgrval"}, - "cmdreporter": {"detect": "myversion"}, - } - a = GetMonLabels(testLabels) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 2, len(a)) - a = GetMgrLabels(testLabels) - assert.Equal(t, "mgrval", a["mgrkey"]) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 3, len(a)) - a = GetCmdReporterLabels(testLabels) - assert.Equal(t, "myversion", a["detect"]) - assert.Equal(t, "allval1", a["allkey1"]) - assert.Equal(t, "allval2", a["allkey2"]) - assert.Equal(t, 3, len(a)) -} - -func TestLabelsSpec(t *testing.T) { - specYaml := []byte(` -mgr: - foo: bar - hello: world -mon: -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the 
JSON into a strongly typed Labels spec object - var Labels LabelsSpec - err = json.Unmarshal(rawJSON, &Labels) - assert.Nil(t, err) - - // the unmarshalled Labels spec should equal the expected spec below - expected := LabelsSpec{ - "mgr": map[string]string{ - "foo": "bar", - "hello": "world", - }, - "mon": nil, - } - assert.Equal(t, expected, Labels) -} - -func TestLabelsApply(t *testing.T) { - tcs := []struct { - name string - target *metav1.ObjectMeta - input Labels - expected Labels - }{ - { - name: "it should be able to update meta with no label", - target: &metav1.ObjectMeta{}, - input: Labels{ - "foo": "bar", - }, - expected: Labels{ - "foo": "bar", - }, - }, - { - name: "it should keep the original labels when new labels are set", - target: &metav1.ObjectMeta{ - Labels: Labels{ - "foo": "bar", - }, - }, - input: Labels{ - "hello": "world", - }, - expected: Labels{ - "foo": "bar", - "hello": "world", - }, - }, - { - name: "it should NOT overwrite the existing keys", - target: &metav1.ObjectMeta{ - Labels: Labels{ - "foo": "bar", - }, - }, - input: Labels{ - "foo": "baz", - }, - expected: Labels{ - "foo": "bar", - }, - }, - } - - for _, tc := range tcs { - tc.input.ApplyToObjectMeta(tc.target) - assert.Equal(t, map[string]string(tc.expected), tc.target.Labels) - } -} - -func TestLabelsOverwriteApply(t *testing.T) { - tcs := []struct { - name string - target *metav1.ObjectMeta - input Labels - expected Labels - }{ - { - name: "it should be able to update meta with no label", - target: &metav1.ObjectMeta{}, - input: Labels{ - "foo": "bar", - }, - expected: Labels{ - "foo": "bar", - }, - }, - { - name: "it should keep the original labels when new labels are set", - target: &metav1.ObjectMeta{ - Labels: Labels{ - "foo": "bar", - }, - }, - input: Labels{ - "hello": "world", - }, - expected: Labels{ - "foo": "bar", - "hello": "world", - }, - }, - { - name: "it should overwrite the existing keys", - target: &metav1.ObjectMeta{ - Labels: Labels{ - "foo": "bar", - }, 
- }, - input: Labels{ - "foo": "baz", - }, - expected: Labels{ - "foo": "baz", - }, - }, - } - - for _, tc := range tcs { - tc.input.OverwriteApplyToObjectMeta(tc.target) - assert.Equal(t, map[string]string(tc.expected), tc.target.Labels) - } -} - -func TestLabelsMerge(t *testing.T) { - testLabelsPart1 := Labels{ - "foo": "bar", - "hello": "world", - } - testLabelsPart2 := Labels{ - "bar": "foo", - "hello": "earth", - } - expected := map[string]string{ - "foo": "bar", - "bar": "foo", - "hello": "world", - } - assert.Equal(t, expected, map[string]string(testLabelsPart1.Merge(testLabelsPart2))) - - // Test that nil Labels can still be appended to - testLabelsPart3 := Labels{ - "hello": "world", - } - var empty Labels - assert.Equal(t, map[string]string(testLabelsPart3), map[string]string(empty.Merge(testLabelsPart3))) -} - -func TestToValidDNSLabel(t *testing.T) { - tests := []struct { - name string - input string - want string - }{ - {"empty string", "", ""}, - {"single dash", "-", ""}, - {"multiple dashes", "----", ""}, - {"lc a", "a", "a"}, - {"lc z", "z", "z"}, - {"lc alphabet", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, - {"UC A", "A", "a"}, - {"UC Z", "Z", "z"}, - {"UC ALPHABET", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"}, - {"mixed case AlPhAbEt", "AbCdEfGhIjKlMnOpQrStUvWxYz", "abcdefghijklmnopqrstuvwxyz"}, - {"single 0", "0", "d0"}, - {"single 9", "9", "d9"}, - {"single 1", "1", "d1"}, - {"numbers", "01234567890", "d01234567890"}, - {"letters with numbers", "1a0b1c2d3e4f5g6h7i8j9k0", "d1a0b1c2d3e4f5g6h7i8j9k0"}, - {"single / symbol", "/", ""}, - {"single : symbol", ":", ""}, - {"single . 
symbol", ".", ""}, - {"bunch of symbols", "`~!@#$%^&*()_+-={}[]\\|;':\",.<>/?", ""}, - { - "alphabet with symbols", - "a~b!c@d#e$f^g&h*i(j)k_l-m+n+o[p]q{r}s|t:u;v'wz", "a-b-c-d-e-f-g-h-i-j-k-l-m-n-o-p-q-r-s-t-u-v-w-x-y-z", - }, - {"multiple symbols between letters", "a//b//c", "a-b-c"}, - {"symbol before", "/a/b/c", "a-b-c"}, - {"symbol after", "a/b/c/", "a-b-c"}, - {"symbols before and after", "/a/b/c/", "a-b-c"}, - {"multiple symbols before after between", "//a//b//c//", "a-b-c"}, - {"mix of all tests except length", "//1a//B-c/d_f/../00-thing.ini/", "d1a-b-c-d-f-00-thing-ini"}, - { - "too long input -> middle trim", - "qwertyuiopqwertyuiopqwertyuiopaaqwertyuiopqwertyuiopqwertyuiopaa", - "qwertyuiopqwertyuiopqwertyuiop--wertyuiopqwertyuiopqwertyuiopaa", - }, - { - "too long input but symbols allow for no middle trim", - "/qwertyuiopqwerty/uiopqwertyuiop//qwertyuiopqwerty/uiopqwertyuiop/", - "qwertyuiopqwerty-uiopqwertyuiop-qwertyuiopqwerty-uiopqwertyuiop", - }, - { - "max allowed length but starts with number -> middle trim", - "123qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop", - "d123qwertyuiopqwertyuiopqwerty--pqwertyuiopqwertyuiopqwertyuiop", - }, - { - "max allowed length ok", - "qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop123", - "qwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiopqwertyuiop123", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.want, ToValidDNSLabel(tt.input)) - }) - } -} - -func Test_cutMiddle(t *testing.T) { - // not an exported function, so don't bother with extreme cases like 0, 1, 2, or 3 len inputs - t.Run("len 8 -> 6", func(t *testing.T) { - assert.Equal(t, "ab--gh", cutMiddle("abcdefgh", 6)) - }) - t.Run("len 9 -> 6", func(t *testing.T) { - assert.Equal(t, "ab--hi", cutMiddle("abcdefghi", 6)) - }) - t.Run("len 9 -> 7", func(t *testing.T) { - assert.Equal(t, "ab--ghi", cutMiddle("abcdefghi", 7)) - }) - t.Run("len 10 -> 10", func(t *testing.T) { - 
assert.Equal(t, "qwertyuiop", cutMiddle("qwertyuiop", 10)) - }) - // below is what we really want to test - t.Run("len 63 -> 63", func(t *testing.T) { - assert.Equal(t, - "qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop1", - cutMiddle("qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop1", 63)) - }) - t.Run("len 64 -> 63", func(t *testing.T) { - assert.Equal(t, - "qwertyuiopqwertyuiopqwertyuiop--wertyuiopqwertyuiopqwertyuiop12", - cutMiddle("qwertyuiopqwertyuiopqwertyuiop12qwertyuiopqwertyuiopqwertyuiop12", 63)) - }) -} diff --git a/external/ceph.rook.io/v1/mirror.go b/external/ceph.rook.io/v1/mirror.go deleted file mode 100644 index 205aad78..00000000 --- a/external/ceph.rook.io/v1/mirror.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// HasPeers returns whether the RBD mirror daemon has peer and should connect to it -func (m *MirroringPeerSpec) HasPeers() bool { - return len(m.SecretNames) != 0 -} - -func (m *FSMirroringSpec) SnapShotScheduleEnabled() bool { - return len(m.SnapshotSchedules) != 0 -} diff --git a/external/ceph.rook.io/v1/namespace.go b/external/ceph.rook.io/v1/namespace.go deleted file mode 100644 index 58b4b3e4..00000000 --- a/external/ceph.rook.io/v1/namespace.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2025 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -const ( - ImplicitNamespaceKey = "" - ImplicitNamespaceVal = "" -) - -func GetRadosNamespaceName(cephBlockPoolRadosNamespace *CephBlockPoolRadosNamespace) string { - if cephBlockPoolRadosNamespace.Spec.Name == ImplicitNamespaceKey { - return ImplicitNamespaceVal - } else if cephBlockPoolRadosNamespace.Spec.Name != "" { - return cephBlockPoolRadosNamespace.Spec.Name - } - return cephBlockPoolRadosNamespace.Name -} diff --git a/external/ceph.rook.io/v1/network.go b/external/ceph.rook.io/v1/network.go deleted file mode 100644 index debe0d39..00000000 --- a/external/ceph.rook.io/v1/network.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "fmt" - "net" - "strings" - - nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils" - "github.com/pkg/errors" -) - -// enforceHostNetwork is a private package variable that can be set via the rook-operator-config -// setting "ROOK_ENFORCE_HOST_NETWORK". when set to "true", it lets rook create all pods with host network enabled. -// This can be used, for example, to run Rook in k8s clusters with no CNI where host networking is required -var enforceHostNetwork bool = false - -// IsMultus get whether to use multus network provider -func (n *NetworkSpec) IsMultus() bool { - return n.Provider == NetworkProviderMultus -} - -// IsHost is intended to be used to determine if the rook operator should configure -// managed pods to use host networking. -// This behavior is enabled by configuring the cephCluster with the "host" network provider. 
-// This method also maintains compatibility with the old HostNetwork setting -// which is incompatible with other network providers: HostNetwork set to true -// together with an empty or unset network provider has the same effect as -// network.Provider set to "host" -func (n *NetworkSpec) IsHost() bool { - return enforceHostNetwork || (n.HostNetwork && n.Provider == NetworkProviderDefault) || n.Provider == NetworkProviderHost -} - -func ValidateNetworkSpec(clusterNamespace string, spec NetworkSpec) error { - if spec.HostNetwork && (spec.Provider != NetworkProviderDefault) { - return errors.Errorf(`the legacy hostNetwork setting is only valid with the default network provider ("") and not with '%q'`, spec.Provider) - } - if spec.IsMultus() { - if len(spec.Selectors) == 0 { - return errors.Errorf("at least one network selector must be specified when using the %q network provider", NetworkProviderMultus) - } - - if _, err := spec.GetNetworkSelection(clusterNamespace, CephNetworkPublic); err != nil { - return errors.Wrap(err, "ceph public network selector provided for multus is invalid") - } - if _, err := spec.GetNetworkSelection(clusterNamespace, CephNetworkCluster); err != nil { - return errors.Wrap(err, "ceph cluster network selector provided for multus is invalid") - } - } - - if !spec.AddressRanges.IsEmpty() { - if !spec.IsMultus() && !spec.IsHost() { - // TODO: be sure to update docs that AddressRanges can be specified for host networking as - // well as multus so that the override configmap doesn't need to be set - return errors.Errorf("network ranges can only be specified for %q and %q network providers", NetworkProviderHost, NetworkProviderMultus) - } - if spec.IsMultus() { - if len(spec.AddressRanges.Public) > 0 && !spec.NetworkHasSelection(CephNetworkPublic) { - return errors.Errorf("public address range can only be specified for multus if there is a public network selection") - } - if len(spec.AddressRanges.Cluster) > 0 && 
!spec.NetworkHasSelection(CephNetworkCluster) { - return errors.Errorf("cluster address range can only be specified for multus if there is a cluster network selection") - } - } - } - - if err := spec.AddressRanges.Validate(); err != nil { - return err - } - - return nil -} - -func ValidateNetworkSpecUpdate(clusterNamespace string, oldSpec, newSpec NetworkSpec) error { - // Allow an attempt to enable or disable host networking, but not other provider changes - oldProvider := oldSpec.Provider - newProvider := newSpec.Provider - if oldProvider != newProvider && oldProvider != "host" && newProvider != "host" { - return errors.Errorf("invalid update: network provider change from %q to %q is not allowed", oldProvider, newProvider) - } - - return ValidateNetworkSpec(clusterNamespace, newSpec) -} - -// NetworkHasSelection returns true if the given Ceph network has a selection. -func (n *NetworkSpec) NetworkHasSelection(network CephNetworkType) bool { - s, ok := n.Selectors[network] - if !ok || s == "" { - return false - } - return true -} - -// GetNetworkSelection gets the network selection for a given Ceph network, or nil if the network -// doesn't have a selection. -func (n *NetworkSpec) GetNetworkSelection(clusterNamespace string, network CephNetworkType) (*nadv1.NetworkSelectionElement, error) { - if !n.NetworkHasSelection(network) { - return nil, nil // no selection for network - } - s := n.Selectors[network] - // From documentation of the "k8s.v1.cni.cncf.io/network-status" annotation, valid JSON inputs - // must be in list form, surrounded with brackets. The NAD utility library will only parse - // list-format JSON input. However, old versions of Rook code allowed non-list JSON objects. - // In order to support legacy users, make an attempt to turn single-JSON-object inputs into - // len(1) lists so that they parse correctly by the util library. Do not advertise this - // "feature" in documentation since it is not technically the correct format. 
- if strings.HasPrefix(s, "{") && strings.HasSuffix(s, "}") { - s = "[" + s + "]" - } - selection, err := nadutils.ParseNetworkAnnotation(s, clusterNamespace) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse %q network selector %q", network, s) - } - if len(selection) != 1 { - return nil, errors.Errorf("%q network selector %q has multiple (%d) selections, which is not supported", network, s, len(selection)) - } - return selection[0], nil -} - -// NetworkSelectionsToAnnotationValue converts NetworkAttachmentDefinition network selection -// elements to an annotation value for the "k8s.v1.cni.cncf.io/networks" annotation key. -func NetworkSelectionsToAnnotationValue(selections ...*nadv1.NetworkSelectionElement) (string, error) { - reduced := []*nadv1.NetworkSelectionElement{} - for _, s := range selections { - if s != nil { - reduced = append(reduced, s) - } - } - if len(reduced) == 0 { - return "", nil - } - b, err := json.Marshal(reduced) - if err != nil { - return "", errors.Wrap(err, "failed to convert network selections to annotation value") - } - return string(b), nil -} - -func (n *AddressRangesSpec) IsEmpty() bool { - return n == nil || len(n.Public) == 0 && len(n.Cluster) == 0 -} - -func (n *AddressRangesSpec) Validate() error { - if n.IsEmpty() { - return nil - } - - allRanges := append(n.Public, n.Cluster...) 
- invalid := []string{} - for _, cidr := range allRanges { - _, _, err := net.ParseCIDR(string(cidr)) - if err != nil { - // returned err is "invalid CIDR: " & not more useful than invalid list below - invalid = append(invalid, string(cidr)) - } - } - if len(invalid) == 0 { - return nil - } - - return fmt.Errorf("%d network ranges are invalid: %v", len(invalid), invalid) -} - -// String turns a CIDR list into a comma-delimited string of CIDRs -func (l *CIDRList) String() string { - sl := []string{} - for _, c := range *l { - sl = append(sl, string(c)) - } - return strings.Join(sl, ", ") -} - -func SetEnforceHostNetwork(val bool) { - enforceHostNetwork = val -} - -func EnforceHostNetwork() bool { - return enforceHostNetwork -} diff --git a/external/ceph.rook.io/v1/network_test.go b/external/ceph.rook.io/v1/network_test.go deleted file mode 100644 index c5b65e55..00000000 --- a/external/ceph.rook.io/v1/network_test.go +++ /dev/null @@ -1,444 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "fmt" - "testing" - - nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestNetworkCephSpecLegacy(t *testing.T) { - netSpecYAML := []byte(`hostNetwork: true`) - - rawJSON, err := yaml.ToJSON(netSpecYAML) - assert.Nil(t, err) - - var net NetworkSpec - - err = json.Unmarshal(rawJSON, &net) - assert.Nil(t, err) - - expected := NetworkSpec{HostNetwork: true} - - assert.Equal(t, expected, net) -} - -func TestValidateNetworkSpec(t *testing.T) { - net := NetworkSpec{ - HostNetwork: true, - Provider: NetworkProviderDefault, - } - err := ValidateNetworkSpec("", net) - assert.NoError(t, err) - - net = NetworkSpec{ - HostNetwork: true, - Provider: NetworkProviderHost, - } - err = ValidateNetworkSpec("", net) - assert.Error(t, err) - - net = NetworkSpec{ - HostNetwork: false, - Provider: NetworkProviderDefault, - } - err = ValidateNetworkSpec("", net) - assert.NoError(t, err) - - net = NetworkSpec{ - HostNetwork: false, - Provider: NetworkProviderHost, - } - err = ValidateNetworkSpec("", net) - assert.NoError(t, err) -} - -// test the NetworkSpec.IsHost method with different network providers -// Also test it in combination with the legacy -// "HostNetwork" setting. -// Also test the effect of the operator config setting -// ROOK_ENFORCE_HOST_NETWORK. -func TestNetworkCephIsHost(t *testing.T) { - net := NetworkSpec{HostNetwork: false} - - net.Provider = NetworkProviderHost - assert.True(t, net.IsHost()) - - net.Provider = NetworkProviderDefault - net.HostNetwork = true - assert.True(t, net.IsHost()) - - // enforcing does not change the result if host network is selected - // anyway in the cluster. 
- SetEnforceHostNetwork(true) - assert.True(t, net.IsHost()) - - SetEnforceHostNetwork(false) - assert.True(t, net.IsHost()) - - net = NetworkSpec{} - net.Provider = NetworkProviderDefault - net.HostNetwork = false - assert.False(t, net.IsHost()) - - net = NetworkSpec{} - net.Provider = NetworkProviderMultus - net.HostNetwork = false - assert.False(t, net.IsHost()) - - // test that not enforcing does not change the result. - SetEnforceHostNetwork(false) - assert.False(t, net.IsHost()) - - // test enforcing of host network - SetEnforceHostNetwork(true) - assert.True(t, net.IsHost()) - - SetEnforceHostNetwork(false) - net = NetworkSpec{} - net.Provider = NetworkProviderMultus - net.HostNetwork = true - assert.False(t, net.IsHost()) - - // test with nonempty but invalid provider - net = NetworkSpec{} - net.HostNetwork = true - net.Provider = "foo" - SetEnforceHostNetwork(false) - assert.False(t, net.IsHost()) - SetEnforceHostNetwork(true) - assert.True(t, net.IsHost()) -} - -func TestNetworkSpec(t *testing.T) { - netSpecYAML := []byte(` -provider: host -selectors: - server: enp2s0f0 - broker: enp2s0f0`) - - rawJSON, err := yaml.ToJSON(netSpecYAML) - assert.Nil(t, err) - - var net NetworkSpec - - err = json.Unmarshal(rawJSON, &net) - assert.Nil(t, err) - - expected := NetworkSpec{ - Provider: "host", - Selectors: map[CephNetworkType]string{ - "server": "enp2s0f0", - "broker": "enp2s0f0", - }, - } - - assert.Equal(t, expected, net) -} - -func TestAddressRangesSpec_IsEmpty(t *testing.T) { - var specNil *AddressRangesSpec - assert.True(t, specNil.IsEmpty()) - - empty := &AddressRangesSpec{} - assert.True(t, empty.IsEmpty()) - - someCIDR := CIDR("1.1.1.1/16") - nonEmptyTests := []AddressRangesSpec{ - {Public: []CIDR{someCIDR}}, - {Public: []CIDR{someCIDR, someCIDR}}, - {Cluster: []CIDR{someCIDR}}, - {Cluster: []CIDR{someCIDR, someCIDR}}, - {Public: []CIDR{someCIDR}, Cluster: []CIDR{someCIDR}}, - {Public: []CIDR{someCIDR, someCIDR}, Cluster: []CIDR{someCIDR, someCIDR}}, - } 
- for _, spec := range nonEmptyTests { - assert.False(t, spec.IsEmpty()) - } -} - -func TestAddressRangesSpec_Validate(t *testing.T) { - // only test a small subset of CIDRs since Rook should definitely use the Go stdlib underneath - v1 := CIDR("123.123.123.123/24") - v2 := CIDR("1.0.0.1/24") - v3 := CIDR("2000::/64") - v4 := CIDR("2000:2000:2000:2000:2000:2000:2000:2000/64") - v5 := CIDR("2000::128.128.128.128/96") // ipv4 expressed as subnet of ipv6 is valid - - // invalid CIDRs - i1 := CIDR("123.123.123/24") - i2 := CIDR("123.123.123.123/33") - i4 := CIDR("2000/64") - i3 := CIDR("2000:/64") - i5 := CIDR("2000::128.128.128.128/129") - - tests := []struct { - name string - spec AddressRangesSpec - numErrs int - }{ - {"empty", AddressRangesSpec{}, 0}, - {"all valid", AddressRangesSpec{ - Public: []CIDR{v1}, - Cluster: []CIDR{v2, v3, v4, v5}, - }, 0}, - {"all invalid", AddressRangesSpec{ - Public: []CIDR{i1}, - Cluster: []CIDR{i2, i3, i4, i5}, - }, 5}, - {"public only, valid", AddressRangesSpec{Public: []CIDR{v1}}, 0}, - {"public only, invalid", AddressRangesSpec{Public: []CIDR{i1}}, 1}, - {"cluster only, valid", AddressRangesSpec{Cluster: []CIDR{v2}}, 0}, - {"cluster only, invalid", AddressRangesSpec{Cluster: []CIDR{i2}}, 1}, - {"public valid, cluster valid", AddressRangesSpec{ - Public: []CIDR{v1}, - Cluster: []CIDR{v2}, - }, 0}, - {"public valid, cluster invalid", AddressRangesSpec{ - Public: []CIDR{v2}, - Cluster: []CIDR{i2}, - }, 1}, - {"public invalid, cluster valid", AddressRangesSpec{ - Public: []CIDR{i3}, - Cluster: []CIDR{v2}, - }, 1}, - {"public invalid, cluster invalid", AddressRangesSpec{ - Public: []CIDR{i3}, - Cluster: []CIDR{i4}, - }, 2}, - {"both, valid and invalid", AddressRangesSpec{ - Public: []CIDR{v1, i2}, - Cluster: []CIDR{v3, i4}, - }, 2}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.spec.Validate() - if tt.numErrs > 0 { - assert.Error(t, err) - t.Log(err) - assert.ErrorContains(t, err, fmt.Sprintf("%d 
network ranges are invalid", tt.numErrs)) - } else { - assert.NoError(t, err) - } - }) - } -} - -// these two functions are should almost always used together and can be unit tested together more -// easily than apart -func TestNetworkSpec_GetNetworkSelection_NetworkSelectionsToAnnotationValue(t *testing.T) { - // inputs are the same definition expressed in json format or non-json format - input1 := func(json bool) string { - if json { - return `[{"name": "macvlan", "interface": "net1"}]` - } - return "macvlan@net1" - } - input2 := func(json bool) string { - if json { - return `[{"name": "macvlan", "interface": "net2"}]` - } - return "macvlan@net2" - } - - // allow running the test suite with json-format or non-json-format inputs - testGetNetworkAnnotationValue := func(t *testing.T, json bool) { - t.Helper() - - tests := []struct { - name string - specSelectors map[CephNetworkType]string - cephNets []CephNetworkType - want string - wantErr bool - }{ - { - name: "public want public", - specSelectors: map[CephNetworkType]string{ - "public": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, - wantErr: false, - }, - { - name: "cluster want cluster", - specSelectors: map[CephNetworkType]string{ - "cluster": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkCluster}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, - wantErr: false, - }, - { - name: "public want cluster", - specSelectors: map[CephNetworkType]string{ - "public": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkCluster}, - want: ``, - wantErr: false, - }, - { - name: "cluster want public", - specSelectors: map[CephNetworkType]string{ - "cluster": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic}, - want: ``, - wantErr: false, - }, - { - name: "nothing want public", - specSelectors: map[CephNetworkType]string{}, - cephNets: []CephNetworkType{CephNetworkPublic}, - 
want: ``, - wantErr: false, - }, - { - name: "nothing want cluster", - specSelectors: map[CephNetworkType]string{}, - cephNets: []CephNetworkType{CephNetworkCluster}, - want: ``, - wantErr: false, - }, - { - name: "unknown want public", - specSelectors: map[CephNetworkType]string{ - "uncleKnown": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic}, - want: ``, - wantErr: false, - }, - { - name: "unknown want cluster", - specSelectors: map[CephNetworkType]string{ - "uncleKnown": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkCluster}, - want: ``, - wantErr: false, - }, - { - name: "public want public and cluster", - specSelectors: map[CephNetworkType]string{ - "public": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, - wantErr: false, - }, - { - name: "cluster want public and cluster", - specSelectors: map[CephNetworkType]string{ - "cluster": input1(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"}]`, - wantErr: false, - }, - { - name: "public and cluster want public and cluster", - specSelectors: map[CephNetworkType]string{ - "public": input1(json), - "cluster": input2(json), - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"},{"name":"macvlan","namespace":"ns","interface":"net2"}]`, - wantErr: false, - }, - { - name: "support mixed json-non-json spec", - specSelectors: map[CephNetworkType]string{ - "public": input1(json), - "cluster": input2(!json), // invert json-ness of this one - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: `[{"name":"macvlan","namespace":"ns","interface":"net1"},{"name":"macvlan","namespace":"ns","interface":"net2"}]`, - wantErr: false, - }, - { - name: "public and cluster want nothing", - 
specSelectors: map[CephNetworkType]string{ - "public": input1(json), - "cluster": input2(json), - }, - cephNets: []CephNetworkType{}, - want: ``, - wantErr: false, - }, - { - name: "legacy single json object support", - specSelectors: map[CephNetworkType]string{ - "public": `{"name": "legacyJsonObject"}`, - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: `[{"name":"legacyJsonObject","namespace":"ns"}]`, - wantErr: false, - }, - { - name: "invalid network selections", - specSelectors: map[CephNetworkType]string{ - "public": `[{"name": "jsonWithNoClosingBracket"}`, - "cluster": "multus%net", - }, - cephNets: []CephNetworkType{CephNetworkPublic, CephNetworkCluster}, - want: ``, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - n := &NetworkSpec{ - Selectors: tt.specSelectors, - } - - selections := []*nadv1.NetworkSelectionElement{} - errs := []error{} - for _, net := range tt.cephNets { - s, err := n.GetNetworkSelection("ns", net) - if err != nil { - errs = append(errs, err) - } - selections = append(selections, s) - } - got, err := NetworkSelectionsToAnnotationValue(selections...) - if err != nil { - errs = append(errs, err) - } - - assert.Equal(t, tt.wantErr, len(errs) > 0, "wantErr %v but got errs %v", tt.wantErr, errs) - assert.Equal(t, tt.want, got) - }) - } - } - - // Actual subtests - t.Run("non-JSON input", func(t *testing.T) { - testGetNetworkAnnotationValue(t, false) - }) - t.Run("JSON input", func(t *testing.T) { - testGetNetworkAnnotationValue(t, true) - }) -} diff --git a/external/ceph.rook.io/v1/nfs.go b/external/ceph.rook.io/v1/nfs.go deleted file mode 100644 index 37d5a3cf..00000000 --- a/external/ceph.rook.io/v1/nfs.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2022 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "reflect" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" -) - -// KerberosEnabled returns true if Kerberos is enabled from the spec. -func (n *NFSSecuritySpec) KerberosEnabled() bool { - if n == nil { - return false - } - if n.Kerberos != nil { - return true - } - return false -} - -// GetPrincipalName gets the principal name for the Kerberos spec or the default value if it is unset. -func (k *KerberosSpec) GetPrincipalName() string { - if k.PrincipalName == "" { - return "nfs" - } - return k.PrincipalName -} - -func (n *CephNFS) IsHostNetwork(c *ClusterSpec) bool { - if n.Spec.Server.HostNetwork != nil { - return *n.Spec.Server.HostNetwork - } - return c.Network.IsHost() -} - -func (sec *NFSSecuritySpec) Validate() error { - if sec == nil { - return nil - } - - if sec.SSSD != nil { - sidecar := sec.SSSD.Sidecar - if sidecar == nil { - return errors.New("System Security Services Daemon (SSSD) is enabled, but no runtime option is specified; supported: [runInSidecar]") - } - - if sidecar.Image == "" { - return errors.New("System Security Services Daemon (SSSD) sidecar is enabled, but no image is specified") - } - - if volSourceExistsAndIsEmpty(sidecar.SSSDConfigFile.VolumeSource.ToKubernetesVolumeSource()) { - return errors.New("System Security Services Daemon (SSSD) sidecar is enabled with config from a VolumeSource, but no source is specified") - } - - subDirs := map[string]bool{} - for _, additionalFile := range sidecar.AdditionalFiles { - subDir := additionalFile.SubPath - if subDir == "" { - return errors.New("System Security 
Services Daemon (SSSD) sidecar is enabled with additional file having no subPath specified") - } - - if volSourceExistsAndIsEmpty(additionalFile.VolumeSource.ToKubernetesVolumeSource()) { - return errors.Errorf("System Security Services Daemon (SSSD) sidecar is enabled with additional file (subPath %q), but no source is specified", subDir) - } - - if _, ok := subDirs[subDir]; ok { - return errors.Errorf("System Security Services Daemon (SSSD) sidecar is enabled with additional file containing duplicate subPath %q", subDir) - } - subDirs[subDir] = true - } - } - - krb := sec.Kerberos - if krb != nil { - if volSourceExistsAndIsEmpty(krb.ConfigFiles.VolumeSource.ToKubernetesVolumeSource()) { - return errors.New("Kerberos is enabled with config from a VolumeSource, but no source is specified") - } - - if volSourceExistsAndIsEmpty(krb.KeytabFile.VolumeSource.ToKubernetesVolumeSource()) { - return errors.New("Kerberos is enabled with keytab from a VolumeSource, but no source is specified") - } - } - - return nil -} - -func volSourceExistsAndIsEmpty(v *v1.VolumeSource) bool { - return v != nil && reflect.DeepEqual(*v, v1.VolumeSource{}) -} diff --git a/external/ceph.rook.io/v1/nfs_test.go b/external/ceph.rook.io/v1/nfs_test.go deleted file mode 100644 index a5a2a6d7..00000000 --- a/external/ceph.rook.io/v1/nfs_test.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2022 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -func TestNFSSecuritySpec_Validate(t *testing.T) { - isFailing := true - isOkay := false - - withSSSD := func(sssd *SSSDSpec) *NFSSecuritySpec { - return &NFSSecuritySpec{ - SSSD: sssd, - } - } - - configMapVolumeSource := &ConfigFileVolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{}, - } - - tests := []struct { - name string - security *NFSSecuritySpec - wantErr bool - }{ - {"security = nil", nil, isOkay}, - {"security empty", &NFSSecuritySpec{}, isOkay}, - {"security.sssd empty", withSSSD(&SSSDSpec{}), isFailing}, - { - "security.sssd.sidecar empty", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{}, - }), - isFailing, - }, - { - "security.sssd.sidecar fully specified", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - SSSDConfigFile: SSSDSidecarConfigFile{ - VolumeSource: configMapVolumeSource, - }, - }, - }), - isOkay, - }, - { - "security.sssd.sidecar missing image", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "", - SSSDConfigFile: SSSDSidecarConfigFile{ - VolumeSource: configMapVolumeSource, - }, - }, - }), - isFailing, - }, - { - "security.sssd.sidecar.sssdConfigFile empty", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - SSSDConfigFile: SSSDSidecarConfigFile{}, - }, - }), - isOkay, - }, - { - "security.sssd.sidecar.sssdConfigFile.volumeSource empty", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - SSSDConfigFile: SSSDSidecarConfigFile{ - VolumeSource: &ConfigFileVolumeSource{}, - }, - }, - }), - isFailing, - }, - { - "security.sssd.sidecar.additionalFiles empty", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - AdditionalFiles: AdditionalVolumeMounts{}, - }, - }), - isOkay, - }, - { - "security.sssd.sidecar.additionalFiles multiple valid", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - AdditionalFiles: AdditionalVolumeMounts{ - 
{SubPath: "one", VolumeSource: configMapVolumeSource}, - {SubPath: "two", VolumeSource: configMapVolumeSource}, - {SubPath: "three", VolumeSource: configMapVolumeSource}, - }, - }, - }), - isOkay, - }, - { - "security.sssd.sidecar.additionalFiles one empty subDir", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - AdditionalFiles: AdditionalVolumeMounts{ - {SubPath: "one", VolumeSource: configMapVolumeSource}, - {SubPath: "", VolumeSource: configMapVolumeSource}, - {SubPath: "three", VolumeSource: configMapVolumeSource}, - }, - }, - }), - isFailing, - }, - { - "security.sssd.sidecar.additionalFiles duplicate subDirs", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - AdditionalFiles: AdditionalVolumeMounts{ - {SubPath: "one", VolumeSource: configMapVolumeSource}, - {SubPath: "two", VolumeSource: configMapVolumeSource}, - {SubPath: "one", VolumeSource: configMapVolumeSource}, - }, - }, - }), - isFailing, - }, - { - "security.sssd.sidecar.additionalFiles one vol source empty", - withSSSD(&SSSDSpec{ - Sidecar: &SSSDSidecar{ - Image: "myimage", - AdditionalFiles: AdditionalVolumeMounts{ - {SubPath: "one", VolumeSource: configMapVolumeSource}, - {SubPath: "", VolumeSource: &ConfigFileVolumeSource{}}, - {SubPath: "three", VolumeSource: configMapVolumeSource}, - }, - }, - }), - isFailing, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := tt.security.Validate(); (err != nil) != tt.wantErr { - t.Errorf("NFSSecuritySpec.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestNFSSecuritySpec_KerberosEnabled(t *testing.T) { - t.Run("nil security spec", func(t *testing.T) { - var sec *NFSSecuritySpec - assert.False(t, sec.KerberosEnabled()) - }) - - t.Run("empty security spec", func(t *testing.T) { - sec := &NFSSecuritySpec{} - assert.False(t, sec.KerberosEnabled()) - }) - - t.Run("empty kerberos spec", func(t *testing.T) { - sec := &NFSSecuritySpec{ - Kerberos: &KerberosSpec{}, 
- } - assert.True(t, sec.KerberosEnabled()) - }) - - t.Run("filled in kerberos spec", func(t *testing.T) { - sec := &NFSSecuritySpec{ - Kerberos: &KerberosSpec{ - PrincipalName: "mom", - }, - } - assert.True(t, sec.KerberosEnabled()) - }) -} - -func TestKerberosSpec_GetPrincipalName(t *testing.T) { - t.Run("empty kerberos spec", func(t *testing.T) { - k := &KerberosSpec{} - assert.Equal(t, "nfs", k.GetPrincipalName()) - }) - - t.Run("principal name nfs", func(t *testing.T) { - k := &KerberosSpec{ - PrincipalName: "nfs", - } - assert.Equal(t, "nfs", k.GetPrincipalName()) - }) - - t.Run("principal name set", func(t *testing.T) { - k := &KerberosSpec{ - PrincipalName: "set", - } - assert.Equal(t, "set", k.GetPrincipalName()) - }) -} diff --git a/external/ceph.rook.io/v1/nvmeof.go b/external/ceph.rook.io/v1/nvmeof.go deleted file mode 100644 index 1d2b2811..00000000 --- a/external/ceph.rook.io/v1/nvmeof.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2024 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// IsHostNetwork returns true if host networking is enabled for this gateway. -// If not explicitly set on the gateway spec, it inherits from the cluster spec. 
-func (n *CephNVMeOFGateway) IsHostNetwork(c *ClusterSpec) bool { - if n.Spec.HostNetwork != nil { - return *n.Spec.HostNetwork - } - return c.Network.IsHost() -} diff --git a/external/ceph.rook.io/v1/object.go b/external/ceph.rook.io/v1/object.go deleted file mode 100644 index 9aee85e9..00000000 --- a/external/ceph.rook.io/v1/object.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/util/validation" -) - -const ServiceServingCertKey = "service.beta.openshift.io/serving-cert-secret-name" - -// 38 is the max length of a ceph store name as total length of the resource name cannot be more than 63 characters limit -// and there is a configmap which is formed by appending `rook-ceph-rgw--mime-types` -// so over all it brings up to (63-14-11 = 38) characters for the store name -const objectStoreNameMaxLen = 38 - -func (s *ObjectStoreSpec) IsMultisite() bool { - return s.Zone.Name != "" -} - -func (s *ObjectStoreSpec) IsTLSEnabled() bool { - return s.Gateway.SecurePort != 0 && (s.Gateway.SSLCertificateRef != "" || s.GetServiceServingCert() != "") -} - -func (s *ObjectStoreSpec) IsRGWDashboardEnabled() bool { - return s.Gateway.DashboardEnabled == nil || *s.Gateway.DashboardEnabled -} - -func (s *ObjectStoreSpec) GetPort() (int32, error) { - if s.IsTLSEnabled() { - return s.Gateway.SecurePort, nil - } else if 
s.Gateway.Port != 0 { - return s.Gateway.Port, nil - } - return -1, errors.New("At least one of Port or SecurePort should be non-zero") -} - -func (s *ObjectStoreSpec) IsExternal() bool { - return len(s.Gateway.ExternalRgwEndpoints) != 0 -} - -func (s *ObjectStoreSpec) IsHostNetwork(c *ClusterSpec) bool { - if s.Gateway.HostNetwork != nil { - return *s.Gateway.HostNetwork - } - return c.Network.IsHost() -} - -func (s *ObjectRealmSpec) IsPullRealm() bool { - return s.Pull.Endpoint != "" -} - -// ValidateObjectSpec validate the object store arguments -func ValidateObjectSpec(gs *CephObjectStore) error { - if gs.Name == "" { - return errors.New("missing name") - } - if gs.Namespace == "" { - return errors.New("missing namespace") - } - - // validate the object store name only if it is not an external cluster - // as external cluster won't create the rgw daemon and it's other resources - // and there is some legacy external cluster which has more length of objectstore - // so to run them successfully we are not validating the objectstore name - if !gs.Spec.IsExternal() { - if len(gs.Name) > objectStoreNameMaxLen { - return errors.New("object store name cannot be longer than 38 characters") - } - } - securePort := gs.Spec.Gateway.SecurePort - if securePort < 0 || securePort > 65535 { - return errors.Errorf("securePort value of %d must be between 0 and 65535", securePort) - } - if gs.Spec.Gateway.Port <= 0 && gs.Spec.Gateway.SecurePort <= 0 { - return errors.New("invalid create: either of port or securePort fields should be not be zero") - } - - // check hosting spec - if gs.Spec.Hosting != nil { - if gs.Spec.Hosting.AdvertiseEndpoint != nil { - ep := gs.Spec.Hosting.AdvertiseEndpoint - errList := validation.IsDNS1123Subdomain(ep.DnsName) - if len(errList) > 0 { - return errors.Errorf("hosting.advertiseEndpoint.dnsName %q must be a valid DNS-1123 subdomain: %v", ep.DnsName, errList) - } - if ep.Port < 1 || ep.Port > 65535 { - return 
errors.Errorf("hosting.advertiseEndpoint.port %d must be between 1 and 65535", ep.Port) - } - } - dnsNameErrs := []string{} - for _, dnsName := range gs.Spec.Hosting.DNSNames { - errs := validation.IsDNS1123Subdomain(dnsName) - if len(errs) > 0 { - // errors do not report the domains that are errored; add them to help users - errs = append(errs, fmt.Sprintf("error on dns name %q", dnsName)) - dnsNameErrs = append(dnsNameErrs, errs...) - } - } - if len(dnsNameErrs) > 0 { - return errors.Errorf("one or more hosting.dnsNames is not a valid DNS-1123 subdomain: %v", dnsNameErrs) - } - } - - return nil -} - -func (s *ObjectStoreSpec) GetServiceServingCert() string { - if s.Gateway.Service != nil { - return s.Gateway.Service.Annotations[ServiceServingCertKey] - } - return "" -} - -// GetServiceName gets the name of the Rook-created CephObjectStore service. -// This method helps ensure adherence to stable, documented behavior (API). -func (c *CephObjectStore) GetServiceName() string { - return "rook-ceph-rgw-" + c.GetName() -} - -// GetServiceDomainName gets the domain name of the Rook-created CephObjectStore service. -// This method helps ensure adherence to stable, documented behavior (API). -func (c *CephObjectStore) GetServiceDomainName() string { - return fmt.Sprintf("%s.%s.svc", c.GetServiceName(), c.GetNamespace()) -} - -func (c *CephObjectStore) AdvertiseEndpointIsSet() bool { - return c.Spec.Hosting != nil && c.Spec.Hosting.AdvertiseEndpoint != nil && - c.Spec.Hosting.AdvertiseEndpoint.DnsName != "" && c.Spec.Hosting.AdvertiseEndpoint.Port != 0 -} - -// GetAdvertiseEndpoint returns address, port, and isTls information about the advertised endpoint -// for the CephObjectStore. This method helps ensure adherence to stable, documented behavior (API). 
-func (c *CephObjectStore) GetAdvertiseEndpoint() (string, int32, bool, error) { - port, err := c.Spec.GetPort() - if err != nil { - return "", 0, false, err - } - isTls := c.Spec.IsTLSEnabled() - - address := c.GetServiceDomainName() // service domain name is the default advertise address - if c.Spec.IsExternal() { - // for external clusters, the first external RGW endpoint is the default advertise address - address = c.Spec.Gateway.ExternalRgwEndpoints[0].String() - } - - // if users override the advertise endpoint themselves, these value take priority - if c.AdvertiseEndpointIsSet() { - address = c.Spec.Hosting.AdvertiseEndpoint.DnsName - port = c.Spec.Hosting.AdvertiseEndpoint.Port - isTls = c.Spec.Hosting.AdvertiseEndpoint.UseTls - } - - return address, port, isTls, nil -} - -// GetAdvertiseEndpointUrl gets the fully-formed advertised endpoint URL for the CephObjectStore. -// This method helps ensure adherence to stable, documented behavior (API). -func (c *CephObjectStore) GetAdvertiseEndpointUrl() (string, error) { - address, port, isTls, err := c.GetAdvertiseEndpoint() - if err != nil { - return "", err - } - - protocol := "http" - if isTls { - protocol = "https" - } - return fmt.Sprintf("%s://%s:%d", protocol, address, port), nil -} - -func (c *CephObjectStore) GetStatusConditions() *[]Condition { - return &c.Status.Conditions -} - -func (z *CephObjectZone) GetStatusConditions() *[]Condition { - return &z.Status.Conditions -} - -// String returns an addressable string representation of the EndpointAddress. 
-func (e *EndpointAddress) String() string { - // hostname is easier to read, and it is probably less likely to change, so prefer it over IP - if e.Hostname != "" { - return e.Hostname - } - return e.IP -} diff --git a/external/ceph.rook.io/v1/object_test.go b/external/ceph.rook.io/v1/object_test.go deleted file mode 100644 index 8d32a7f0..00000000 --- a/external/ceph.rook.io/v1/object_test.go +++ /dev/null @@ -1,366 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidateObjectStoreSpec(t *testing.T) { - o := &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - Spec: ObjectStoreSpec{ - Gateway: GatewaySpec{ - Port: 1, - SecurePort: 0, - }, - }, - } - err := ValidateObjectSpec(o) - assert.NoError(t, err) - - // when both port and securePort are o - o.Spec.Gateway.Port = 0 - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when securePort is greater than 65535 - o.Spec.Gateway.SecurePort = 65536 - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when name is empty - o.Name = "" - err = ValidateObjectSpec(o) - assert.Error(t, err) - - // when namespace is empty - o.Namespace = "" - err = ValidateObjectSpec(o) - assert.Error(t, err) - - t.Run("hosting", func(t *testing.T) { - o := &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - Spec: ObjectStoreSpec{ - Gateway: GatewaySpec{ - Port: 1, - SecurePort: 0, - }, - Hosting: &ObjectStoreHostingSpec{ - AdvertiseEndpoint: &ObjectEndpointSpec{ - DnsName: "valid.dns.addr", - Port: 1, - }, - DNSNames: []string{"valid.dns.addr", "valid.dns.com"}, - }, - }, - } - err := ValidateObjectSpec(o) - assert.NoError(t, err) - - // wildcard advertise dns name - s := o.DeepCopy() - s.Spec.Hosting.AdvertiseEndpoint.DnsName = "*.invalid.dns.addr" - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, `"*.invalid.dns.addr"`) - - // empty advertise dns name - s = o.DeepCopy() - s.Spec.Hosting.AdvertiseEndpoint.DnsName = "" - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, `""`) - - // zero port - s = o.DeepCopy() - s.Spec.Hosting.AdvertiseEndpoint.Port = 0 - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, "0") - - // 65536 port - s = o.DeepCopy() - s.Spec.Hosting.AdvertiseEndpoint.Port = 65536 - err = 
ValidateObjectSpec(s) - assert.ErrorContains(t, err, "65536") - - // first dnsName invalid - s = o.DeepCopy() - s.Spec.Hosting.DNSNames = []string{"-invalid.dns.name", "accepted.dns.name"} - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, `"-invalid.dns.name"`) - assert.NotContains(t, err.Error(), "accepted.dns.name") - - // second dnsName invalid - s = o.DeepCopy() - s.Spec.Hosting.DNSNames = []string{"accepted.dns.name", "-invalid.dns.name"} - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, `"-invalid.dns.name"`) - assert.NotContains(t, err.Error(), "accepted.dns.name") - - // both dnsNames invalid - s = o.DeepCopy() - s.Spec.Hosting.DNSNames = []string{"*.invalid.dns.name", "-invalid.dns.name"} - err = ValidateObjectSpec(s) - assert.ErrorContains(t, err, `"-invalid.dns.name"`) - assert.ErrorContains(t, err, `"*.invalid.dns.name"`) - }) -} - -func TestIsTLSEnabled(t *testing.T) { - objStore := &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "rook-ceph", - }, - Spec: ObjectStoreSpec{ - Gateway: GatewaySpec{ - Port: 1, - SecurePort: 0, - }, - }, - } - IsTLS := objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) - - // only securePort is set without certs - objStore.Spec.Gateway.SecurePort = 443 - IsTLS = objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) - - // when SSLCertificateRef is set with securePort - objStore.Spec.Gateway.SSLCertificateRef = "my-tls-cert" - IsTLS = objStore.Spec.IsTLSEnabled() - assert.True(t, IsTLS) - - // when service serving cert is used - objStore.Spec.Gateway.SSLCertificateRef = "" - objStore.Spec.Gateway.Service = &(RGWServiceSpec{Annotations: Annotations{ServiceServingCertKey: "rgw-cert"}}) - IsTLS = objStore.Spec.IsTLSEnabled() - assert.True(t, IsTLS) - - // when cert are set but securePort unset - objStore.Spec.Gateway.SecurePort = 0 - IsTLS = objStore.Spec.IsTLSEnabled() - assert.False(t, IsTLS) -} - -func TestCephObjectStore_GetAdvertiseEndpointUrl(t *testing.T) { - 
emptySpec := func() *CephObjectStore { - return &CephObjectStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-store", - Namespace: "my-ns", - }, - } - } - - httpSpec := func() *CephObjectStore { - s := emptySpec() - s.Spec.Gateway.Port = 8080 - return s - } - - httpsSpec := func() *CephObjectStore { - s := emptySpec() - s.Spec.Gateway.SecurePort = 8443 - s.Spec.Gateway.SSLCertificateRef = "my-cert" - return s - } - - dualSpec := func() *CephObjectStore { - s := emptySpec() - s.Spec.Gateway.Port = 8080 - s.Spec.Gateway.SecurePort = 8443 - s.Spec.Gateway.SSLCertificateRef = "my-cert" - return s - } - - removeCert := func(s *CephObjectStore) *CephObjectStore { - s.Spec.Gateway.SSLCertificateRef = "" - return s - } - - initHosting := func(s *CephObjectStore) *CephObjectStore { - if s.Spec.Hosting == nil { - s.Spec.Hosting = &ObjectStoreHostingSpec{} - } - return s - } - - addExternalIPs := func(s *CephObjectStore) *CephObjectStore { - s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{ - {IP: "192.168.1.1"}, - {IP: "192.168.1.2"}, - } - return s - } - - addExternalHostnames := func(s *CephObjectStore) *CephObjectStore { - s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{ - {Hostname: "s3.external.com"}, - {Hostname: "s3.other.com"}, - } - return s - } - - addNilAdvertise := func(s *CephObjectStore) *CephObjectStore { - s = initHosting(s) - s.Spec.Hosting.AdvertiseEndpoint = nil - return s - } - - addAdvertiseHttp := func(s *CephObjectStore) *CephObjectStore { - s = initHosting(s) - s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{ - DnsName: "my-endpoint.com", - Port: 80, - UseTls: false, - } - return s - } - - addAdvertiseHttps := func(s *CephObjectStore) *CephObjectStore { - s = initHosting(s) - s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{ - DnsName: "my-endpoint.com", - Port: 443, - UseTls: true, - } - return s - } - - type test struct { - name string - store *CephObjectStore - want string - wantErrContain string - } - - // base level 
tests, internal mode - tests := []test{ - {"nil hosting : internal : empty ", emptySpec(), "", "Port"}, - {"nil hosting : internal : port ", httpSpec(), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, - {"nil hosting : internal : securePort ", httpsSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, - {"nil hosting : internal : port + securePort ", dualSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, - {"nil hosting : internal : securePort, no cert ", removeCert(httpsSpec()), "", "Port"}, - {"nil hosting : internal : port + securePort, no cert", removeCert(dualSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, - {"nil hosting : external IPs : empty ", addExternalIPs(emptySpec()), "", "Port"}, - {"nil hosting : external IPs : port ", addExternalIPs(httpSpec()), "http://192.168.1.1:8080", ""}, - {"nil hosting : external IPs : securePort ", addExternalIPs(httpsSpec()), "https://192.168.1.1:8443", ""}, - {"nil hosting : external IPs : port + securePort ", addExternalIPs(dualSpec()), "https://192.168.1.1:8443", ""}, - {"nil hosting : external IPs : securePort, no cert ", addExternalIPs(removeCert(httpsSpec())), "", "Port"}, - {"nil hosting : external IPs : port + securePort, no cert", addExternalIPs(removeCert(dualSpec())), "http://192.168.1.1:8080", ""}, - {"nil hosting : external Hostnames: empty ", addExternalHostnames(emptySpec()), "", "Port"}, - {"nil hosting : external Hostnames: port ", addExternalHostnames(httpSpec()), "http://s3.external.com:8080", ""}, - {"nil hosting : external Hostnames: securePort ", addExternalHostnames(httpsSpec()), "https://s3.external.com:8443", ""}, - {"nil hosting : external Hostnames: port + securePort ", addExternalHostnames(dualSpec()), "https://s3.external.com:8443", ""}, - {"nil hosting : external Hostnames: securePort, no cert ", addExternalHostnames(removeCert(httpsSpec())), "", "Port"}, - {"nil hosting : external Hostnames: port + securePort, no cert", 
addExternalHostnames(removeCert(dualSpec())), "http://s3.external.com:8080", ""}, - - {"nil advertise : internal : empty ", addNilAdvertise(emptySpec()), "", "Port"}, - {"nil advertise : internal : port ", addNilAdvertise(httpSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, - {"nil advertise : internal : securePort ", addNilAdvertise(httpsSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, - {"nil advertise : internal : port + securePort ", addNilAdvertise(dualSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""}, - {"nil advertise : internal : securePort, no cert ", addNilAdvertise(removeCert(httpsSpec())), "", "Port"}, - {"nil advertise : internal : port + securePort, no cert", addNilAdvertise(removeCert(dualSpec())), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""}, - {"nil advertise : external IPs : empty ", addNilAdvertise(addExternalIPs(emptySpec())), "", "Port"}, - {"nil advertise : external IPs : port ", addNilAdvertise(addExternalIPs(httpSpec())), "http://192.168.1.1:8080", ""}, - {"nil advertise : external IPs : securePort ", addNilAdvertise(addExternalIPs(httpsSpec())), "https://192.168.1.1:8443", ""}, - {"nil advertise : external IPs : port + securePort ", addNilAdvertise(addExternalIPs(dualSpec())), "https://192.168.1.1:8443", ""}, - {"nil advertise : external IPs : securePort, no cert ", addNilAdvertise(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, - {"nil advertise : external IPs : port + securePort, no cert", addNilAdvertise(addExternalIPs(removeCert(dualSpec()))), "http://192.168.1.1:8080", ""}, - {"nil advertise : external Hostnames: empty ", addNilAdvertise(addExternalHostnames(emptySpec())), "", "Port"}, - {"nil advertise : external Hostnames: port ", addNilAdvertise(addExternalHostnames(httpSpec())), "http://s3.external.com:8080", ""}, - {"nil advertise : external Hostnames: securePort ", addNilAdvertise(addExternalHostnames(httpsSpec())), "https://s3.external.com:8443", ""}, - {"nil advertise : 
external Hostnames: port + securePort ", addNilAdvertise(addExternalHostnames(dualSpec())), "https://s3.external.com:8443", ""}, - {"nil advertise : external Hostnames: securePort, no cert ", addNilAdvertise(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, - {"nil advertise : external Hostnames: port + securePort, no cert", addNilAdvertise(addExternalHostnames(removeCert(dualSpec()))), "http://s3.external.com:8080", ""}, - - {"HTTP advertise : internal : empty ", addAdvertiseHttp(emptySpec()), "", "Port"}, - {"HTTP advertise : internal : port ", addAdvertiseHttp(httpSpec()), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : internal : securePort ", addAdvertiseHttp(httpsSpec()), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : internal : port + securePort ", addAdvertiseHttp(dualSpec()), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : internal : securePort, no cert ", addAdvertiseHttp(removeCert(httpsSpec())), "", "Port"}, - {"HTTP advertise : internal : port + securePort, no cert", addAdvertiseHttp(removeCert(dualSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external IPs : empty ", addAdvertiseHttp(addExternalIPs(emptySpec())), "", "Port"}, - {"HTTP advertise : external IPs : port ", addAdvertiseHttp(addExternalIPs(httpSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external IPs : securePort ", addAdvertiseHttp(addExternalIPs(httpsSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external IPs : port + securePort ", addAdvertiseHttp(addExternalIPs(dualSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external IPs : securePort, no cert ", addAdvertiseHttp(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, - {"HTTP advertise : external IPs : port + securePort, no cert", addAdvertiseHttp(addExternalIPs(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external Hostnames: empty ", addAdvertiseHttp(addExternalHostnames(emptySpec())), "", 
"Port"}, - {"HTTP advertise : external Hostnames: port ", addAdvertiseHttp(addExternalHostnames(httpSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external Hostnames: securePort ", addAdvertiseHttp(addExternalHostnames(httpsSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external Hostnames: port + securePort ", addAdvertiseHttp(addExternalHostnames(dualSpec())), "http://my-endpoint.com:80", ""}, - {"HTTP advertise : external Hostnames: securePort, no cert ", addAdvertiseHttp(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, - {"HTTP advertise : external Hostnames: port + securePort, no cert", addAdvertiseHttp(addExternalHostnames(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""}, - - {"HTTPS advertise: internal : empty ", addAdvertiseHttps(emptySpec()), "", "Port"}, - {"HTTPS advertise: internal : port ", addAdvertiseHttps(httpSpec()), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: internal : securePort ", addAdvertiseHttps(httpsSpec()), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: internal : port + securePort ", addAdvertiseHttps(dualSpec()), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: internal : securePort, no cert ", addAdvertiseHttps(removeCert(httpsSpec())), "", "Port"}, - {"HTTPS advertise: internal : port + securePort, no cert", addAdvertiseHttps(removeCert(dualSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external IPs : empty ", addAdvertiseHttps(addExternalIPs(emptySpec())), "", "Port"}, - {"HTTPS advertise: external IPs : port ", addAdvertiseHttps(addExternalIPs(httpSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external IPs : securePort ", addAdvertiseHttps(addExternalIPs(httpsSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external IPs : port + securePort ", addAdvertiseHttps(addExternalIPs(dualSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external IPs : securePort, no cert ", 
addAdvertiseHttps(addExternalIPs(removeCert(httpsSpec()))), "", "Port"}, - {"HTTPS advertise: external IPs : port + securePort, no cert", addAdvertiseHttps(addExternalIPs(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external Hostnames: empty ", addAdvertiseHttps(addExternalHostnames(emptySpec())), "", "Port"}, - {"HTTPS advertise: external Hostnames: port ", addAdvertiseHttps(addExternalHostnames(httpSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external Hostnames: securePort ", addAdvertiseHttps(addExternalHostnames(httpsSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external Hostnames: port + securePort ", addAdvertiseHttps(addExternalHostnames(dualSpec())), "https://my-endpoint.com:443", ""}, - {"HTTPS advertise: external Hostnames: securePort, no cert ", addAdvertiseHttps(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"}, - {"HTTPS advertise: external Hostnames: port + securePort, no cert", addAdvertiseHttps(addExternalHostnames(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.store.GetAdvertiseEndpointUrl() - assert.Equal(t, tt.want, got) - if tt.wantErrContain != "" { - assert.ErrorContains(t, err, tt.wantErrContain) - } else { - assert.NoError(t, err) - } - }) - - if tt.store.Spec.Hosting != nil { - t.Run("with DNS names: "+tt.name, func(t *testing.T) { - // dnsNames shouldn't change the test result at all - s := tt.store.DeepCopy() - s.Spec.Hosting.DNSNames = []string{"should.not.show.up"} - got, err := s.GetAdvertiseEndpointUrl() - assert.Equal(t, tt.want, got) - if tt.wantErrContain != "" { - assert.ErrorContains(t, err, tt.wantErrContain) - } else { - assert.NoError(t, err) - } - }) - } - } -} diff --git a/external/ceph.rook.io/v1/placement.go b/external/ceph.rook.io/v1/placement.go deleted file mode 100644 index 0cc327fe..00000000 --- 
a/external/ceph.rook.io/v1/placement.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -func (p PlacementSpec) All() Placement { - return p[KeyAll] -} - -// ApplyToPodSpec adds placement to a pod spec -func (p Placement) ApplyToPodSpec(t *v1.PodSpec) { - if t.Affinity == nil { - t.Affinity = &v1.Affinity{} - } - if p.NodeAffinity != nil { - t.Affinity.NodeAffinity = p.mergeNodeAffinity(t.Affinity.NodeAffinity) - } - if p.PodAffinity != nil { - t.Affinity.PodAffinity = p.PodAffinity.DeepCopy() - } - if p.PodAntiAffinity != nil { - t.Affinity.PodAntiAffinity = p.PodAntiAffinity.DeepCopy() - } - if p.Tolerations != nil { - t.Tolerations = p.mergeTolerations(t.Tolerations) - } - if p.TopologySpreadConstraints != nil { - t.TopologySpreadConstraints = p.TopologySpreadConstraints - } -} - -func (p Placement) mergeNodeAffinity(nodeAffinity *v1.NodeAffinity) *v1.NodeAffinity { - // no node affinity is specified yet, so return the placement's nodeAffinity - result := p.NodeAffinity.DeepCopy() - if nodeAffinity == nil { - return result - } - - // merge the preferred node affinity that was already specified, and the placement's nodeAffinity - result.PreferredDuringSchedulingIgnoredDuringExecution = append( - nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - p.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) 
- - // nothing to merge if no affinity was passed in - if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { - return result - } - // take the desired affinity if there was none on the placement - if p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { - result.RequiredDuringSchedulingIgnoredDuringExecution = nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution - return result - } - // take the desired affinity node selectors without the need to merge - if len(nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { - return result - } - // take the placement affinity node selectors without the need to merge - if len(p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { - // take the placement from the first option since the second isn't specified - result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms - return result - } - - // merge the match expressions together since they are defined in both placements - // this will only work if we want an "and" between all the expressions, more complex conditions won't work with this merge - var nodeTerm v1.NodeSelectorTerm - nodeTerm.MatchExpressions = append( - nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, - p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...) - nodeTerm.MatchFields = append( - nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields, - p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields...) 
- result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0] = nodeTerm - - return result -} - -func (p Placement) mergeTolerations(tolerations []v1.Toleration) []v1.Toleration { - // no toleration is specified yet, return placement's toleration - if tolerations == nil { - return p.Tolerations - } - - return append(p.Tolerations, tolerations...) -} - -// Merge returns a Placement which results from merging the attributes of the -// original Placement with the attributes of the supplied one. The supplied -// Placement's attributes will override the original ones if defined. -func (p Placement) Merge(with Placement) Placement { - ret := p - if with.NodeAffinity != nil { - ret.NodeAffinity = with.NodeAffinity - } - if with.PodAffinity != nil { - ret.PodAffinity = with.PodAffinity - } - if with.PodAntiAffinity != nil { - ret.PodAntiAffinity = with.PodAntiAffinity - } - if with.Tolerations != nil { - ret.Tolerations = ret.mergeTolerations(with.Tolerations) - } - if with.TopologySpreadConstraints != nil { - ret.TopologySpreadConstraints = with.TopologySpreadConstraints - } - return ret -} - -// GetMgrPlacement returns the placement for the MGR service -func GetMgrPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyMgr]) -} - -// GetMonPlacement returns the placement for the MON service -func GetMonPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyMon]) -} - -// GetArbiterPlacement returns the placement for the arbiter MON service -func GetArbiterPlacement(p PlacementSpec) Placement { - // If the mon is the arbiter in a stretch cluster and its placement is specified, return it - // without merging with the "all" placement so it can be handled separately from all other daemons - return p[KeyMonArbiter] -} - -// GetOSDPlacement returns the placement for the OSD service -func GetOSDPlacement(p PlacementSpec) Placement { - return p.All().Merge(p[KeyOSD]) -} diff --git a/external/ceph.rook.io/v1/placement_test.go 
b/external/ceph.rook.io/v1/placement_test.go deleted file mode 100644 index c8dc8115..00000000 --- a/external/ceph.rook.io/v1/placement_test.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2016 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestPlacementSpec(t *testing.T) { - specYaml := []byte(` -nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: foo - operator: In - values: - - bar -tolerations: - - key: foo - operator: Exists -topologySpreadConstraints: - - maxSkew: 1 - topologyKey: zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - foo: bar`) - // convert the raw spec yaml into JSON - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed placement spec object - var placement Placement - err = json.Unmarshal(rawJSON, &placement) - assert.Nil(t, err) - - // the unmarshalled placement spec should equal the expected spec below - expected := Placement{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: 
v1.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - Tolerations: []v1.Toleration{ - { - Key: "foo", - Operator: v1.TolerationOpExists, - }, - }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: "zone", - WhenUnsatisfiable: "DoNotSchedule", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - }, - }, - } - assert.Equal(t, expected, placement) -} - -func TestMergeNodeAffinity(t *testing.T) { - // affinity is nil - p := Placement{} - result := p.mergeNodeAffinity(nil) - assert.Nil(t, result) - - // node affinity is only set on the placement and should remain unchanged - p.NodeAffinity = placementTestGenerateNodeAffinity() - result = p.mergeNodeAffinity(nil) - assert.Equal(t, p.NodeAffinity, result) - - // preferred set, but required not set - affinityToMerge := placementTestGenerateNodeAffinity() - affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution = nil - result = p.mergeNodeAffinity(affinityToMerge) - assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) - assert.Equal(t, p.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution, result.RequiredDuringSchedulingIgnoredDuringExecution) - - // preferred and required expressions set - affinityToMerge = placementTestGenerateNodeAffinity() - affinityToMerge.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key = "baz" - result = p.mergeNodeAffinity(affinityToMerge) - assert.Equal(t, 2, len(result.PreferredDuringSchedulingIgnoredDuringExecution)) - assert.Equal(t, 2, len(result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions)) - assert.Equal(t, "baz", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) - assert.Equal(t, "foo", result.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[1].Key) -} - -func 
TestPlacementApplyToPodSpec(t *testing.T) { - to := placementTestGetTolerations("foo", "bar") - na := placementTestGenerateNodeAffinity() - antiaffinity := placementAntiAffinity("v1") - tc := placementTestGetTopologySpreadConstraints("zone") - expected := &v1.PodSpec{ - Affinity: &v1.Affinity{NodeAffinity: na, PodAntiAffinity: antiaffinity}, - Tolerations: to, - TopologySpreadConstraints: tc, - } - - var p Placement - var ps *v1.PodSpec - - p = Placement{ - NodeAffinity: na, - Tolerations: to, - PodAntiAffinity: antiaffinity, - TopologySpreadConstraints: tc, - } - ps = &v1.PodSpec{} - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - assert.Equal(t, 1, len(ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - // Appending some other antiaffinity to the pod spec should not alter the original placement antiaffinity - otherAntiAffinity := placementAntiAffinity("v2") - ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append( - ps.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - otherAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution...) 
- assert.Equal(t, 1, len(antiaffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - // partial update - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - ps = &v1.PodSpec{Tolerations: to, TopologySpreadConstraints: tc} - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - - // overridden attributes - p = Placement{ - NodeAffinity: na, - PodAntiAffinity: antiaffinity, - Tolerations: to, - TopologySpreadConstraints: tc, - } - ps = &v1.PodSpec{ - TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), - } - p.ApplyToPodSpec(ps) - assert.Equal(t, expected, ps) - - // The preferred affinity is merged from both sources to result in two node affinities - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - nap := placementTestGenerateNodeAffinity() - nap.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 5 - ps = &v1.PodSpec{ - Affinity: &v1.Affinity{NodeAffinity: nap}, - Tolerations: to, - TopologySpreadConstraints: tc, - } - p.ApplyToPodSpec(ps) - assert.Equal(t, 2, len(ps.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution)) - - p = Placement{NodeAffinity: na, PodAntiAffinity: antiaffinity} - to = placementTestGetTolerations("foo", "bar") - ps = &v1.PodSpec{ - Tolerations: to, - } - p.ApplyToPodSpec(ps) - assert.Equal(t, 1, len(ps.Tolerations)) - p = Placement{Tolerations: to, NodeAffinity: na, PodAntiAffinity: antiaffinity} - p.ApplyToPodSpec(ps) - assert.Equal(t, 2, len(ps.Tolerations)) -} - -func TestPlacementMerge(t *testing.T) { - to := placementTestGetTolerations("foo", "bar") - na := placementTestGenerateNodeAffinity() - tc := placementTestGetTopologySpreadConstraints("zone") - - var original, with, expected, merged Placement - - original = Placement{} - with = Placement{Tolerations: to} - expected = Placement{Tolerations: to} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{NodeAffinity: na} - with = Placement{Tolerations: to} - 
expected = Placement{NodeAffinity: na, Tolerations: to} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{} - with = Placement{TopologySpreadConstraints: tc} - expected = Placement{TopologySpreadConstraints: tc} - merged = original.Merge(with) - assert.Equal(t, expected, merged) - - original = Placement{ - Tolerations: placementTestGetTolerations("bar", "baz"), - TopologySpreadConstraints: placementTestGetTopologySpreadConstraints("rack"), - } - with = Placement{ - NodeAffinity: na, - Tolerations: to, - TopologySpreadConstraints: tc, - } - var ts int64 = 10 - expected = Placement{ - NodeAffinity: na, - Tolerations: []v1.Toleration{ - { - Key: "bar", - Operator: v1.TolerationOpExists, - Value: "baz", - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - { - Key: "foo", - Operator: v1.TolerationOpExists, - Value: "bar", - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - }, - TopologySpreadConstraints: tc, - } - merged = original.Merge(with) - assert.Equal(t, expected, merged) -} - -func placementTestGetTolerations(key, value string) []v1.Toleration { - var ts int64 = 10 - return []v1.Toleration{ - { - Key: key, - Operator: v1.TolerationOpExists, - Value: value, - Effect: v1.TaintEffectNoSchedule, - TolerationSeconds: &ts, - }, - } -} - -func placementTestGetTopologySpreadConstraints(topologyKey string) []v1.TopologySpreadConstraint { - return []v1.TopologySpreadConstraint{ - { - MaxSkew: 1, - TopologyKey: topologyKey, - WhenUnsatisfiable: "DoNotSchedule", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - }, - } -} - -func placementAntiAffinity(value string) *v1.PodAntiAffinity { - return &v1.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - Weight: 50, - PodAffinityTerm: v1.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": value, - }, - }, - 
TopologyKey: v1.LabelHostname, - }, - }, - }, - } -} - -func placementTestGenerateNodeAffinity() *v1.NodeAffinity { - return &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpExists, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ - { - Weight: 10, - Preference: v1.NodeSelectorTerm{ - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpExists, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - } -} - -func TestMergeToleration(t *testing.T) { - // placement is nil - p := Placement{} - result := p.mergeTolerations(nil) - assert.Nil(t, result) - - placementToleration := []v1.Toleration{ - { - Key: "foo", - Operator: v1.TolerationOpEqual, - }, - } - - p.Tolerations = placementToleration - result = p.mergeTolerations(nil) - assert.Equal(t, p.Tolerations, result) - - newToleration := []v1.Toleration{ - { - Key: "new", - Operator: v1.TolerationOpExists, - }, - } - - result = p.mergeTolerations(newToleration) - assert.Equal(t, 2, len(result)) - assert.Equal(t, placementToleration[0].Key, result[0].Key) - assert.Equal(t, newToleration[0].Key, result[1].Key) -} diff --git a/external/ceph.rook.io/v1/pool.go b/external/ceph.rook.io/v1/pool.go deleted file mode 100644 index a3653f04..00000000 --- a/external/ceph.rook.io/v1/pool.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/pkg/errors" -) - -func (p *PoolSpec) IsReplicated() bool { - return p.Replicated.Size > 0 -} - -func (p *PoolSpec) IsErasureCoded() bool { - return p.ErasureCoded.CodingChunks > 0 || p.ErasureCoded.DataChunks > 0 -} - -func (p *PoolSpec) IsHybridStoragePool() bool { - return p.Replicated.HybridStorage != nil -} - -// ValidateCephBlockPool validates specifically a CephBlockPool's spec (not just any NamedPoolSpec) -func ValidateCephBlockPool(p *CephBlockPool) error { - if p.Spec.Name == ".rgw.root" || p.Spec.Name == ".mgr" || p.Spec.Name == ".nfs" { - if p.Spec.IsErasureCoded() { - return errors.Errorf("invalid CephBlockPool spec: ceph built-in pool %q cannot be erasure coded", p.Name) - } - } - - return validatePoolSpec(p.ToNamedPoolSpec()) -} - -// validate any NamedPoolSpec -func validatePoolSpec(ps NamedPoolSpec) error { - // Checks if either ErasureCoded or Replicated fields are set - if ps.ErasureCoded.CodingChunks <= 0 && ps.ErasureCoded.DataChunks <= 0 && ps.Replicated.TargetSizeRatio <= 0 && ps.Replicated.Size <= 0 { - return errors.New("invalid pool spec: either of erasurecoded or replicated fields should be set") - } - // Check if any of the ErasureCoded fields are populated. Then check if replicated is populated. Both can't be populated at same time. 
- if ps.ErasureCoded.CodingChunks > 0 || ps.ErasureCoded.DataChunks > 0 || ps.ErasureCoded.Algorithm != "" { - if ps.Replicated.Size > 0 || ps.Replicated.TargetSizeRatio > 0 { - return errors.New("invalid pool spec: both erasurecoded and replicated fields cannot be set at the same time") - } - } - - if ps.Replicated.Size == 0 && ps.Replicated.TargetSizeRatio == 0 { - // Check if datachunks is set and has value less than 2. - if ps.ErasureCoded.DataChunks < 2 && ps.ErasureCoded.DataChunks != 0 { - return errors.New("invalid pool spec: erasurecoded.datachunks needs minimum value of 2") - } - - // Check if codingchunks is set and has value less than 1. - if ps.ErasureCoded.CodingChunks < 1 && ps.ErasureCoded.CodingChunks != 0 { - return errors.New("invalid pool spec: erasurecoded.codingchunks needs minimum value of 1") - } - } - return nil -} - -func (p *CephBlockPool) ToNamedPoolSpec() NamedPoolSpec { - // If the name is not overridden in the pool spec.name, set it to the name of the pool CR - name := p.Spec.Name - if name == "" { - // Set the name of the pool CR since a name override wasn't specified in the spec - name = p.Name - } - return NamedPoolSpec{ - Name: name, - PoolSpec: p.Spec.PoolSpec, - } -} - -func (p *CephBlockPool) GetStatusConditions() *[]Condition { - return &p.Status.Conditions -} - -func (p *CephBlockPoolRadosNamespace) GetStatusConditions() *[]Condition { - return &p.Status.Conditions -} - -// SnapshotSchedulesEnabled returns whether snapshot schedules are desired -func (p *MirroringSpec) SnapshotSchedulesEnabled() bool { - return len(p.SnapshotSchedules) > 0 -} diff --git a/external/ceph.rook.io/v1/pool_test.go b/external/ceph.rook.io/v1/pool_test.go deleted file mode 100644 index a72d31a5..00000000 --- a/external/ceph.rook.io/v1/pool_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2020 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidatePoolSpec(t *testing.T) { - p := &CephBlockPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ec-pool", - }, - Spec: NamedBlockPoolSpec{ - PoolSpec: PoolSpec{ - ErasureCoded: ErasureCodedSpec{ - CodingChunks: 1, - DataChunks: 2, - }, - }, - }, - } - err := validatePoolSpec(p.ToNamedPoolSpec()) - assert.NoError(t, err) - - p.Spec.ErasureCoded.DataChunks = 1 - err = validatePoolSpec(p.ToNamedPoolSpec()) - assert.Error(t, err) -} - -func TestMirroringSpec_SnapshotSchedulesEnabled(t *testing.T) { - type fields struct { - Enabled bool - Mode string - SnapshotSchedules []SnapshotScheduleSpec - } - tests := []struct { - name string - fields fields - want bool - }{ - {"disabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{}}, false}, - {"enabled", fields{Enabled: true, Mode: "pool", SnapshotSchedules: []SnapshotScheduleSpec{{Interval: "2d"}}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &MirroringSpec{ - Enabled: tt.fields.Enabled, - Mode: tt.fields.Mode, - SnapshotSchedules: tt.fields.SnapshotSchedules, - } - if got := p.SnapshotSchedulesEnabled(); got != tt.want { - t.Errorf("MirroringSpec.SnapshotSchedulesEnabled() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/external/ceph.rook.io/v1/priorityclasses.go 
b/external/ceph.rook.io/v1/priorityclasses.go deleted file mode 100644 index 05e56c61..00000000 --- a/external/ceph.rook.io/v1/priorityclasses.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// All returns the priority class name defined for 'all' daemons in the Ceph cluster CRD. -func (p PriorityClassNamesSpec) All() string { - if val, ok := p[KeyAll]; ok { - return val - } - return "" -} - -// GetMgrPriorityClassName returns the priority class name for the MGR service -func GetMgrPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyMgr]; !ok { - return p.All() - } - return p[KeyMgr] -} - -// GetMonPriorityClassName returns the priority class name for the monitors -func GetMonPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyMon]; !ok { - return p.All() - } - return p[KeyMon] -} - -// GetOSDPriorityClassName returns the priority class name for the OSDs -func GetOSDPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyOSD]; !ok { - return p.All() - } - return p[KeyOSD] -} - -// GetCleanupPriorityClassName returns the priority class name for the cleanup job -func GetCleanupPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyCleanup]; !ok { - return p.All() - } - return p[KeyCleanup] -} - -// GetCrashCollectorPriorityClassName returns the priority class name for the crashcollector -func 
GetCrashCollectorPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyCrashCollector]; !ok { - return p.All() - } - return p[KeyCrashCollector] -} - -// GetCephExporterPriorityClassName returns the priority class name for the ceph-exporter -func GetCephExporterPriorityClassName(p PriorityClassNamesSpec) string { - if _, ok := p[KeyCephExporter]; !ok { - return p.All() - } - return p[KeyCephExporter] -} diff --git a/external/ceph.rook.io/v1/priorityclasses_test.go b/external/ceph.rook.io/v1/priorityclasses_test.go deleted file mode 100644 index cc2f018a..00000000 --- a/external/ceph.rook.io/v1/priorityclasses_test.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2019 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestPriorityClassNamesSpec(t *testing.T) { - specYaml := []byte(` -all: all-class -mgr: mgr-class -mon: mon-class -osd: osd-class -crashcollector: crashcollector-class -`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - - // unmarshal the JSON into a strongly typed annotations spec object - var priorityClassNames PriorityClassNamesSpec - err = json.Unmarshal(rawJSON, &priorityClassNames) - assert.Nil(t, err) - - // the unmarshalled priority class names spec should equal the expected spec below - expected := PriorityClassNamesSpec{ - "all": "all-class", - "mgr": "mgr-class", - "mon": "mon-class", - "osd": "osd-class", - "crashcollector": "crashcollector-class", - } - assert.Equal(t, expected, priorityClassNames) -} - -func TestPriorityClassNamesDefaultToAll(t *testing.T) { - priorityClassNames := PriorityClassNamesSpec{ - "all": "all-class", - "mon": "mon-class", - } - - assert.Equal(t, "all-class", priorityClassNames.All()) -} diff --git a/external/ceph.rook.io/v1/resources.go b/external/ceph.rook.io/v1/resources.go deleted file mode 100644 index 95baec3c..00000000 --- a/external/ceph.rook.io/v1/resources.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -const ( - // ResourcesKeyMon represents the name of resource in the CR for a mon - ResourcesKeyMon = "mon" - // ResourcesKeyMgr represents the name of resource in the CR for a mgr - ResourcesKeyMgr = "mgr" - // ResourcesKeyMgrSidecar represents the name of resource in the CR for a mgr - ResourcesKeyMgrSidecar = "mgr-sidecar" - // ResourcesKeyOSD represents the name of a resource in the CR for all OSDs - ResourcesKeyOSD = "osd" - // ResourcesKeyPrepareOSD represents the name of resource in the CR for the osd prepare job - ResourcesKeyPrepareOSD = "prepareosd" - // ResourcesKeyCmdReporter represents the name of resource in the CR for the detect version and network jobs - ResourcesKeyCmdReporter = "cmd-reporter" - // ResourcesKeyMDS represents the name of resource in the CR for the mds - ResourcesKeyMDS = "mds" - // ResourcesKeyCrashCollector represents the name of resource in the CR for the crash - ResourcesKeyCrashCollector = "crashcollector" - // ResourcesKeyLogCollector represents the name of resource in the CR for the log - ResourcesKeyLogCollector = "logcollector" - // ResourcesKeyRBDMirror represents the name of resource in the CR for the rbd mirror - ResourcesKeyRBDMirror = "rbdmirror" - // ResourcesKeyFilesystemMirror represents the name of resource in the CR for the filesystem mirror - ResourcesKeyFilesystemMirror = "fsmirror" - // ResourcesKeyCleanup represents the name of resource in the CR for the cleanup - ResourcesKeyCleanup = "cleanup" - // ResourcesKeyCleanup represents the name of resource in the CR for ceph-exporter - ResourcesKeyCephExporter = "exporter" -) - -// GetMgrResources returns the resources for the MGR service -func GetMgrResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyMgr] -} - -// GetMgrSidecarResources returns the resources for the MGR sidecar container -func GetMgrSidecarResources(p ResourceSpec) v1.ResourceRequirements { - return 
p[ResourcesKeyMgrSidecar] -} - -// GetMonResources returns the resources for the monitors -func GetMonResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyMon] -} - -// GetOSDResources returns the resources for all OSDs or for OSDs of specified device class (hdd, nvme, ssd) -func GetOSDResources(p ResourceSpec, deviceClass string) v1.ResourceRequirements { - if deviceClass == "" { - return p[ResourcesKeyOSD] - } - // if device class specified, but not set in requirements return common osd requirements if present - r, ok := p[getOSDResourceKeyForDeviceClass(deviceClass)] - if ok { - return r - } - return p[ResourcesKeyOSD] -} - -// GetOSDResourcesForDeviceClass returns the resources for a device class, if specified -func GetOSDResourcesForDeviceClass(resourceSpec ResourceSpec, deviceClass string) (v1.ResourceRequirements, bool) { - // if the device class requests specific resources, return them here - if resources, ok := resourceSpec[getOSDResourceKeyForDeviceClass(deviceClass)]; ok { - return resources, true - } - // no resources requested specific to the device class - return v1.ResourceRequirements{}, false -} - -// getOSDResourceKeyForDeviceClass returns key name for device class in resources spec -func getOSDResourceKeyForDeviceClass(deviceClass string) string { - return ResourcesKeyOSD + "-" + deviceClass -} - -// GetPrepareOSDResources returns the resources for the OSDs prepare job -func GetPrepareOSDResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyPrepareOSD] -} - -// GetCmdReporterResources returns the resources for the detect version job -func GetCmdReporterResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyCmdReporter] -} - -// GetCrashCollectorResources returns the resources for the crash daemon -func GetCrashCollectorResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyCrashCollector] -} - -// GetLogCollectorResources returns the resources for the logo 
collector -func GetLogCollectorResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyLogCollector] -} - -// GetCleanupResources returns the resources for the cleanup job -func GetCleanupResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyCleanup] -} - -// GetCephExporterResources returns the resources for the cleanup job -func GetCephExporterResources(p ResourceSpec) v1.ResourceRequirements { - return p[ResourcesKeyCephExporter] -} diff --git a/external/ceph.rook.io/v1/scc.go b/external/ceph.rook.io/v1/scc.go deleted file mode 100644 index 8a76c156..00000000 --- a/external/ceph.rook.io/v1/scc.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - secv1 "github.com/openshift/api/security/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// NewSecurityContextConstraints returns a new SecurityContextConstraints for Rook-Ceph to run on -// OpenShift. 
-func NewSecurityContextConstraints(name string, namespaces ...string) *secv1.SecurityContextConstraints { - return &secv1.SecurityContextConstraints{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "security.openshift.io/v1", - Kind: "SecurityContextConstraints", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - AllowPrivilegedContainer: true, - AllowHostDirVolumePlugin: true, - ReadOnlyRootFilesystem: false, - AllowHostIPC: true, - AllowHostNetwork: false, - AllowHostPorts: false, - AllowedCapabilities: []corev1.Capability{"MKNOD"}, - RequiredDropCapabilities: []corev1.Capability{"ALL"}, - DefaultAddCapabilities: []corev1.Capability{}, - RunAsUser: secv1.RunAsUserStrategyOptions{ - Type: secv1.RunAsUserStrategyRunAsAny, - }, - SELinuxContext: secv1.SELinuxContextStrategyOptions{ - Type: secv1.SELinuxStrategyMustRunAs, - }, - FSGroup: secv1.FSGroupStrategyOptions{ - Type: secv1.FSGroupStrategyMustRunAs, - }, - SupplementalGroups: secv1.SupplementalGroupsStrategyOptions{ - Type: secv1.SupplementalGroupsStrategyRunAsAny, - }, - Volumes: []secv1.FSType{ - secv1.FSTypeConfigMap, - secv1.FSTypeDownwardAPI, - secv1.FSTypeEmptyDir, - secv1.FSTypeHostPath, - secv1.FSTypePersistentVolumeClaim, - secv1.FSProjected, - secv1.FSTypeSecret, - }, - Users: func() (users []string) { - for _, ns := range namespaces { - users = append(users, []string{ - fmt.Sprintf("system:serviceaccount:%s:rook-ceph-system", ns), - fmt.Sprintf("system:serviceaccount:%s:rook-ceph-default", ns), - fmt.Sprintf("system:serviceaccount:%s:rook-ceph-mgr", ns), - fmt.Sprintf("system:serviceaccount:%s:rook-ceph-osd", ns), - fmt.Sprintf("system:serviceaccount:%s:rook-ceph-rgw", ns), - }...) - } - return - }(), - } -} diff --git a/external/ceph.rook.io/v1/scc_test.go b/external/ceph.rook.io/v1/scc_test.go deleted file mode 100644 index 37553fce..00000000 --- a/external/ceph.rook.io/v1/scc_test.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewSecurityContextConstraints(t *testing.T) { - name := "rook-ceph" - scc := NewSecurityContextConstraints(name, name) - assert.True(t, scc.AllowPrivilegedContainer) - assert.Equal(t, name, scc.Name) -} diff --git a/external/ceph.rook.io/v1/spec_test.go b/external/ceph.rook.io/v1/spec_test.go deleted file mode 100644 index 88e95992..00000000 --- a/external/ceph.rook.io/v1/spec_test.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package v1 - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/util/yaml" -) - -func TestClusterSpecMarshal(t *testing.T) { - specYaml := []byte(` -dataDirHostPath: /var/lib/rook -mon: - count: 5 - allowMultiplePerNode: false -network: - hostNetwork: true -storage: - useAllNodes: false - useAllDevices: false - deviceFilter: "^sd." - devicePathFilter: "^/dev/disk/by-path/pci-.*" - location: "region=us-west,datacenter=delmar" - config: - metadataDevice: "nvme01" - databaseSizeMB: "1024" - nodes: - - name: "node2" - deviceFilter: "^foo*" - devicePathFilter: "^/dev/disk/by-id/.*foo.*"`) - - // convert the raw spec yaml into JSON - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - fmt.Printf("rawJSON: %s\n", string(rawJSON)) - - // unmarshal the JSON into a strongly typed storage spec object - var clusterSpec ClusterSpec - err = json.Unmarshal(rawJSON, &clusterSpec) - assert.Nil(t, err) - - // the unmarshalled storage spec should equal the expected spec below - useAllDevices := false - expectedSpec := ClusterSpec{ - Mon: MonSpec{ - Count: 5, - AllowMultiplePerNode: false, - }, - DataDirHostPath: "/var/lib/rook", - Network: NetworkSpec{ - HostNetwork: true, - }, - Storage: StorageScopeSpec{ - UseAllNodes: false, - Selection: Selection{ - UseAllDevices: &useAllDevices, - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - }, - Config: map[string]string{ - "metadataDevice": "nvme01", - "databaseSizeMB": "1024", - }, - Nodes: []Node{ - { - Name: "node2", - Selection: Selection{ - DeviceFilter: "^foo*", - DevicePathFilter: "^/dev/disk/by-id/.*foo.*", - }, - }, - }, - }, - } - - assert.Equal(t, expectedSpec, clusterSpec) -} - -func newTrue() *bool { - t := true - return &t -} - -func newFalse() *bool { - t := false - return &t -} - -func newInt(val int) *int { - return &val -} - -func newString(val string) *string { - return &val -} - -func 
TestObjectStoreSpecMarshalSwiftAndKeystone(t *testing.T) { - // Assert that the new ObjectStoreSpec fields specified in are correctly parsed - specYaml := []byte(` -auth: - keystone: - url: https://keystone:5000/ - acceptedRoles: ["_member_", "service", "admin"] - implicitTenants: swift - tokenCacheSize: 1000 - revocationInterval: 1200 - serviceUserSecretName: rgw-service-user -protocols: - swift: - accountInUrl: true - urlPrefix: /example - versioningEnabled: false - s3: - enabled: false - authUseKeystone: true -`) - rawJSON, err := yaml.ToJSON(specYaml) - assert.Nil(t, err) - fmt.Printf("rawJSON: %s\n", string(rawJSON)) - - // unmarshal the JSON into a strongly typed storage spec object - var objectStoreSpec ObjectStoreSpec - err = json.Unmarshal(rawJSON, &objectStoreSpec) - assert.Nil(t, err) - - // the unmarshalled storage spec should equal the expected spec below - expectedSpec := ObjectStoreSpec{ - Auth: AuthSpec{ - Keystone: &KeystoneSpec{ - Url: "https://keystone:5000/", - AcceptedRoles: []string{"_member_", "service", "admin"}, - ImplicitTenants: "swift", - TokenCacheSize: newInt(1000), - RevocationInterval: newInt(1200), - ServiceUserSecretName: "rgw-service-user", - }, - }, - Protocols: ProtocolSpec{ - S3: &S3Spec{ - Enabled: newFalse(), - AuthUseKeystone: newTrue(), - }, - Swift: &SwiftSpec{ - AccountInUrl: newTrue(), - UrlPrefix: newString("/example"), - VersioningEnabled: newFalse(), - }, - }, - } - - assert.Equal(t, expectedSpec, objectStoreSpec) -} diff --git a/external/ceph.rook.io/v1/status.go b/external/ceph.rook.io/v1/status.go deleted file mode 100644 index ce91612f..00000000 --- a/external/ceph.rook.io/v1/status.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// SetStatusCondition sets the corresponding condition in conditions to newCondition. -// conditions must be non-nil. -// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status) -// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]Condition, newCondition Condition) { - if conditions == nil { - return - } - - now := metav1.NewTime(time.Now()) - - existingCondition := FindStatusCondition(*conditions, newCondition.Type) - if existingCondition == nil { - if newCondition.LastTransitionTime.IsZero() { - newCondition.LastTransitionTime = now - newCondition.LastHeartbeatTime = now - } - *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - if !newCondition.LastTransitionTime.IsZero() { - existingCondition.LastTransitionTime = newCondition.LastTransitionTime - } else { - existingCondition.LastTransitionTime = now - } - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - if !newCondition.LastHeartbeatTime.IsZero() { - existingCondition.LastHeartbeatTime = newCondition.LastHeartbeatTime - } else { - existingCondition.LastHeartbeatTime = now - } -} - -// 
FindStatusCondition finds the conditionType in conditions. -func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { - for i := range conditions { - if conditions[i].Type == conditionType { - return &conditions[i] - } - } - - return nil -} diff --git a/external/ceph.rook.io/v1/status_test.go b/external/ceph.rook.io/v1/status_test.go deleted file mode 100644 index cdf9a622..00000000 --- a/external/ceph.rook.io/v1/status_test.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "reflect" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Based on code from https://github.com/kubernetes/apimachinery/blob/master/pkg/api/meta/conditions.go - -func TestSetStatusCondition(t *testing.T) { - oneHourBefore := metav1.Time{Time: time.Now().Add(-1 * time.Hour)} - oneHourAfter := metav1.Time{Time: time.Now().Add(1 * time.Hour)} - - tests := []struct { - name string - conditions []Condition - toAdd Condition - expected []Condition - }{ - { - name: "should-add", - conditions: []Condition{ - {Type: "first"}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first"}, - {Type: "third"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - }, - }, - { - name: "use-supplied-transition-time", - conditions: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionFalse}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - {Type: "third"}, - }, - }, - { - name: "update-fields", - conditions: []Condition{ - {Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore}, - {Type: "third"}, - }, - toAdd: Condition{Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourAfter, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, - expected: []Condition{ - 
{Type: "first"}, - {Type: "second", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourAfter, Reason: "reason", Message: "message"}, - {Type: "third"}, - }, - }, - { - name: "empty-conditions", - conditions: []Condition{}, - toAdd: Condition{Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - expected: []Condition{ - {Type: "first", Status: v1.ConditionTrue, LastTransitionTime: oneHourBefore, LastHeartbeatTime: oneHourBefore, Reason: "reason", Message: "message"}, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - SetStatusCondition(&test.conditions, test.toAdd) - if !reflect.DeepEqual(test.conditions, test.expected) { - t.Error(test.conditions) - } - }) - } -} - -func TestFindStatusCondition(t *testing.T) { - tests := []struct { - name string - conditions []Condition - conditionType string - expected *Condition - }{ - { - name: "not-present", - conditions: []Condition{ - {Type: "first"}, - }, - conditionType: "second", - expected: nil, - }, - { - name: "present", - conditions: []Condition{ - {Type: "first"}, - {Type: "second"}, - }, - conditionType: "second", - expected: &Condition{Type: "second"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual := FindStatusCondition(test.conditions, ConditionType(test.conditionType)) - if !reflect.DeepEqual(actual, test.expected) { - t.Error(actual) - } - }) - } -} diff --git a/external/ceph.rook.io/v1/storage_test.go b/external/ceph.rook.io/v1/storage_test.go deleted file mode 100644 index f431c86c..00000000 --- a/external/ceph.rook.io/v1/storage_test.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2018 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNodeExists(t *testing.T) { - t.Run("does not exist - no nodes specified", func(t *testing.T) { - spec := StorageScopeSpec{} - assert.False(t, spec.NodeExists("does-not-exist")) - }) - - t.Run("exists - single node specified", func(t *testing.T) { - spec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - assert.True(t, spec.NodeExists("node1")) - }) - - t.Run("exists and not exists - multiple nodes specified", func(t *testing.T) { - spec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - {Name: "node3"}, - {Name: "node4"}, - }, - } - assert.True(t, spec.NodeExists("node1")) - assert.False(t, spec.NodeExists("node2")) - assert.True(t, spec.NodeExists("node3")) - assert.True(t, spec.NodeExists("node4")) - assert.False(t, spec.NodeExists("node5")) - assert.False(t, spec.NodeExists("does-not-exist")) - }) -} - -func TestResolveNodeNotExist(t *testing.T) { - // a nonexistent node should return nil - storageSpec := StorageScopeSpec{} - node := storageSpec.ResolveNode("fake node") - assert.Nil(t, node) -} - -func TestResolveNodeDefaultValues(t *testing.T) { - // a node with no properties and none defined in the cluster storage spec should get the default values - storageSpec := StorageScopeSpec{ - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, "", 
node.Selection.DeviceFilter) - assert.Equal(t, "", node.Selection.DevicePathFilter) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, storageSpec.Devices, node.Devices) -} - -func TestResolveNodeInherentFromCluster(t *testing.T) { - // a node with no properties defined should inherit them from the cluster storage spec - storageSpec := StorageScopeSpec{ - Selection: Selection{ - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - Devices: []Device{{Name: "sda"}}, - }, - Config: map[string]string{ - "foo": "bar", - }, - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, "^sd.", node.Selection.DeviceFilter) - assert.Equal(t, "^/dev/disk/by-path/pci-.*", node.Selection.DevicePathFilter) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, "bar", node.Config["foo"]) - assert.Equal(t, []Device{{Name: "sda"}}, node.Devices) -} - -func TestResolveNodeSpecificProperties(t *testing.T) { - // a node with its own specific properties defined should keep those values, regardless of what the global cluster config is - storageSpec := StorageScopeSpec{ - Selection: Selection{ - DeviceFilter: "^sd.", - DevicePathFilter: "^/dev/disk/by-path/pci-.*", - }, - Config: map[string]string{ - "foo": "bar", - "baz": "biz", - }, - Nodes: []Node{ - { - Name: "node1", // node has its own config that should override cluster level config - Selection: Selection{ - DeviceFilter: "nvme.*", - DevicePathFilter: "^/dev/disk/by-id/.*foo.*", - Devices: []Device{{Name: "device026"}}, - }, - Config: map[string]string{ - "foo": "node1bar", - }, - }, - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.False(t, node.Selection.GetUseAllDevices()) - assert.Equal(t, "nvme.*", node.Selection.DeviceFilter) - assert.Equal(t, "^/dev/disk/by-id/.*foo.*", node.Selection.DevicePathFilter) - assert.Equal(t, 
[]Device{{Name: "device026"}}, node.Devices) - assert.Equal(t, "node1bar", node.Config["foo"]) - assert.Equal(t, "biz", node.Config["baz"]) -} - -func TestResolveNodeUseAllDevices(t *testing.T) { - storageSpec := StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(true)}, // UseAllDevices is set to true on the storage spec - Nodes: []Node{ - {Name: "node1"}, // node gets nothing but its name set - }, - } - - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.True(t, node.Selection.GetUseAllDevices()) -} - -func TestUseAllDevices(t *testing.T) { - storageSpec := StorageScopeSpec{} - assert.False(t, storageSpec.AnyUseAllDevices()) - - storageSpec = StorageScopeSpec{ - Selection: Selection{ - UseAllDevices: newBool(true), - }, // UseAllDevices is set to true on the storage spec - } - assert.True(t, storageSpec.AnyUseAllDevices()) - - storageSpec = StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(false)}, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{UseAllDevices: newBool(true)}, - }, - }, - } - assert.True(t, storageSpec.AnyUseAllDevices()) -} - -func TestClearUseAllDevices(t *testing.T) { - // create a storage spec with use all devices set to true for the cluster and for all nodes - storageSpec := StorageScopeSpec{ - Selection: Selection{UseAllDevices: newBool(true)}, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{UseAllDevices: newBool(true)}, - }, - }, - } - assert.True(t, storageSpec.AnyUseAllDevices()) - - // now clear the use all devices field, it should be cleared from the entire cluster and its nodes - storageSpec.ClearUseAllDevices() - assert.False(t, storageSpec.AnyUseAllDevices()) -} - -func TestClusterDirsDevsInherit(t *testing.T) { - // test for no directories or devices given - storageSpec := StorageScopeSpec{ - Nodes: []Node{ - { - Name: "node1", - }, - }, - } - node := storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, storageSpec.Devices, 
node.Devices) - - // test if cluster wide devices are inherited to no-directories/devices node - storageSpec = StorageScopeSpec{ - Selection: Selection{ - Devices: []Device{{Name: "device1"}}, - }, - Nodes: []Node{ - { - Name: "node1", - }, - }, - } - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device1"}}, node.Devices) - - // test if node directories and devices are used - storageSpec = StorageScopeSpec{ - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{ - Devices: []Device{{Name: "device2"}}, - }, - }, - }, - } - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device2"}}, node.Devices) - - // test if cluster wide devices are and aren't inherited to nodes with and without directories/devices - storageSpec = StorageScopeSpec{ - Selection: Selection{ - Devices: []Device{{Name: "device4"}}, - }, - Nodes: []Node{ - { - Name: "node1", - Selection: Selection{ - Devices: []Device{{Name: "device3"}}, - }, - }, - { - Name: "node2", - }, - }, - } - // node1 keeps its specified devices - node = storageSpec.ResolveNode("node1") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device3"}}, node.Devices) - - // node2 inherits the cluster wide devices since it specified none of its own - node = storageSpec.ResolveNode("node2") - assert.NotNil(t, node) - assert.Equal(t, []Device{{Name: "device4"}}, node.Devices) -} - -func TestStorageScopeSpec_NodeWithNameExists(t *testing.T) { - spec := &StorageScopeSpec{ - Nodes: []Node{}, - } - - assert.False(t, spec.NodeWithNameExists("node0")) - - spec.Nodes = []Node{ - {Name: "node0-hostname"}, - {Name: "node1"}, - {Name: "node2"}, - } - assert.True(t, spec.NodeWithNameExists("node0-hostname")) - assert.False(t, spec.NodeWithNameExists("node0")) - assert.True(t, spec.NodeWithNameExists("node1")) - assert.True(t, spec.NodeWithNameExists("node2")) -} - -func TestIsOnPVCEncrypted(t *testing.T) { - s := 
&StorageScopeSpec{} - assert.False(t, s.IsOnPVCEncrypted()) - - s.StorageClassDeviceSets = []StorageClassDeviceSet{ - {Encrypted: true}, - } - assert.True(t, s.IsOnPVCEncrypted()) -} diff --git a/external/ceph.rook.io/v1/topic.go b/external/ceph.rook.io/v1/topic.go deleted file mode 100644 index 40d9d8f9..00000000 --- a/external/ceph.rook.io/v1/topic.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -    http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "net/url" - "strings" - - "github.com/pkg/errors" -) - -func validateURI(uri string, expectedSchemas []string) error { - parsedURI, err := url.Parse(uri) - if err != nil { - return err - } - schema := strings.ToLower(parsedURI.Scheme) - for _, s := range expectedSchemas { - if s == schema { - return nil - } - } - return errors.Errorf("URI schema %q no in %v", schema, expectedSchemas) -} - -func ValidateHTTPSpec(s *HTTPEndpointSpec) error { - return validateURI(s.URI, []string{"http", "https"}) -} - -func ValidateAMQPSpec(s *AMQPEndpointSpec) error { - return validateURI(s.URI, []string{"amqp", "amqps"}) -} - -func ValidateKafkaSpec(s *KafkaEndpointSpec) error { - return validateURI(s.URI, []string{"kafka"}) -} - -// ValidateTopicSpec validate the bucket notification topic arguments -func (t *CephBucketTopic) ValidateTopicSpec() error { - hasEndpoint := false - if t.Spec.Endpoint.HTTP != nil { - hasEndpoint = true - if err := ValidateHTTPSpec(t.Spec.Endpoint.HTTP); err 
!= nil { - return err - } - } - if t.Spec.Endpoint.AMQP != nil { - if hasEndpoint { - return errors.New("multiple endpoint specs") - } - hasEndpoint = true - if err := ValidateAMQPSpec(t.Spec.Endpoint.AMQP); err != nil { - return err - } - } - if t.Spec.Endpoint.Kafka != nil { - if hasEndpoint { - return errors.New("multiple endpoint specs") - } - hasEndpoint = true - if err := ValidateKafkaSpec(t.Spec.Endpoint.Kafka); err != nil { - return err - } - } - - if !hasEndpoint { - return errors.New("missing endpoint spec") - } - return nil -} diff --git a/external/ceph.rook.io/v1/topic_test.go b/external/ceph.rook.io/v1/topic_test.go deleted file mode 100644 index fee59a69..00000000 --- a/external/ceph.rook.io/v1/topic_test.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2021 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestValidateHTTPTopicSpec(t *testing.T) { - topic := &CephBucketTopic{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fish-topic", - }, - Spec: BucketTopicSpec{ - OpaqueData: "me@email.com", - Persistent: true, - Endpoint: TopicEndpointSpec{ - HTTP: &HTTPEndpointSpec{ - URI: "http://myserver:9999", - DisableVerifySSL: false, - SendCloudEvents: false, - }, - }, - }, - } - - t.Run("valid", func(t *testing.T) { - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("invalid endpoint host", func(t *testing.T) { - topic.Spec.Endpoint.HTTP.URI = "http://my server:9999" - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) - t.Run("https host", func(t *testing.T) { - topic.Spec.Endpoint.HTTP.URI = "https://127.0.0.1:9999" - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("invalid endpoint schema", func(t *testing.T) { - topic.Spec.Endpoint.HTTP.URI = "kaboom://myserver:9999" - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) -} - -func TestValidateAMQPTopicSpec(t *testing.T) { - topic := &CephBucketTopic{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fish-topic", - }, - Spec: BucketTopicSpec{ - OpaqueData: "me@email.com", - Persistent: true, - Endpoint: TopicEndpointSpec{ - AMQP: &AMQPEndpointSpec{ - URI: "amqp://myserver:9999", - Exchange: "fish-ex", - DisableVerifySSL: true, - AckLevel: "broker", - }, - }, - }, - } - - t.Run("valid", func(t *testing.T) { - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("amqps host", func(t *testing.T) { - topic.Spec.Endpoint.AMQP.URI = "amqps://myserver:9999" - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("endpoint schema mismatch", func(t *testing.T) { - topic.Spec.Endpoint.AMQP.URI = "http://myserver:9999" - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) -} - -func 
TestValidateKafkaTopicSpec(t *testing.T) { - topic := &CephBucketTopic{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fish-topic", - }, - Spec: BucketTopicSpec{ - OpaqueData: "me@email.com", - Persistent: true, - Endpoint: TopicEndpointSpec{ - Kafka: &KafkaEndpointSpec{ - URI: "kafka://myserver:9999", - UseSSL: true, - DisableVerifySSL: true, - AckLevel: "broker", - Mechanism: "SCRAM-SHA-512", - }, - }, - }, - } - - t.Run("valid", func(t *testing.T) { - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("endpoint schema mismatch", func(t *testing.T) { - topic.Spec.Endpoint.Kafka.URI = "http://myserver:9999" - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) -} - -func TestInvalidTopicSpec(t *testing.T) { - topic := &CephBucketTopic{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fish-topic", - }, - Spec: BucketTopicSpec{ - OpaqueData: "me@email.com", - Persistent: true, - Endpoint: TopicEndpointSpec{ - Kafka: &KafkaEndpointSpec{ - URI: "kafka://myserver:9999", - UseSSL: true, - DisableVerifySSL: true, - AckLevel: "broker", - }, - AMQP: &AMQPEndpointSpec{ - URI: "amqp://myserver:9999", - Exchange: "fish-ex", - DisableVerifySSL: true, - AckLevel: "broker", - }, - }, - }, - } - - t.Run("too many endpoint specs", func(t *testing.T) { - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) - t.Run("valid", func(t *testing.T) { - topic.Spec.Endpoint.AMQP = nil - err := topic.ValidateTopicSpec() - assert.NoError(t, err) - }) - t.Run("too few endpoint specs", func(t *testing.T) { - topic.Spec.Endpoint.Kafka = nil - err := topic.ValidateTopicSpec() - assert.Error(t, err) - }) -} diff --git a/external/ceph.rook.io/v1/volume.go b/external/ceph.rook.io/v1/volume.go deleted file mode 100644 index d713d976..00000000 --- a/external/ceph.rook.io/v1/volume.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2023 The Rook Authors. All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "path/filepath" - "reflect" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" -) - -func (src *ConfigFileVolumeSource) ToKubernetesVolumeSource() *corev1.VolumeSource { - if src == nil { - return nil - } - - dst := &corev1.VolumeSource{} - vDst := reflect.ValueOf(dst).Elem() - - tSrc := reflect.TypeOf(*src) - vSrc := reflect.ValueOf(*src) - for _, srcField := range reflect.VisibleFields(tSrc) { - if !srcField.IsExported() { - continue - } - - srcVal := vSrc.FieldByName(srcField.Name) - if srcVal.IsNil() { - continue // don't do anything if the src field is a nil ptr - } - - dstVal := vDst.FieldByName(srcField.Name) - dstVal.Set(srcVal) - } - - return dst -} - -// GenerateVolumesAndMounts converts Rook's AdditionalVolumeMounts type to a list of volumes and -// corresponding mounts that can be added to Kubernetes pod specs. 
-func (v *AdditionalVolumeMounts) GenerateVolumesAndMounts(rootDir string) ([]v1.Volume, []v1.VolumeMount) { - vols := []v1.Volume{} - mounts := []v1.VolumeMount{} - - for _, addVolMnt := range *v { - mountPath := filepath.Join(rootDir, addVolMnt.SubPath) - volName := ToValidDNSLabel(mountPath) - vols = append(vols, v1.Volume{ - Name: volName, - VolumeSource: *addVolMnt.VolumeSource.ToKubernetesVolumeSource(), - }) - mounts = append(mounts, v1.VolumeMount{ - Name: volName, - MountPath: mountPath, - }) - } - - return vols, mounts -} - -func (t *VolumeClaimTemplate) ToPVC() *corev1.PersistentVolumeClaim { - if t == nil { - return nil - } - return &corev1.PersistentVolumeClaim{ - ObjectMeta: *t.ObjectMeta.DeepCopy(), - Spec: *t.Spec.DeepCopy(), - } -} diff --git a/external/ceph.rook.io/v1/volume_test.go b/external/ceph.rook.io/v1/volume_test.go deleted file mode 100644 index 78133d65..00000000 --- a/external/ceph.rook.io/v1/volume_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2023 The Rook Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "fmt" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" -) - -func validateToVolumeSource( - t *testing.T, - fieldUnderTest string, fieldValue reflect.Value, - in *ConfigFileVolumeSource, -) { - got := in.ToKubernetesVolumeSource() - - // validate got - vGot := reflect.ValueOf(got).Elem() - for _, gField := range reflect.VisibleFields(vGot.Type()) { - gFieldVal := vGot.FieldByName(gField.Name) - - if gField.Name != fieldUnderTest { - assert.Nilf(t, gFieldVal.Interface(), "fields NOT under test should be nil") - continue - } - - assert.Equalf(t, fieldValue.Interface(), gFieldVal.Interface(), - "fields under test should be deeply equal to what was created") - } -} - -func TestConfigFileVolumeSource_ToVolumeSource(t *testing.T) { - t.Run("nil receiver", func(t *testing.T) { - var in *ConfigFileVolumeSource = nil - got := in.ToKubernetesVolumeSource() - assert.Nil(t, got) - }) - - t.Run("zero-value receiver", func(t *testing.T) { - in := &ConfigFileVolumeSource{} - got := in.ToKubernetesVolumeSource() - assert.Equal(t, v1.VolumeSource{}, *got) - }) - - for _, field := range reflect.VisibleFields(reflect.TypeOf(ConfigFileVolumeSource{})) { - // for each struct field of ConfigFileVolumeSource, create a new CFVS with that field filled - // in with some non-nil value to test ToVolumeSource() with. 
Then ensure that every - // possible volume type of the CFVS converts to k8s' corev1.VolumeSource successfully - in := &ConfigFileVolumeSource{} - - // use reflection to set the field under test with a non-nil created object - vIn := reflect.ValueOf(in).Elem() - fIn := vIn.FieldByName(field.Name) - baseType := field.Type.Elem() - fVal := reflect.New(baseType) - fIn.Set(fVal) - - t.Run(fmt.Sprintf("%s: %s{}", field.Name, field.Type), func(t *testing.T) { - // test with zero object - validateToVolumeSource(t, field.Name, fVal, in) - }) - - t.Run(fmt.Sprintf("%s: %s{}", field.Name, field.Type), func(t *testing.T) { - // set some data set on the object - setSomeFields(field.Type.Elem(), fVal.Elem()) - fIn.Set(fVal) - - validateToVolumeSource(t, field.Name, fVal, in) - }) - } -} - -func setSomeFields(t reflect.Type, v reflect.Value) { - for _, f := range reflect.VisibleFields(t) { - fVal := v.FieldByName(f.Name) - setSomeData(fVal) - } -} - -func setSomeData(v reflect.Value) { - switch v.Kind() { - case reflect.Pointer: - v.Set(reflect.New(v.Type().Elem())) - setSomeData(v.Elem()) - case reflect.String: - v.SetString("string-data") - case reflect.Bool: - v.SetBool(true) - case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: - v.SetInt(0o755) - } -} From 60376402c1b5515f698233946eb24b4d5ca33542 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Sun, 22 Mar 2026 13:42:04 +0100 Subject: [PATCH 27/31] migrate to new migration config Signed-off-by: Tim Schrodi --- internal/bootstrap/local/installer.go | 41 +++++++++++++++++--- internal/installer/files/config_yaml.go | 12 ++++++ internal/installer/files/config_yaml_test.go | 12 ++++++ 3 files changed, 60 insertions(+), 5 deletions(-) diff --git a/internal/bootstrap/local/installer.go b/internal/bootstrap/local/installer.go index c6958d9e..702a3526 100644 --- a/internal/bootstrap/local/installer.go +++ b/internal/bootstrap/local/installer.go @@ -4,6 +4,7 @@ package local import ( + "errors" "fmt" "log" "os" @@ -13,6 
+14,7 @@ import ( "strings" "time" + "github.com/codesphere-cloud/oms/internal/installer/files" "github.com/codesphere-cloud/oms/internal/portal" "github.com/codesphere-cloud/oms/internal/util" corev1 "k8s.io/api/core/v1" @@ -366,11 +368,36 @@ func (b *LocalBootstrapper) resolveNodeIPForNodePort() (string, error) { return "", fmt.Errorf("failed to resolve node IP address for NodePort endpoint") } +func (b *LocalBootstrapper) configurePostgresForMigration(host string, port int32) (func() error, error) { + previousMigration := b.Env.InstallConfig.Codesphere.Migration + b.Env.InstallConfig.Codesphere.Migration = &files.MigrationConfig{ + Postgres: &files.MigrationPostgresConfig{ + Host: host, + Port: int(port), + Database: b.Env.InstallConfig.Postgres.Database, + AltName: b.Env.InstallConfig.Postgres.ServerAddress, + }, + } + + if err := b.icg.WriteInstallConfig(b.Env.InstallConfigPath, true); err != nil { + b.Env.InstallConfig.Codesphere.Migration = previousMigration + return nil, fmt.Errorf("failed to write migration config to install config: %w", err) + } + + return func() error { + b.Env.InstallConfig.Codesphere.Migration = previousMigration + if err := b.icg.WriteInstallConfig(b.Env.InstallConfigPath, true); err != nil { + return fmt.Errorf("failed to restore install config after installer run: %w", err) + } + return nil + }, nil +} + // RunInstaller extracts the deps.tar.gz archive locally and then runs the // install-components.js script directly on the local machine for each // required component step (setUpCluster, codesphere), instead of running // the private-cloud-installer.js which orchestrates remote nodes via SSH. 
-func (b *LocalBootstrapper) RunInstaller() error { +func (b *LocalBootstrapper) RunInstaller() (err error) { if b.Env.InstallVersion == "" && b.Env.InstallLocal == "" { log.Println("No installer package specified, skipping Codesphere installation.") return nil @@ -442,6 +469,14 @@ func (b *LocalBootstrapper) RunInstaller() error { log.Printf("Temporary PostgreSQL NodePort service ready (%s:%d)", dbHost, dbPort) + restoreMigrationConfig, err := b.configurePostgresForMigration(dbHost, dbPort) + if err != nil { + return err + } + defer func() { + err = errors.Join(err, restoreMigrationConfig()) + }() + // Run each component step locally via install-components.js. for _, component := range installerComponentSteps { cmdArgs := []string{ @@ -456,10 +491,6 @@ func (b *LocalBootstrapper) RunInstaller() error { log.Printf("Running install-components.js --component %s", component) log.Printf(" %s %s", nodePath, strings.Join(cmdArgs, " ")) cmd := exec.Command(nodePath, cmdArgs...) - cmd.Env = []string{ - fmt.Sprintf("DB_MIGRATION_HOST=%s", dbHost), - fmt.Sprintf("DB_MIGRATION_PORT=%d", dbPort), - } cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 14c41e1c..04266f42 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -299,12 +299,24 @@ type CodesphereConfig struct { GitProviders *GitProvidersConfig `yaml:"gitProviders,omitempty"` ManagedServices []ManagedServiceConfig `yaml:"managedServices,omitempty"` OpenBao *OpenBaoConfig `yaml:"openBao,omitempty"` + Migration *MigrationConfig `yaml:"migration,omitempty"` Override ChartOverride `yaml:"override,omitempty"` DomainAuthPrivateKey string `yaml:"-"` DomainAuthPublicKey string `yaml:"-"` } +type MigrationConfig struct { + Postgres *MigrationPostgresConfig `yaml:"postgres,omitempty"` +} + +type MigrationPostgresConfig struct { + Host string `yaml:"host,omitempty"` + Port int 
`yaml:"port,omitempty"` + Database string `yaml:"database,omitempty"` + AltName string `yaml:"altName,omitempty"` +} + type OpenBaoConfig struct { Engine string `yaml:"engine,omitempty"` URI string `yaml:"uri,omitempty"` diff --git a/internal/installer/files/config_yaml_test.go b/internal/installer/files/config_yaml_test.go index faf62218..99e4f48c 100644 --- a/internal/installer/files/config_yaml_test.go +++ b/internal/installer/files/config_yaml_test.go @@ -34,6 +34,12 @@ var _ = Describe("ConfigYaml", func() { server: registry.example.com codesphere: + migration: + postgres: + host: 10.0.0.25 + port: 30432 + database: masterdata + altName: masterdata-rw.codesphere.svc.cluster.local deployConfig: images: workspace-agent-24.04: @@ -95,6 +101,12 @@ codesphere: Expect(err).NotTo(HaveOccurred()) Expect(rootConfig.Registry.Server).To(Equal("registry.example.com")) + Expect(rootConfig.Codesphere.Migration).NotTo(BeNil()) + Expect(rootConfig.Codesphere.Migration.Postgres).NotTo(BeNil()) + Expect(rootConfig.Codesphere.Migration.Postgres.Host).To(Equal("10.0.0.25")) + Expect(rootConfig.Codesphere.Migration.Postgres.Port).To(Equal(30432)) + Expect(rootConfig.Codesphere.Migration.Postgres.Database).To(Equal("masterdata")) + Expect(rootConfig.Codesphere.Migration.Postgres.AltName).To(Equal("masterdata-rw.codesphere.svc.cluster.local")) Expect(rootConfig.Codesphere.DeployConfig.Images).To(HaveKey("workspace-agent-24.04")) Expect(rootConfig.Codesphere.DeployConfig.Images).To(HaveKey("workspace-agent-20.04")) Expect(rootConfig.Codesphere.DeployConfig.Images).To(HaveKey("ide-service")) From a14ef9b394c3432381cd4115508e75dae2603865 Mon Sep 17 00:00:00 2001 From: schrodit <7979201+schrodit@users.noreply.github.com> Date: Sun, 22 Mar 2026 14:06:41 +0000 Subject: [PATCH 28/31] chore(docs): Auto-update docs and licenses Signed-off-by: schrodit <7979201+schrodit@users.noreply.github.com> --- NOTICE | 24 ------------------------ internal/tmpl/NOTICE | 24 ------------------------ 2 
files changed, 48 deletions(-) diff --git a/NOTICE b/NOTICE index c9784406..16a5e1ac 100644 --- a/NOTICE +++ b/NOTICE @@ -195,12 +195,6 @@ Version: Unknown License: Apache-2.0 License URL: https://github.com/codesphere-cloud/oms/blob/HEAD/internal/tmpl/LICENSE ----------- -Module: github.com/containernetworking/cni -Version: v1.2.0-rc1 -License: Apache-2.0 -License URL: https://github.com/containernetworking/cni/blob/v1.2.0-rc1/LICENSE - ---------- Module: github.com/cpuguy83/go-md2man/v2/md2man Version: v2.0.7 @@ -543,12 +537,6 @@ Version: v1.1.12 License: MIT License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE ----------- -Module: github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg -Version: v1.7.7 -License: Apache-2.0 -License URL: https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/v1.7.7/LICENSE - ---------- Module: github.com/kr/fs Version: v0.1.0 @@ -687,24 +675,12 @@ Version: v1.1.1 License: Apache-2.0 License URL: https://github.com/opencontainers/image-spec/blob/v1.1.1/LICENSE ----------- -Module: github.com/openshift/api/security/v1 -Version: v0.0.0-20260306105915-ec7ab20aa8c4 -License: Apache-2.0 -License URL: https://github.com/openshift/api/blob/ec7ab20aa8c4/LICENSE - ---------- Module: github.com/peterbourgon/diskv Version: v2.0.1 License: MIT License URL: https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE ----------- -Module: github.com/pkg/errors -Version: v0.9.1 -License: BSD-2-Clause -License URL: https://github.com/pkg/errors/blob/v0.9.1/LICENSE - ---------- Module: github.com/pkg/sftp Version: v1.13.10 diff --git a/internal/tmpl/NOTICE b/internal/tmpl/NOTICE index c9784406..16a5e1ac 100644 --- a/internal/tmpl/NOTICE +++ b/internal/tmpl/NOTICE @@ -195,12 +195,6 @@ Version: Unknown License: Apache-2.0 License URL: https://github.com/codesphere-cloud/oms/blob/HEAD/internal/tmpl/LICENSE ----------- -Module: github.com/containernetworking/cni -Version: v1.2.0-rc1 -License: 
Apache-2.0 -License URL: https://github.com/containernetworking/cni/blob/v1.2.0-rc1/LICENSE - ---------- Module: github.com/cpuguy83/go-md2man/v2/md2man Version: v2.0.7 @@ -543,12 +537,6 @@ Version: v1.1.12 License: MIT License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE ----------- -Module: github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg -Version: v1.7.7 -License: Apache-2.0 -License URL: https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/v1.7.7/LICENSE - ---------- Module: github.com/kr/fs Version: v0.1.0 @@ -687,24 +675,12 @@ Version: v1.1.1 License: Apache-2.0 License URL: https://github.com/opencontainers/image-spec/blob/v1.1.1/LICENSE ----------- -Module: github.com/openshift/api/security/v1 -Version: v0.0.0-20260306105915-ec7ab20aa8c4 -License: Apache-2.0 -License URL: https://github.com/openshift/api/blob/ec7ab20aa8c4/LICENSE - ---------- Module: github.com/peterbourgon/diskv Version: v2.0.1 License: MIT License URL: https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE ----------- -Module: github.com/pkg/errors -Version: v0.9.1 -License: BSD-2-Clause -License URL: https://github.com/pkg/errors/blob/v0.9.1/LICENSE - ---------- Module: github.com/pkg/sftp Version: v1.13.10 From fb8962e27ad15cf04bb54c6f7233448f42b2d6be Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Mon, 23 Mar 2026 19:03:30 +0100 Subject: [PATCH 29/31] use sops/age directly to parse recipient Signed-off-by: Tim Schrodi --- go.mod | 27 ++++--- go.sum | 68 ++++++++--------- internal/bootstrap/bootstrap_suite_test.go | 16 ---- internal/installer/config_manager.go | 2 +- internal/installer/config_manager_test.go | 2 +- internal/installer/files/config_yaml.go | 11 ++- internal/installer/secrets_test.go | 6 +- internal/installer/vault_encryption.go | 87 ++++++++++++---------- 8 files changed, 105 insertions(+), 114 deletions(-) delete mode 100644 internal/bootstrap/bootstrap_suite_test.go diff --git a/go.mod b/go.mod 
index 5a98ec71..b8c4f87f 100644 --- a/go.mod +++ b/go.mod @@ -14,17 +14,17 @@ require ( cloud.google.com/go/iam v1.5.3 cloud.google.com/go/resourcemanager v1.10.7 cloud.google.com/go/serviceusage v1.9.7 + filippo.io/age v1.3.1 github.com/Masterminds/semver/v3 v3.4.0 github.com/cloudnative-pg/cloudnative-pg v1.28.1 github.com/codesphere-cloud/cs-go v0.22.0 github.com/creativeprojects/go-selfupdate v1.5.2 + github.com/getsops/sops/v3 v3.12.2 github.com/google/go-github/v74 v74.0.0 github.com/jedib0t/go-pretty/v6 v6.7.8 - github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 github.com/lithammer/shortuuid v3.0.0+incompatible github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.1 - github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4 github.com/pkg/sftp v1.13.10 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 @@ -53,10 +53,10 @@ require ( cloud.google.com/go/auth v0.18.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect - cloud.google.com/go/kms v1.25.0 // indirect + cloud.google.com/go/kms v1.26.0 // indirect cloud.google.com/go/longrunning v0.8.0 // indirect cloud.google.com/go/monitoring v1.24.3 // indirect - cloud.google.com/go/storage v1.58.0 // indirect + cloud.google.com/go/storage v1.60.0 // indirect code.gitea.io/sdk/gitea v0.23.2 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect codeberg.org/polyfloyd/go-errorlint v1.9.0 // indirect @@ -73,7 +73,7 @@ require ( github.com/Antonboom/nilnil v1.1.1 // indirect github.com/Antonboom/testifylint v1.6.4 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect @@ -95,8 +95,8 @@ require ( github.com/BurntSushi/toml v1.6.0 // indirect github.com/Djarvur/go-err113 v0.1.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -122,7 +122,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.32.10 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.19.10 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect @@ -133,7 +133,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.50.0 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect @@ -405,7 +405,7 @@ 
require ( github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pjbgf/sha1cd v0.5.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 + github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect github.com/prometheus/client_golang v1.23.2 // indirect @@ -526,6 +526,8 @@ require ( ) require ( + filippo.io/edwards25519 v1.2.0 // indirect + filippo.io/hpke v0.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -534,7 +536,6 @@ require ( github.com/cloudnative-pg/barman-cloud v0.4.1-0.20260108104508-ced266c145f5 // indirect github.com/cloudnative-pg/cnpg-i v0.3.1 // indirect github.com/cloudnative-pg/machinery v0.3.3 // indirect - github.com/containernetworking/cni v1.2.0-rc1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect @@ -544,6 +545,7 @@ require ( github.com/extism/go-sdk v1.7.1 // indirect github.com/fluxcd/cli-utils v0.37.2-flux.1 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/getsops/gopgagent v0.0.0-20241224165529-7044f28e491e // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.3 // indirect @@ -557,13 +559,14 @@ require ( github.com/google/go-querystring v1.2.0 // indirect github.com/google/licenseclassifier/v2 v2.0.0 // indirect github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/goreleaser/go-shellwords v1.0.13 // indirect github.com/gosuri/uitable v0.0.4 // indirect 
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect + github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 // indirect github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect diff --git a/go.sum b/go.sum index 5d3e27dc..84fce48d 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +c2sp.org/CCTV/age v0.0.0-20251208015420-e9274a7bdbfd h1:ZLsPO6WdZ5zatV4UfVpr7oAwLGRZ+sebTUruuM4Ra3M= +c2sp.org/CCTV/age v0.0.0-20251208015420-e9274a7bdbfd/go.mod h1:SrHC2C7r5GkDk8R+NFVzYy/sdj0Ypg9htaPXQq5Cqeo= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= charm.land/lipgloss/v2 v2.0.0 h1:sd8N/B3x892oiOjFfBQdXBQp3cAkvjGaU5TvVZC3ivo= @@ -23,8 +25,8 @@ cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdB cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= -cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= +cloud.google.com/go/kms v1.26.0 
h1:cK9mN2cf+9V63D3H1f6koxTatWy39aTI/hCjz1I+adU= +cloud.google.com/go/kms v1.26.0/go.mod h1:pHKOdFJm63hxBsiPkYtowZPltu9dW0MWvBa6IA4HM58= cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= @@ -35,8 +37,8 @@ cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/serviceusage v1.9.7 h1:vrBBeI2ESmri4BLGPz1YH2o37loIQ3DDTloYiDOe2lY= cloud.google.com/go/serviceusage v1.9.7/go.mod h1:JpBpv+4Zbe7+RiC9ydc6xgBUOntIL9tA85d2xKgV83g= -cloud.google.com/go/storage v1.58.0 h1:PflFXlmFJjG/nBeR9B7pKddLQWaFaRWx4uUi/LyNxxo= -cloud.google.com/go/storage v1.58.0/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= +cloud.google.com/go/storage v1.60.0 h1:oBfZrSOCimggVNz9Y/bXY35uUcts7OViubeddTTVzQ8= +cloud.google.com/go/storage v1.60.0/go.mod h1:q+5196hXfejkctrnx+VYU8RKQr/L3c0cBIlrjmiAKE0= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= code.gitea.io/sdk/gitea v0.23.2 h1:iJB1FDmLegwfwjX8gotBDHdPSbk/ZR8V9VmEJaVsJYg= @@ -51,9 +53,13 @@ dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKf dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= dev.gaijin.team/go/golib v0.8.0 h1:BiDNudpoFizoU5VHdQUiabtHSt9fyPX11Fr4OU9PaUQ= dev.gaijin.team/go/golib v0.8.0/go.mod h1:c5fu7t1RSGMxSQgcUYO1sODbzsYnOCXJLmHeNG1Eb+0= +filippo.io/age v1.3.1 h1:hbzdQOJkuaMEpRCLSN1/C5DX74RPcNCk6oqhKMXmZi0= +filippo.io/age v1.3.1/go.mod h1:EZorDTYUxt836i3zdori5IJX/v2Lj6kWFU0cfh6C0D4= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= 
filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo= filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= +filippo.io/hpke v0.4.0 h1:p575VVQ6ted4pL+it6M00V/f2qTZITO0zgmdKCkd5+A= +filippo.io/hpke v0.4.0/go.mod h1:EmAN849/P3qdeK+PCMkDpDm83vRHM5cDipBJ8xbQLVY= github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs= github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM= github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= @@ -78,8 +84,8 @@ github.com/Antonboom/testifylint v1.6.4 h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS8 github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= @@ -143,12 +149,12 @@ github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 
h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0/go.mod h1:l9rva3ApbBpEJxSNYnwT9N4CDLrWgtq3u8736C5hyJw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0 h1:xfK3bbi6F2RDtaZFtUdKO3osOBIhNb+xTs8lFW6yx9o= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod 
h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -227,8 +233,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iK github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18 h1:9vWXHtaepwoAl/UuKzxwgOoJDXPCC3hvgNMfcmdS2Tk= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18/go.mod h1:sKuUZ+MwUTuJbYvZ8pK0x10LvgcJK3Y4rmh63YBekwk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2 h1:1i1SUOTLk0TbMh7+eJYxgv1r1f47BfR69LL6yaELoI0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2/go.mod h1:bo7DhmS/OyVeAJTC768nEk92YKWskqJ4gn0gB5e59qQ= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE= @@ -249,8 +255,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7su github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 h1:2gom8MohxN0SnhHZBYAC4S8jHG+ENEnXjyJ5xKe3vLc= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.4/go.mod h1:HO31s0qt0lso/ADvZQyzKs8js/ku0fMHsfyXW8OPVYc= 
+github.com/aws/aws-sdk-go-v2/service/kms v1.50.0 h1:XSvRJBoDObL6Sn4cRmvH9wqjxjL7wf1ZDolUEyP7hw4= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.0/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA= github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ= @@ -395,8 +401,6 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw= github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY= -github.com/containernetworking/cni v1.2.0-rc1 h1:AKI3+pXtgY4PDLN9+50o9IaywWVuey0Jkw3Lvzp0HCY= -github.com/containernetworking/cni v1.2.0-rc1/go.mod h1:Lt0TQcZQVDju64fYxUhDziTgXCDe3Olzi9I4zZJLWHg= github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -530,6 +534,10 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/getsops/gopgagent v0.0.0-20241224165529-7044f28e491e h1:y/1nzrdF+RPds4lfoEpNhjfmzlgZtPqyO3jMzrqDQws= +github.com/getsops/gopgagent v0.0.0-20241224165529-7044f28e491e/go.mod h1:awFzISqLJoZLm+i9QQ4SgMNHDqljH6jWV0B36V5MrUM= 
+github.com/getsops/sops/v3 v3.12.2 h1:4ctEFDNpAAubW8EMICytX8+BFDBSFJkrKvQ9ahSs0a4= +github.com/getsops/sops/v3 v3.12.2/go.mod h1:BACmHQl0J8nPNXBDSJKRT5oUdZx36CkbohGDj9+bD9M= github.com/ghostiam/protogetter v0.3.20 h1:oW7OPFit2FxZOpmMRPP9FffU4uUpfeE/rEdE1f+MzD0= github.com/ghostiam/protogetter v0.3.20/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= github.com/github/smimesign v0.2.0 h1:Hho4YcX5N1I9XNqhq0fNx0Sts8MhLonHd+HRXVGNjvk= @@ -932,14 +940,12 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12 h1:9Nu54bhS/H/Kgo2/7xNSUuC5G28VR8ljfrLKU2G4IjU= +github.com/json-iterator/go v1.1.13-0.20220915233716-71ac16282d12/go.mod h1:TBzl5BIHNXfS9+C35ZyJaklL7mLDbgUkcgXzSLa8Tk0= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 h1:z4P744DR+PIpkjwXSEc6TvN3L6LVzmUquFgmNm8wSUc= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= github.com/karamaru-alpha/copyloopvar v1.2.2 h1:yfNQvP9YaGQR7VaWLYcfZUlRP2eo2vhExWKxD/fP6q0= github.com/karamaru-alpha/copyloopvar v1.2.2/go.mod 
h1:oY4rGZqZ879JkJMtX3RRkcXRkmUvH0x35ykgaKgsgJY= github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= @@ -1073,10 +1079,10 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg= -github.com/moby/moby/api v1.52.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= -github.com/moby/moby/client v0.2.1 h1:1Grh1552mvv6i+sYOdY+xKKVTvzJegcVMhuXocyDz/k= -github.com/moby/moby/client v0.2.1/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE= +github.com/moby/moby/api v1.53.0 h1:PihqG1ncw4W+8mZs69jlwGXdaYBeb5brF6BL7mPIS/w= +github.com/moby/moby/api v1.53.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.2.2 h1:Pt4hRMCAIlyjL3cr8M5TrXCwKzguebPAc2do2ur7dEM= +github.com/moby/moby/client v0.2.2/go.mod h1:2EkIPVNCqR05CMIzL1mfA07t0HvVUUOl85pasRz/GmQ= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= @@ -1139,12 +1145,8 @@ github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nunnatsa/ginkgolinter v0.23.0 h1:x3o4DGYOWbBMP/VdNQKgSj+25aJKx2Pe6lHr8gBcgf8= github.com/nunnatsa/ginkgolinter v0.23.0/go.mod h1:9qN1+0akwXEccwV1CAcCDfcoBlWXHB+ML9884pL4SZ4= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod 
h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= @@ -1155,8 +1157,6 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= -github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4 h1:WJFsWIK6RgKablY+IXyIeTLPT2ZRHdHXYnG9Eq+9bhM= -github.com/openshift/api v0.0.0-20260306105915-ec7ab20aa8c4/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= @@ -1424,8 +1424,6 @@ github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CP github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vektra/mockery/v3 v3.7.0 h1:Dd0EeaOcRJBVP9n3oYOVPV7KdPaaE3EcwTppaZIsFSM= github.com/vektra/mockery/v3 v3.7.0/go.mod h1:z9Wr23Ha8etImqQwS3boTNR9WkjX6tIklW5c88DRkSw= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns 
v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0 h1:0KGbf+0SMg+UFy4e1A/CPVvXn21f1qtWdeJwxZFoQG8= github.com/wagoodman/go-progress v0.0.0-20230925121702-07e42b3cdba0/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= @@ -1777,8 +1775,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= diff --git a/internal/bootstrap/bootstrap_suite_test.go b/internal/bootstrap/bootstrap_suite_test.go deleted file mode 100644 index 8b287edb..00000000 --- a/internal/bootstrap/bootstrap_suite_test.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Codesphere Inc. -// SPDX-License-Identifier: Apache-2.0 - -package bootstrap_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -func TestBootstrap(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Bootstrap Suite") -} diff --git a/internal/installer/config_manager.go b/internal/installer/config_manager.go index b8a0e32f..0aef9372 100644 --- a/internal/installer/config_manager.go +++ b/internal/installer/config_manager.go @@ -242,7 +242,7 @@ func (g *InstallConfig) WriteVault(vaultPath string, withComments bool) error { return fmt.Errorf("no configuration provided - config is nil") } - vault := g.Config.ExtractVault(g.GetVault()) + vault := g.Config.AddSecretsToVault(g.GetVault()) vaultYAML, err := vault.Marshal() if err != nil { return fmt.Errorf("failed to marshal vault.yaml: %w", err) diff --git a/internal/installer/config_manager_test.go b/internal/installer/config_manager_test.go index a7680473..7b1c6810 100644 --- a/internal/installer/config_manager_test.go +++ b/internal/installer/config_manager_test.go @@ -496,7 +496,7 @@ var _ = Describe("ConfigManager", func() { err = configManager.GenerateSecrets() Expect(err).ToNot(HaveOccurred()) - vault := configManager.Config.ExtractVault(nil) + vault := configManager.Config.ExtractVault() configManager.Vault = vault errors := configManager.ValidateVault() diff --git a/internal/installer/files/config_yaml.go b/internal/installer/files/config_yaml.go index 04266f42..b2a77d1a 100644 --- a/internal/installer/files/config_yaml.go +++ b/internal/installer/files/config_yaml.go @@ -19,6 +19,7 @@ func (v *InstallVault) Marshal() ([]byte, error) { return yaml.Marshal(v) } +// SetSecret adds or updates a secret entry in the vault. 
func (v *InstallVault) SetSecret(entry SecretEntry) { for i, s := range v.Secrets { if s.Name == entry.Name { @@ -605,14 +606,16 @@ func (c *RootConfig) ExtractWorkspaceDockerfiles() map[string]string { return dockerfiles } -func (c *RootConfig) ExtractVault(baseVault *InstallVault) *InstallVault { +func (c *RootConfig) ExtractVault() *InstallVault { vault := &InstallVault{ Secrets: []SecretEntry{}, } - if baseVault != nil { - vault.Secrets = append(vault.Secrets, baseVault.Secrets...) - } + c.AddSecretsToVault(vault) + return vault +} + +func (c *RootConfig) AddSecretsToVault(vault *InstallVault) *InstallVault { c.addCodesphereSecrets(vault) c.addIngressCASecret(vault) c.addACMESecrets(vault) diff --git a/internal/installer/secrets_test.go b/internal/installer/secrets_test.go index 265f9aa0..b99fec1c 100644 --- a/internal/installer/secrets_test.go +++ b/internal/installer/secrets_test.go @@ -56,7 +56,7 @@ var _ = Describe("ExtractVault", func() { Registry: &files.RegistryConfig{}, } - vault := config.ExtractVault(nil) + vault := config.ExtractVault() Expect(vault.Secrets).NotTo(BeEmpty()) @@ -128,7 +128,7 @@ var _ = Describe("ExtractVault", func() { DomainAuthPublicKey: "test-pub", } - vault := config.ExtractVault(nil) + vault := config.ExtractVault() kubeConfigFound := false for _, secret := range vault.Secrets { @@ -156,7 +156,7 @@ var _ = Describe("ExtractVault", func() { DomainAuthPublicKey: "test", } - vault := config.ExtractVault(nil) + vault := config.ExtractVault() for _, service := range services { foundUser := false diff --git a/internal/installer/vault_encryption.go b/internal/installer/vault_encryption.go index 45031461..2af2e2c8 100644 --- a/internal/installer/vault_encryption.go +++ b/internal/installer/vault_encryption.go @@ -5,18 +5,19 @@ package installer import ( "fmt" + "io" "os" "os/exec" "path/filepath" + "runtime" "strings" + + "filippo.io/age" + sopsage "github.com/getsops/sops/v3/age" ) -const ( - // Standard environment variables for SOPS 
age keys. - envSOPSAgeKey = "SOPS_AGE_KEY" - envSOPSAgeKeyFile = "SOPS_AGE_KEY_FILE" - // Default age key file location (relative to $HOME). - defaultAgeKeyRelPath = ".config/sops/age/keys.txt" +var ( + xdgConfigHome = "XDG_CONFIG_HOME" ) // ResolveAgeKey resolves an existing age key or generates a new one. @@ -30,20 +31,16 @@ const ( // the key was supplied via SOPS_AGE_KEY). func ResolveAgeKey(fallbackDir string) (recipient string, keyPath string, err error) { // 1. SOPS_AGE_KEY env var – contains raw key content. - if raw := os.Getenv(envSOPSAgeKey); raw != "" { - recipient, err = parseAgeRecipient(raw) + if raw := os.Getenv(sopsage.SopsAgeKeyEnv); raw != "" { + recipient, err = parseAgeRecipient(strings.NewReader(raw)) if err != nil { - // Fall back to deriving the public key with age-keygen -y. - recipient, err = deriveRecipient(raw) - if err != nil { - return "", "", fmt.Errorf("SOPS_AGE_KEY is set but could not extract public key: %w", err) - } + return "", "", fmt.Errorf("failed to parse age key from SOPS_AGE_KEY environment variable: %w", err) } return recipient, "", nil } // 2. SOPS_AGE_KEY_FILE env var. - if keyFile := os.Getenv(envSOPSAgeKeyFile); keyFile != "" { + if keyFile := os.Getenv(sopsage.SopsAgeKeyFileEnv); keyFile != "" { recipient, err = readRecipientFromFile(keyFile) if err != nil { return "", "", fmt.Errorf("failed to read age key from %s: %w", keyFile, err) @@ -52,9 +49,9 @@ func ResolveAgeKey(fallbackDir string) (recipient string, keyPath string, err er } // 3. Default location: ~/.config/sops/age/keys.txt. 
- home, homeErr := os.UserHomeDir() - if homeErr == nil { - defaultPath := filepath.Join(home, defaultAgeKeyRelPath) + defaultPath, configErr := getUserConfigDir() + if configErr == nil { + defaultPath = filepath.Join(defaultPath, sopsage.SopsAgeKeyUserConfigPath) recipient, err = readRecipientFromFile(defaultPath) if err == nil { return recipient, defaultPath, nil @@ -81,40 +78,48 @@ func ResolveAgeKey(fallbackDir string) (recipient string, keyPath string, err er return recipient, keyPath, nil } -// parseAgeRecipient extracts the public key from age key file content. -// It looks for comment lines of the form "# public key: age1…". -func parseAgeRecipient(content string) (string, error) { - for _, line := range strings.Split(content, "\n") { - line = strings.TrimSpace(line) - if strings.HasPrefix(line, "# public key: ") { - return strings.TrimPrefix(line, "# public key: "), nil - } +// parseAgeRecipient extracts the public key from age key given by reader. +func parseAgeRecipient(reader io.Reader) (string, error) { + ids, err := age.ParseIdentities(reader) + if err != nil { + return "", fmt.Errorf("failed to parse age identities from file: %w", err) + } + if len(ids) == 0 { + return "", fmt.Errorf("no age identities found in file") + } + if len(ids) > 1 { + return "", fmt.Errorf("multiple age identities found in file, expected only one") + } + id := ids[0] + switch id := id.(type) { + case *age.X25519Identity: + return id.Recipient().String(), nil + case *age.HybridIdentity: + return id.Recipient().String(), nil + default: + return "", fmt.Errorf("internal error: unexpected identity type: %T", id) } - return "", fmt.Errorf("no public key comment found in age key content") } // readRecipientFromFile reads an age key file and extracts the public key. 
-func readRecipientFromFile(path string) (string, error) { - data, err := os.ReadFile(path) +func readRecipientFromFile(path string) (recipient string, err error) { + file, err := os.Open(path) if err != nil { return "", err } - return parseAgeRecipient(string(data)) + defer func() { + err = file.Close() + }() + return parseAgeRecipient(file) } -// deriveRecipient uses `age-keygen -y` to derive the public key from a private key. -func deriveRecipient(privateKey string) (string, error) { - cmd := exec.Command("age-keygen", "-y") - cmd.Stdin = strings.NewReader(privateKey) - out, err := cmd.Output() - if err != nil { - return "", fmt.Errorf("age-keygen -y failed: %w", err) - } - recipient := strings.TrimSpace(string(out)) - if recipient == "" || !strings.HasPrefix(recipient, "age1") { - return "", fmt.Errorf("unexpected age-keygen output: %q", recipient) +func getUserConfigDir() (string, error) { + if runtime.GOOS == "darwin" { + if userConfigDir, ok := os.LookupEnv(xdgConfigHome); ok && userConfigDir != "" { + return userConfigDir, nil + } } - return recipient, nil + return os.UserConfigDir() } // generateAgeKey generates a new age keypair and writes it to the given path. 
From 713c9b1a65984be437e1f53939358f12ed3f0233 Mon Sep 17 00:00:00 2001 From: schrodit <7979201+schrodit@users.noreply.github.com> Date: Mon, 23 Mar 2026 18:16:40 +0000 Subject: [PATCH 30/31] chore(docs): Auto-update docs and licenses Signed-off-by: schrodit <7979201+schrodit@users.noreply.github.com> --- NOTICE | 50 +++++++++++++++++++++++++++++++++++---- internal/tmpl/NOTICE | 56 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 92 insertions(+), 14 deletions(-) diff --git a/NOTICE b/NOTICE index 01e5a76b..3330b208 100644 --- a/NOTICE +++ b/NOTICE @@ -69,6 +69,24 @@ Version: v1.0.2 License: BSD-3-Clause License URL: https://github.com/imdario/mergo/blob/v1.0.2/LICENSE +---------- +Module: filippo.io/age +Version: v1.3.1 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/age/blob/v1.3.1/LICENSE + +---------- +Module: filippo.io/edwards25519 +Version: v1.2.0 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE + +---------- +Module: filippo.io/hpke +Version: v0.4.0 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/hpke/blob/v0.4.0/LICENSE + ---------- Module: github.com/42wim/httpsig Version: v1.2.3 @@ -285,6 +303,18 @@ Version: v2.9.0 License: MIT License URL: https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE +---------- +Module: github.com/getsops/gopgagent +Version: v0.0.0-20241224165529-7044f28e491e +License: Apache-2.0 +License URL: https://github.com/getsops/gopgagent/blob/7044f28e491e/LICENSE + +---------- +Module: github.com/getsops/sops/v3 +Version: v3.12.2 +License: MPL-2.0 +License URL: https://github.com/getsops/sops/blob/v3.12.2/LICENSE + ---------- Module: github.com/go-errors/errors Version: v1.5.1 @@ -447,6 +477,12 @@ Version: v0.1.9 License: Apache-2.0 License URL: https://github.com/google/s2a-go/blob/v0.1.9/LICENSE.md +---------- +Module: github.com/google/shlex +Version: v0.0.0-20191202100458-e7afc7fbc510 +License: Apache-2.0 +License URL: 
https://github.com/google/shlex/blob/e7afc7fbc510/COPYING + ---------- Module: github.com/google/uuid Version: v1.6.0 @@ -533,9 +569,9 @@ License URL: https://github.com/jmoiron/sqlx/blob/v1.4.0/LICENSE ---------- Module: github.com/json-iterator/go -Version: v1.1.12 +Version: v1.1.13-0.20220915233716-71ac16282d12 License: MIT -License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE +License URL: https://github.com/json-iterator/go/blob/71ac16282d12/LICENSE ---------- Module: github.com/kr/fs @@ -759,6 +795,12 @@ Version: v1.4.0 License: MIT License URL: https://github.com/shopspring/decimal/blob/v1.4.0/LICENSE +---------- +Module: github.com/sirupsen/logrus +Version: v1.9.4 +License: MIT +License URL: https://github.com/sirupsen/logrus/blob/v1.9.4/LICENSE + ---------- Module: github.com/spf13/cast Version: v1.10.0 @@ -929,9 +971,9 @@ License URL: https://cs.opensource.google/go/x/crypto/+/v0.49.0:LICENSE ---------- Module: golang.org/x/mod/semver -Version: v0.33.0 +Version: v0.34.0 License: BSD-3-Clause -License URL: https://cs.opensource.google/go/x/mod/+/v0.33.0:LICENSE +License URL: https://cs.opensource.google/go/x/mod/+/v0.34.0:LICENSE ---------- Module: golang.org/x/net diff --git a/internal/tmpl/NOTICE b/internal/tmpl/NOTICE index b15644cc..3330b208 100644 --- a/internal/tmpl/NOTICE +++ b/internal/tmpl/NOTICE @@ -69,6 +69,24 @@ Version: v1.0.2 License: BSD-3-Clause License URL: https://github.com/imdario/mergo/blob/v1.0.2/LICENSE +---------- +Module: filippo.io/age +Version: v1.3.1 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/age/blob/v1.3.1/LICENSE + +---------- +Module: filippo.io/edwards25519 +Version: v1.2.0 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE + +---------- +Module: filippo.io/hpke +Version: v0.4.0 +License: BSD-3-Clause +License URL: https://github.com/FiloSottile/hpke/blob/v0.4.0/LICENSE + ---------- Module: github.com/42wim/httpsig Version: v1.2.3 @@ 
-160,7 +178,6 @@ License: BSD-3-Clause License URL: https://github.com/cloudflare/circl/blob/v1.6.3/LICENSE ---------- -<<<<<<< HEAD Module: github.com/cloudnative-pg/barman-cloud/pkg/api Version: v0.4.1-0.20260108104508-ced266c145f5 License: Apache-2.0 @@ -188,11 +205,6 @@ License URL: https://github.com/cloudnative-pg/machinery/blob/v0.3.3/LICENSE Module: github.com/codesphere-cloud/cs-go Version: v0.22.0 License: Apache-2.0 -======= -Module: github.com/codesphere-cloud/cs-go -Version: v0.22.0 -License: Apache-2.0 ->>>>>>> origin/main License URL: https://github.com/codesphere-cloud/cs-go/blob/v0.22.0/LICENSE ---------- @@ -291,6 +303,18 @@ Version: v2.9.0 License: MIT License URL: https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE +---------- +Module: github.com/getsops/gopgagent +Version: v0.0.0-20241224165529-7044f28e491e +License: Apache-2.0 +License URL: https://github.com/getsops/gopgagent/blob/7044f28e491e/LICENSE + +---------- +Module: github.com/getsops/sops/v3 +Version: v3.12.2 +License: MPL-2.0 +License URL: https://github.com/getsops/sops/blob/v3.12.2/LICENSE + ---------- Module: github.com/go-errors/errors Version: v1.5.1 @@ -453,6 +477,12 @@ Version: v0.1.9 License: Apache-2.0 License URL: https://github.com/google/s2a-go/blob/v0.1.9/LICENSE.md +---------- +Module: github.com/google/shlex +Version: v0.0.0-20191202100458-e7afc7fbc510 +License: Apache-2.0 +License URL: https://github.com/google/shlex/blob/e7afc7fbc510/COPYING + ---------- Module: github.com/google/uuid Version: v1.6.0 @@ -539,9 +569,9 @@ License URL: https://github.com/jmoiron/sqlx/blob/v1.4.0/LICENSE ---------- Module: github.com/json-iterator/go -Version: v1.1.12 +Version: v1.1.13-0.20220915233716-71ac16282d12 License: MIT -License URL: https://github.com/json-iterator/go/blob/v1.1.12/LICENSE +License URL: https://github.com/json-iterator/go/blob/71ac16282d12/LICENSE ---------- Module: github.com/kr/fs @@ -765,6 +795,12 @@ Version: v1.4.0 License: MIT License URL: 
https://github.com/shopspring/decimal/blob/v1.4.0/LICENSE +---------- +Module: github.com/sirupsen/logrus +Version: v1.9.4 +License: MIT +License URL: https://github.com/sirupsen/logrus/blob/v1.9.4/LICENSE + ---------- Module: github.com/spf13/cast Version: v1.10.0 @@ -935,9 +971,9 @@ License URL: https://cs.opensource.google/go/x/crypto/+/v0.49.0:LICENSE ---------- Module: golang.org/x/mod/semver -Version: v0.33.0 +Version: v0.34.0 License: BSD-3-Clause -License URL: https://cs.opensource.google/go/x/mod/+/v0.33.0:LICENSE +License URL: https://cs.opensource.google/go/x/mod/+/v0.34.0:LICENSE ---------- Module: golang.org/x/net From b251e8b91a04c7f0d50f9262acdc895058cc9209 Mon Sep 17 00:00:00 2001 From: Tim Schrodi Date: Tue, 24 Mar 2026 09:06:30 +0100 Subject: [PATCH 31/31] fix vault test Signed-off-by: Tim Schrodi --- internal/installer/vault_encryption_test.go | 34 ++++----------------- 1 file changed, 6 insertions(+), 28 deletions(-) diff --git a/internal/installer/vault_encryption_test.go b/internal/installer/vault_encryption_test.go index e931a710..8611259b 100644 --- a/internal/installer/vault_encryption_test.go +++ b/internal/installer/vault_encryption_test.go @@ -61,18 +61,6 @@ var _ = Describe("VaultEncryption", func() { } }) - Context("with SOPS_AGE_KEY env var containing key file content", func() { - It("should extract the recipient from the public key comment", func() { - keyContent := "# created: 2024-01-01T00:00:00Z\n# public key: age1testrecipient123\nAGE-SECRET-KEY-FAKE\n" - Expect(os.Setenv("SOPS_AGE_KEY", keyContent)).To(Succeed()) - - recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) - Expect(err).ToNot(HaveOccurred()) - Expect(recipient).To(Equal("age1testrecipient123")) - Expect(keyPath).To(BeEmpty(), "keyPath should be empty when using SOPS_AGE_KEY env var") - }) - }) - Context("with SOPS_AGE_KEY env var containing only a private key (no comment)", func() { It("should fall back to age-keygen -y to derive the recipient", func() { if 
!sopsAndAgeAvailable() { @@ -107,16 +95,18 @@ var _ = Describe("VaultEncryption", func() { Context("with SOPS_AGE_KEY_FILE env var pointing to a key file", func() { It("should read the recipient from the referenced file", func() { + if !sopsAndAgeAvailable() { + Skip("age-keygen not available") + } keyFile := filepath.Join(tmpDir, "keys.txt") - keyContent := "# created: 2024-01-01T00:00:00Z\n# public key: age1fromfile456\nAGE-SECRET-KEY-FAKE\n" - err := os.WriteFile(keyFile, []byte(keyContent), 0600) - Expect(err).ToNot(HaveOccurred()) + out, err := exec.Command("age-keygen", "-o", keyFile).CombinedOutput() + Expect(err).ToNot(HaveOccurred(), string(out)) Expect(os.Setenv("SOPS_AGE_KEY_FILE", keyFile)).To(Succeed()) recipient, keyPath, err := installer.ResolveAgeKey(tmpDir) Expect(err).ToNot(HaveOccurred()) - Expect(recipient).To(Equal("age1fromfile456")) + Expect(recipient).To(HavePrefix("age1")) Expect(keyPath).To(Equal(keyFile)) }) @@ -127,18 +117,6 @@ var _ = Describe("VaultEncryption", func() { Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to read age key")) }) - - It("should return error if the file has no public key comment", func() { - keyFile := filepath.Join(tmpDir, "bad_key.txt") - err := os.WriteFile(keyFile, []byte("AGE-SECRET-KEY-NOPUBLICCOMMENT\n"), 0600) - Expect(err).ToNot(HaveOccurred()) - - Expect(os.Setenv("SOPS_AGE_KEY_FILE", keyFile)).To(Succeed()) - - _, _, err = installer.ResolveAgeKey(tmpDir) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no public key comment")) - }) }) Context("with no env vars set", func() {