mirror of
https://github.com/outbackdingo/cozystack.git
synced 2026-02-05 08:17:59 +00:00
Compare commits
1 Commits
tinkerbell
...
cdi-scratc
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
61f8786a96 |
47
.github/workflows/pre-commit.yml
vendored
47
.github/workflows/pre-commit.yml
vendored
@@ -1,47 +0,0 @@
|
|||||||
name: Pre-Commit Checks
|
|
||||||
|
|
||||||
on: [push, pull_request]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
pre-commit:
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@v4
|
|
||||||
with:
|
|
||||||
python-version: '3.11'
|
|
||||||
|
|
||||||
- name: Install pre-commit
|
|
||||||
run: pip install pre-commit
|
|
||||||
|
|
||||||
- name: Install generate
|
|
||||||
run: |
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install curl -y
|
|
||||||
curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
|
|
||||||
sudo apt install nodejs -y
|
|
||||||
git clone https://github.com/bitnami/readme-generator-for-helm
|
|
||||||
cd ./readme-generator-for-helm
|
|
||||||
npm install
|
|
||||||
npm install -g pkg
|
|
||||||
pkg . -o /usr/local/bin/readme-generator
|
|
||||||
|
|
||||||
- name: Run pre-commit hooks
|
|
||||||
run: |
|
|
||||||
git fetch origin main || git fetch origin master
|
|
||||||
base_commit=$(git rev-parse --verify origin/main || git rev-parse --verify origin/master || echo "")
|
|
||||||
|
|
||||||
if [ -z "$base_commit" ]; then
|
|
||||||
files=$(git ls-files '*.yaml' '*.md')
|
|
||||||
else
|
|
||||||
files=$(git diff --name-only "$base_commit" -- '*.yaml' '*.md')
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$files" ]; then
|
|
||||||
echo "$files" | xargs pre-commit run --files
|
|
||||||
else
|
|
||||||
echo "No YAML or Markdown files to lint"
|
|
||||||
fi
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
repos:
|
|
||||||
- repo: local
|
|
||||||
hooks:
|
|
||||||
- id: gen-versions-map
|
|
||||||
name: Generate versions map and check for changes
|
|
||||||
entry: sh -c 'make -C packages/apps check-version-map && make -C packages/extra check-version-map'
|
|
||||||
language: system
|
|
||||||
types: [file]
|
|
||||||
pass_filenames: false
|
|
||||||
description: Run the script and fail if it generates changes
|
|
||||||
- id: run-make-generate
|
|
||||||
name: Run 'make generate' in all app directories
|
|
||||||
entry: |
|
|
||||||
/bin/bash -c '
|
|
||||||
for dir in ./packages/apps/*/; do
|
|
||||||
if [ -d "$dir" ]; then
|
|
||||||
echo "Running make generate in $dir"
|
|
||||||
(cd "$dir" && make generate)
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
'
|
|
||||||
language: script
|
|
||||||
files: ^.*$
|
|
||||||
@@ -28,5 +28,4 @@ This list is sorted in chronological order, based on the submission date.
|
|||||||
| [Ænix](https://aenix.io/) | @kvaps | 2024-02-14 | Ænix provides consulting services for cloud providers and uses Cozystack as the main tool for organizing managed services for them. |
|
| [Ænix](https://aenix.io/) | @kvaps | 2024-02-14 | Ænix provides consulting services for cloud providers and uses Cozystack as the main tool for organizing managed services for them. |
|
||||||
| [Mediatech](https://mediatech.dev/) | @ugenk | 2024-05-01 | We're developing and hosting software for our and our custmer services. We're using cozystack as a kubernetes distribution for that. |
|
| [Mediatech](https://mediatech.dev/) | @ugenk | 2024-05-01 | We're developing and hosting software for our and our custmer services. We're using cozystack as a kubernetes distribution for that. |
|
||||||
| [Bootstack](https://bootstack.app/) | @mrkhachaturov | 2024-08-01| At Bootstack, we utilize a Kubernetes operator specifically designed to simplify and streamline cloud infrastructure creation.|
|
| [Bootstack](https://bootstack.app/) | @mrkhachaturov | 2024-08-01| At Bootstack, we utilize a Kubernetes operator specifically designed to simplify and streamline cloud infrastructure creation.|
|
||||||
| [gohost](https://gohost.kz/) | @karabass_off | 2024-02-01 | Our company has been working in the market of Kazakhstan for more than 15 years, providing clients with a standard set of services: VPS/VDC, IaaS, shared hosting, etc. Now we are expanding the lineup by introducing Bare Metal Kubenetes cluster under Cozystack management. |
|
| [gohost](https://gohost.kz/) | @karabass_off | 2024-02-01| Our company has been working in the market of Kazakhstan for more than 15 years, providing clients with a standard set of services: VPS/VDC, IaaS, shared hosting, etc. Now we are expanding the lineup by introducing Bare Metal Kubenetes cluster under Cozystack management.|
|
||||||
| [Urmanac](https://urmanac.com) | @kingdonb | 2024-12-04 | Urmanac is the future home of a hosting platform for the knowledge base of a community of personal server enthusiasts. We use Cozystack to provide support services for web sites hosted using both conventional deployments and on SpinKube, with WASM. |
|
|
||||||
|
|||||||
@@ -1,12 +1,7 @@
|
|||||||
# The Cozystack Maintainers
|
# The Cozystack Maintainers
|
||||||
|
|
||||||
| Maintainer | GitHub Username | Company | Responsibility |
|
| Maintainer | GitHub Username | Company |
|
||||||
| ---------- | --------------- | ------- | --------------------------------- |
|
| ---------- | --------------- | ------- |
|
||||||
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix | Core Maintainer |
|
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix |
|
||||||
| George Gaál | [@gecube](https://github.com/gecube) | Ænix | DevOps Practices in Platform, Developers Advocate |
|
| George Gaál | [@gecube](https://github.com/gecube) | Ænix |
|
||||||
| Kingdon Barrett | [@kingdonb](https://github.com/kingdonb) | Urmanac | FluxCD and flux-operator |
|
| Eduard Generalov | [@egeneralov](https://github.com/egeneralov) | Ænix |
|
||||||
| Timofei Larkin | [@lllamnyp](https://github.com/lllamnyp) | 3commas | Etcd-operator Lead |
|
|
||||||
| Artem Bortnikov | [@aobort](https://github.com/aobort) | Timescale | Etcd-operator Lead |
|
|
||||||
| Andrei Gumilev | [@chumkaska](https://github.com/chumkaska) | Ænix | Platform Documentation |
|
|
||||||
| Timur Tukaev | [@tym83](https://github.com/tym83) | Ænix | Cozystack Website, Marketing, Community Management |
|
|
||||||
| Kirill Klinchenkov | [@klinch0](https://github.com/klinch0) | Ænix | Core Maintainer |
|
|
||||||
|
|||||||
7
Makefile
7
Makefile
@@ -6,8 +6,6 @@ build:
|
|||||||
make -C packages/apps/mysql image
|
make -C packages/apps/mysql image
|
||||||
make -C packages/apps/clickhouse image
|
make -C packages/apps/clickhouse image
|
||||||
make -C packages/apps/kubernetes image
|
make -C packages/apps/kubernetes image
|
||||||
make -C packages/system/cozystack-api image
|
|
||||||
make -C packages/system/cozystack-controller image
|
|
||||||
make -C packages/system/cilium image
|
make -C packages/system/cilium image
|
||||||
make -C packages/system/kubeovn image
|
make -C packages/system/kubeovn image
|
||||||
make -C packages/system/dashboard image
|
make -C packages/system/dashboard image
|
||||||
@@ -37,7 +35,4 @@ assets:
|
|||||||
test:
|
test:
|
||||||
make -C packages/core/testing apply
|
make -C packages/core/testing apply
|
||||||
make -C packages/core/testing test
|
make -C packages/core/testing test
|
||||||
make -C packages/core/testing test-applications
|
make -C packages/core/testing delete
|
||||||
|
|
||||||
generate:
|
|
||||||
hack/update-codegen.sh
|
|
||||||
|
|||||||
@@ -1,25 +0,0 @@
|
|||||||
API rule violation: list_type_missing,github.com/aenix-io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XIntOrString
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XListMapKeys
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XListType
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XMapType
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XPreserveUnknownFields
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XValidations
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrArray,JSONSchemas
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrArray,Schema
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrBool,Allows
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrBool,Schema
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrStringArray,Property
|
|
||||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrStringArray,Schema
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,APIResourceList,APIResources
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Duration,Duration
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Object
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Type
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,MicroTime,Time
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,StatusCause,Type
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Time,Time
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentEncoding
|
|
||||||
API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentType
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2025.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group.
|
|
||||||
// +kubebuilder:object:generate=true
|
|
||||||
// +groupName=cozystack.io
|
|
||||||
package v1alpha1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/scheme"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// GroupVersion is group version used to register these objects.
|
|
||||||
GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}
|
|
||||||
|
|
||||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
|
|
||||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
|
||||||
|
|
||||||
// AddToScheme adds the types in this group-version to the given scheme.
|
|
||||||
AddToScheme = SchemeBuilder.AddToScheme
|
|
||||||
)
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2025.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package v1alpha1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WorkloadStatus defines the observed state of Workload
|
|
||||||
type WorkloadStatus struct {
|
|
||||||
// Kind represents the type of workload (redis, postgres, etc.)
|
|
||||||
// +required
|
|
||||||
Kind string `json:"kind"`
|
|
||||||
|
|
||||||
// Type represents the specific role of the workload (redis, sentinel, etc.)
|
|
||||||
// If not specified, defaults to Kind
|
|
||||||
// +optional
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
|
|
||||||
// Resources specifies the compute resources allocated to this workload
|
|
||||||
// +required
|
|
||||||
Resources map[string]resource.Quantity `json:"resources"`
|
|
||||||
|
|
||||||
// Operational indicates if all pods of the workload are ready
|
|
||||||
// +optional
|
|
||||||
Operational bool `json:"operational"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
|
||||||
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".status.kind"
|
|
||||||
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.type"
|
|
||||||
// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu"
|
|
||||||
// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory"
|
|
||||||
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=`.status.operational`
|
|
||||||
|
|
||||||
// Workload is the Schema for the workloads API
|
|
||||||
type Workload struct {
|
|
||||||
metav1.TypeMeta `json:",inline"`
|
|
||||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
|
||||||
|
|
||||||
Status WorkloadStatus `json:"status,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
|
||||||
|
|
||||||
// WorkloadList contains a list of Workload
|
|
||||||
type WorkloadList struct {
|
|
||||||
metav1.TypeMeta `json:",inline"`
|
|
||||||
metav1.ListMeta `json:"metadata,omitempty"`
|
|
||||||
Items []Workload `json:"items"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
SchemeBuilder.Register(&Workload{}, &WorkloadList{})
|
|
||||||
}
|
|
||||||
@@ -1,91 +0,0 @@
|
|||||||
package v1alpha1
|
|
||||||
|
|
||||||
import (
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WorkloadMonitorSpec defines the desired state of WorkloadMonitor
|
|
||||||
type WorkloadMonitorSpec struct {
|
|
||||||
// Selector is a label selector to find workloads to monitor
|
|
||||||
// +required
|
|
||||||
Selector map[string]string `json:"selector"`
|
|
||||||
|
|
||||||
// Kind specifies the kind of the workload
|
|
||||||
// +optional
|
|
||||||
Kind string `json:"kind,omitempty"`
|
|
||||||
|
|
||||||
// Type specifies the type of the workload
|
|
||||||
// +optional
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
|
|
||||||
// Version specifies the version of the workload
|
|
||||||
// +optional
|
|
||||||
Version string `json:"version,omitempty"`
|
|
||||||
|
|
||||||
// MinReplicas specifies the minimum number of replicas that should be available
|
|
||||||
// +kubebuilder:validation:Minimum=0
|
|
||||||
// +optional
|
|
||||||
MinReplicas *int32 `json:"minReplicas,omitempty"`
|
|
||||||
|
|
||||||
// Replicas is the desired number of replicas
|
|
||||||
// If not specified, will use observedReplicas as the target
|
|
||||||
// +kubebuilder:validation:Minimum=0
|
|
||||||
// +optional
|
|
||||||
Replicas *int32 `json:"replicas,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// WorkloadMonitorStatus defines the observed state of WorkloadMonitor
|
|
||||||
type WorkloadMonitorStatus struct {
|
|
||||||
// Operational indicates if the workload meets all operational requirements
|
|
||||||
// +optional
|
|
||||||
Operational *bool `json:"operational,omitempty"`
|
|
||||||
|
|
||||||
// AvailableReplicas is the number of ready replicas
|
|
||||||
// +optional
|
|
||||||
AvailableReplicas int32 `json:"availableReplicas"`
|
|
||||||
|
|
||||||
// ObservedReplicas is the total number of pods observed
|
|
||||||
// +optional
|
|
||||||
ObservedReplicas int32 `json:"observedReplicas"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
|
||||||
// +kubebuilder:subresource:status
|
|
||||||
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".spec.kind"
|
|
||||||
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
|
|
||||||
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
|
|
||||||
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
|
|
||||||
// +kubebuilder:printcolumn:name="MinReplicas",type="integer",JSONPath=".spec.minReplicas"
|
|
||||||
// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas"
|
|
||||||
// +kubebuilder:printcolumn:name="Observed",type="integer",JSONPath=".status.observedReplicas"
|
|
||||||
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=".status.operational"
|
|
||||||
|
|
||||||
// WorkloadMonitor is the Schema for the workloadmonitors API
|
|
||||||
type WorkloadMonitor struct {
|
|
||||||
metav1.TypeMeta `json:",inline"`
|
|
||||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
|
||||||
|
|
||||||
Spec WorkloadMonitorSpec `json:"spec,omitempty"`
|
|
||||||
Status WorkloadMonitorStatus `json:"status,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
|
||||||
|
|
||||||
// WorkloadMonitorList contains a list of WorkloadMonitor
|
|
||||||
type WorkloadMonitorList struct {
|
|
||||||
metav1.TypeMeta `json:",inline"`
|
|
||||||
metav1.ListMeta `json:"metadata,omitempty"`
|
|
||||||
Items []WorkloadMonitor `json:"items"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
SchemeBuilder.Register(&WorkloadMonitor{}, &WorkloadMonitorList{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSelector returns the label selector from metadata
|
|
||||||
func (w *WorkloadMonitor) GetSelector() map[string]string {
|
|
||||||
return w.Spec.Selector
|
|
||||||
}
|
|
||||||
|
|
||||||
// Selector specifies the label selector for workloads
|
|
||||||
type Selector map[string]string
|
|
||||||
@@ -1,238 +0,0 @@
|
|||||||
//go:build !ignore_autogenerated
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright 2025 The Cozystack Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Code generated by controller-gen. DO NOT EDIT.
|
|
||||||
|
|
||||||
package v1alpha1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in Selector) DeepCopyInto(out *Selector) {
|
|
||||||
{
|
|
||||||
in := &in
|
|
||||||
*out = make(Selector, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
|
|
||||||
func (in Selector) DeepCopy() Selector {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(Selector)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return *out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *Workload) DeepCopyInto(out *Workload) {
|
|
||||||
*out = *in
|
|
||||||
out.TypeMeta = in.TypeMeta
|
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
|
||||||
in.Status.DeepCopyInto(&out.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
|
|
||||||
func (in *Workload) DeepCopy() *Workload {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(Workload)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *Workload) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
|
|
||||||
*out = *in
|
|
||||||
out.TypeMeta = in.TypeMeta
|
|
||||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
|
||||||
if in.Items != nil {
|
|
||||||
in, out := &in.Items, &out.Items
|
|
||||||
*out = make([]Workload, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
|
|
||||||
func (in *WorkloadList) DeepCopy() *WorkloadList {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadList)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *WorkloadList) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadMonitor) DeepCopyInto(out *WorkloadMonitor) {
|
|
||||||
*out = *in
|
|
||||||
out.TypeMeta = in.TypeMeta
|
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
|
||||||
in.Spec.DeepCopyInto(&out.Spec)
|
|
||||||
in.Status.DeepCopyInto(&out.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitor.
|
|
||||||
func (in *WorkloadMonitor) DeepCopy() *WorkloadMonitor {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadMonitor)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *WorkloadMonitor) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadMonitorList) DeepCopyInto(out *WorkloadMonitorList) {
|
|
||||||
*out = *in
|
|
||||||
out.TypeMeta = in.TypeMeta
|
|
||||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
|
||||||
if in.Items != nil {
|
|
||||||
in, out := &in.Items, &out.Items
|
|
||||||
*out = make([]WorkloadMonitor, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorList.
|
|
||||||
func (in *WorkloadMonitorList) DeepCopy() *WorkloadMonitorList {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadMonitorList)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
|
||||||
func (in *WorkloadMonitorList) DeepCopyObject() runtime.Object {
|
|
||||||
if c := in.DeepCopy(); c != nil {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadMonitorSpec) DeepCopyInto(out *WorkloadMonitorSpec) {
|
|
||||||
*out = *in
|
|
||||||
if in.Selector != nil {
|
|
||||||
in, out := &in.Selector, &out.Selector
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.MinReplicas != nil {
|
|
||||||
in, out := &in.MinReplicas, &out.MinReplicas
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.Replicas != nil {
|
|
||||||
in, out := &in.Replicas, &out.Replicas
|
|
||||||
*out = new(int32)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorSpec.
|
|
||||||
func (in *WorkloadMonitorSpec) DeepCopy() *WorkloadMonitorSpec {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadMonitorSpec)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadMonitorStatus) DeepCopyInto(out *WorkloadMonitorStatus) {
|
|
||||||
*out = *in
|
|
||||||
if in.Operational != nil {
|
|
||||||
in, out := &in.Operational, &out.Operational
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorStatus.
|
|
||||||
func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadMonitorStatus)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
|
|
||||||
*out = *in
|
|
||||||
if in.Resources != nil {
|
|
||||||
in, out := &in.Resources, &out.Resources
|
|
||||||
*out = make(map[string]resource.Quantity, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val.DeepCopy()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadStatus.
|
|
||||||
func (in *WorkloadStatus) DeepCopy() *WorkloadStatus {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(WorkloadStatus)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2024 The Cozystack Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/aenix-io/cozystack/pkg/cmd/server"
|
|
||||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
|
||||||
"k8s.io/component-base/cli"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
ctx := genericapiserver.SetupSignalContext()
|
|
||||||
options := server.NewAppsServerOptions(os.Stdout, os.Stderr)
|
|
||||||
cmd := server.NewCommandStartAppsServer(ctx, options)
|
|
||||||
code := cli.Run(cmd)
|
|
||||||
os.Exit(code)
|
|
||||||
}
|
|
||||||
@@ -1,210 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2025.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"flag"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
|
||||||
// to ensure that exec-entrypoint and run can make use of them.
|
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
|
|
||||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
|
||||||
|
|
||||||
cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
|
|
||||||
"github.com/aenix-io/cozystack/internal/controller"
|
|
||||||
"github.com/aenix-io/cozystack/internal/telemetry"
|
|
||||||
// +kubebuilder:scaffold:imports
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
scheme = runtime.NewScheme()
|
|
||||||
setupLog = ctrl.Log.WithName("setup")
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
|
||||||
|
|
||||||
utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
|
|
||||||
// +kubebuilder:scaffold:scheme
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
var metricsAddr string
|
|
||||||
var enableLeaderElection bool
|
|
||||||
var probeAddr string
|
|
||||||
var secureMetrics bool
|
|
||||||
var enableHTTP2 bool
|
|
||||||
var disableTelemetry bool
|
|
||||||
var telemetryEndpoint string
|
|
||||||
var telemetryInterval string
|
|
||||||
var cozystackVersion string
|
|
||||||
var tlsOpts []func(*tls.Config)
|
|
||||||
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
|
|
||||||
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
|
|
||||||
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
|
|
||||||
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
|
|
||||||
"Enable leader election for controller manager. "+
|
|
||||||
"Enabling this will ensure there is only one active controller manager.")
|
|
||||||
flag.BoolVar(&secureMetrics, "metrics-secure", true,
|
|
||||||
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
|
|
||||||
flag.BoolVar(&enableHTTP2, "enable-http2", false,
|
|
||||||
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
|
|
||||||
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
|
|
||||||
"Disable telemetry collection")
|
|
||||||
flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
|
|
||||||
"Endpoint for sending telemetry data")
|
|
||||||
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
|
|
||||||
"Interval between telemetry data collection (e.g. 15m, 1h)")
|
|
||||||
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
|
|
||||||
"Version of Cozystack")
|
|
||||||
opts := zap.Options{
|
|
||||||
Development: false,
|
|
||||||
}
|
|
||||||
opts.BindFlags(flag.CommandLine)
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
// Parse telemetry interval
|
|
||||||
interval, err := time.ParseDuration(telemetryInterval)
|
|
||||||
if err != nil {
|
|
||||||
setupLog.Error(err, "invalid telemetry interval")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure telemetry
|
|
||||||
telemetryConfig := telemetry.Config{
|
|
||||||
Disabled: disableTelemetry,
|
|
||||||
Endpoint: telemetryEndpoint,
|
|
||||||
Interval: interval,
|
|
||||||
CozystackVersion: cozystackVersion,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
|
|
||||||
|
|
||||||
// if the enable-http2 flag is false (the default), http/2 should be disabled
|
|
||||||
// due to its vulnerabilities. More specifically, disabling http/2 will
|
|
||||||
// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
|
|
||||||
// Rapid Reset CVEs. For more information see:
|
|
||||||
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
|
|
||||||
// - https://github.com/advisories/GHSA-4374-p667-p6c8
|
|
||||||
disableHTTP2 := func(c *tls.Config) {
|
|
||||||
setupLog.Info("disabling http/2")
|
|
||||||
c.NextProtos = []string{"http/1.1"}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !enableHTTP2 {
|
|
||||||
tlsOpts = append(tlsOpts, disableHTTP2)
|
|
||||||
}
|
|
||||||
|
|
||||||
webhookServer := webhook.NewServer(webhook.Options{
|
|
||||||
TLSOpts: tlsOpts,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
|
|
||||||
// More info:
|
|
||||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
|
|
||||||
// - https://book.kubebuilder.io/reference/metrics.html
|
|
||||||
metricsServerOptions := metricsserver.Options{
|
|
||||||
BindAddress: metricsAddr,
|
|
||||||
SecureServing: secureMetrics,
|
|
||||||
TLSOpts: tlsOpts,
|
|
||||||
}
|
|
||||||
|
|
||||||
if secureMetrics {
|
|
||||||
// FilterProvider is used to protect the metrics endpoint with authn/authz.
|
|
||||||
// These configurations ensure that only authorized users and service accounts
|
|
||||||
// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
|
|
||||||
// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
|
|
||||||
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
|
|
||||||
|
|
||||||
// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
|
|
||||||
// generate self-signed certificates for the metrics server. While convenient for development and testing,
|
|
||||||
// this setup is not recommended for production.
|
|
||||||
}
|
|
||||||
|
|
||||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
|
||||||
Scheme: scheme,
|
|
||||||
Metrics: metricsServerOptions,
|
|
||||||
WebhookServer: webhookServer,
|
|
||||||
HealthProbeBindAddress: probeAddr,
|
|
||||||
LeaderElection: enableLeaderElection,
|
|
||||||
LeaderElectionID: "19a0338c.cozystack.io",
|
|
||||||
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
|
|
||||||
// when the Manager ends. This requires the binary to immediately end when the
|
|
||||||
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
|
|
||||||
// speeds up voluntary leader transitions as the new leader don't have to wait
|
|
||||||
// LeaseDuration time first.
|
|
||||||
//
|
|
||||||
// In the default scaffold provided, the program ends immediately after
|
|
||||||
// the manager stops, so would be fine to enable this option. However,
|
|
||||||
// if you are doing or is intended to do any operation such as perform cleanups
|
|
||||||
// after the manager stops then its usage might be unsafe.
|
|
||||||
// LeaderElectionReleaseOnCancel: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
setupLog.Error(err, "unable to start manager")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = (&controller.WorkloadMonitorReconciler{
|
|
||||||
Client: mgr.GetClient(),
|
|
||||||
Scheme: mgr.GetScheme(),
|
|
||||||
}).SetupWithManager(mgr); err != nil {
|
|
||||||
setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
// +kubebuilder:scaffold:builder
|
|
||||||
|
|
||||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
|
||||||
setupLog.Error(err, "unable to set up health check")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
|
||||||
setupLog.Error(err, "unable to set up ready check")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize telemetry collector
|
|
||||||
collector, err := telemetry.NewCollector(mgr.GetClient(), &telemetryConfig, mgr.GetConfig())
|
|
||||||
if err != nil {
|
|
||||||
setupLog.V(1).Error(err, "unable to create telemetry collector, telemetry will be disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
if collector != nil {
|
|
||||||
if err := mgr.Add(collector); err != nil {
|
|
||||||
setupLog.Error(err, "unable to set up telemetry collector")
|
|
||||||
setupLog.V(1).Error(err, "unable to set up telemetry collector, continuing without telemetry")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
setupLog.Info("starting manager")
|
|
||||||
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
|
|
||||||
setupLog.Error(err, "problem running manager")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
117
go.mod
117
go.mod
@@ -1,117 +0,0 @@
|
|||||||
// This is a generated file. Do not edit directly.
|
|
||||||
|
|
||||||
module github.com/aenix-io/cozystack
|
|
||||||
|
|
||||||
go 1.23.0
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/fluxcd/helm-controller/api v1.1.0
|
|
||||||
github.com/google/gofuzz v1.2.0
|
|
||||||
github.com/onsi/ginkgo/v2 v2.19.0
|
|
||||||
github.com/onsi/gomega v1.33.1
|
|
||||||
github.com/spf13/cobra v1.8.1
|
|
||||||
github.com/stretchr/testify v1.9.0
|
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
|
||||||
k8s.io/api v0.31.2
|
|
||||||
k8s.io/apiextensions-apiserver v0.31.2
|
|
||||||
k8s.io/apimachinery v0.31.2
|
|
||||||
k8s.io/apiserver v0.31.2
|
|
||||||
k8s.io/client-go v0.31.2
|
|
||||||
k8s.io/component-base v0.31.2
|
|
||||||
k8s.io/klog/v2 v2.130.1
|
|
||||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2
|
|
||||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
|
||||||
sigs.k8s.io/controller-runtime v0.19.0
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/coreos/go-semver v0.3.1 // indirect
|
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
|
||||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
|
||||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
|
||||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
|
||||||
github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
|
|
||||||
github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
|
|
||||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
|
||||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
|
||||||
github.com/go-logr/logr v1.4.2 // indirect
|
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
|
||||||
github.com/go-logr/zapr v1.3.0 // indirect
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
|
||||||
github.com/go-openapi/swag v0.23.0 // indirect
|
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
|
||||||
github.com/golang/protobuf v1.5.4 // indirect
|
|
||||||
github.com/google/cel-go v0.21.0 // indirect
|
|
||||||
github.com/google/gnostic-models v0.6.8 // indirect
|
|
||||||
github.com/google/go-cmp v0.6.0 // indirect
|
|
||||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
|
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
|
||||||
github.com/imdario/mergo v0.3.6 // indirect
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
|
||||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
|
||||||
github.com/prometheus/common v0.55.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.15.1 // indirect
|
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
|
||||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
|
||||||
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
|
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
|
||||||
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
|
||||||
go.opentelemetry.io/otel v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/trace v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
|
||||||
go.uber.org/zap v1.27.0 // indirect
|
|
||||||
golang.org/x/crypto v0.28.0 // indirect
|
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
|
||||||
golang.org/x/net v0.30.0 // indirect
|
|
||||||
golang.org/x/oauth2 v0.23.0 // indirect
|
|
||||||
golang.org/x/sync v0.8.0 // indirect
|
|
||||||
golang.org/x/sys v0.26.0 // indirect
|
|
||||||
golang.org/x/term v0.25.0 // indirect
|
|
||||||
golang.org/x/text v0.19.0 // indirect
|
|
||||||
golang.org/x/time v0.7.0 // indirect
|
|
||||||
golang.org/x/tools v0.26.0 // indirect
|
|
||||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
|
|
||||||
google.golang.org/grpc v1.65.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.34.2 // indirect
|
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
k8s.io/kms v0.31.2 // indirect
|
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
|
||||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
|
||||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
|
||||||
)
|
|
||||||
313
go.sum
313
go.sum
@@ -1,313 +0,0 @@
|
|||||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
|
||||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
|
||||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
|
||||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
|
||||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
|
||||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
|
||||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
|
||||||
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
|
|
||||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
|
||||||
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
|
|
||||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
|
||||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
|
||||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
|
||||||
github.com/fluxcd/helm-controller/api v1.1.0 h1:NS5Wm3U6Kv4w7Cw2sDOV++vf2ecGfFV00x1+2Y3QcOY=
|
|
||||||
github.com/fluxcd/helm-controller/api v1.1.0/go.mod h1:BgHMgMY6CWynzl4KIbHpd6Wpn3FN9BqgkwmvoKCp6iE=
|
|
||||||
github.com/fluxcd/pkg/apis/kustomize v1.6.1 h1:22FJc69Mq4i8aCxnKPlddHhSMyI4UPkQkqiAdWFcqe0=
|
|
||||||
github.com/fluxcd/pkg/apis/kustomize v1.6.1/go.mod h1:5dvQ4IZwz0hMGmuj8tTWGtarsuxW0rWsxJOwC6i+0V8=
|
|
||||||
github.com/fluxcd/pkg/apis/meta v1.6.1 h1:maLhcRJ3P/70ArLCY/LF/YovkxXbX+6sTWZwZQBeNq0=
|
|
||||||
github.com/fluxcd/pkg/apis/meta v1.6.1/go.mod h1:YndB/gxgGZmKfqpAfFxyCDNFJFP0ikpeJzs66jwq280=
|
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
|
||||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
|
||||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
|
||||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
|
||||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
|
||||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
|
||||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
|
||||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
|
||||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
|
||||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
|
||||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
|
||||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
|
||||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
|
||||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
|
||||||
github.com/google/cel-go v0.21.0 h1:cl6uW/gxN+Hy50tNYvI691+sXxioCnstFzLp2WO4GCI=
|
|
||||||
github.com/google/cel-go v0.21.0/go.mod h1:rHUlWCcBKgyEk+eV03RPdZUekPp6YcJwV0FxuUksYxc=
|
|
||||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
|
||||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|
||||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
|
||||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
|
|
||||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
|
||||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
|
||||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
|
||||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
|
||||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
|
||||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
|
||||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
|
||||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
|
||||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
|
||||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
|
||||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
|
||||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
|
||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
|
||||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
|
||||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
|
||||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
|
||||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
|
||||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
|
||||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
|
||||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
|
||||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
|
||||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
|
||||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
|
||||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
|
||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
|
||||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
|
||||||
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
|
|
||||||
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
|
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
|
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
|
|
||||||
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
|
|
||||||
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
|
|
||||||
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
|
|
||||||
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
|
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
|
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
|
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
|
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
|
|
||||||
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
|
|
||||||
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
|
||||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
|
||||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
|
|
||||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
|
||||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
|
||||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
|
||||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
|
||||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
|
||||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
|
||||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
|
||||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
|
||||||
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
|
||||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
|
||||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
|
||||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
|
||||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
|
||||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
|
||||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
|
||||||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
|
||||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
|
||||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
|
||||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
|
||||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
|
||||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
|
||||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
|
|
||||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
|
||||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
|
||||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
|
||||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
|
||||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
|
||||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
|
|
||||||
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
|
|
||||||
k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
|
|
||||||
k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
|
|
||||||
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
|
|
||||||
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
|
||||||
k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
|
|
||||||
k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
|
|
||||||
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
|
|
||||||
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
|
|
||||||
k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
|
|
||||||
k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
|
|
||||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
|
||||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
|
||||||
k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI=
|
|
||||||
k8s.io/kms v0.31.2/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2 h1:GKE9U8BH16uynoxQii0auTjmmmuZ3O0LFMN6S0lPPhI=
|
|
||||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
|
|
||||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
|
||||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
|
||||||
sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
|
|
||||||
sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
|
|
||||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
|
||||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
|
||||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
|
||||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
|
||||||
@@ -1,16 +0,0 @@
/*
Copyright 2025 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@@ -1,165 +0,0 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'


ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path name is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300   # Timeout in seconds
  local interval=5    # Interval between checks in seconds
  local elapsed=0

  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi


checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
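For context, a minimal sketch of how this deleted e2e helper is meant to be driven (the script path shown below is an assumption, since the filename is not visible in this compare view; the chart name is an arbitrary example): the script takes one chart name, installs a tenant-e2e tenant under tenant-root, then installs <chart>-e2e from the cozystack-apps HelmRepository and polls the resulting HelmRelease until it reports InstallSucceeded or UpgradeSucceeded.

    # Hypothetical invocation; adjust the path to wherever this script lives in the repo.
    bash hack/e2e-apps.sh nats

    # Watch the release the script creates while it polls the status conditions:
    kubectl get helmrelease nats-e2e -n tenant-e2e -w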
43
hack/e2e.sh
@@ -113,6 +113,8 @@ machine:
         - usermode_helper=disabled
     - name: zfs
     - name: spl
+  install:
+    image: ghcr.io/aenix-io/cozystack/talos:v1.8.0
   files:
   - content: |
       [plugins]
@@ -122,12 +124,6 @@ machine:
     op: create

 cluster:
-  apiServer:
-    extraArgs:
-      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
-      oidc-client-id: "kubernetes"
-      oidc-username-claim: "preferred_username"
-      oidc-groups-claim: "groups"
   network:
     cni:
       name: none
@@ -140,9 +136,6 @@ EOT
 cat > patch-controlplane.yaml <<\EOT
 machine:
-  nodeLabels:
-    node.kubernetes.io/exclude-from-external-load-balancers:
-      $patch: delete
   network:
     interfaces:
     - interface: eth0
@@ -186,11 +179,10 @@ talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
 timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

 # Bootstrap
-timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
+talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11

 # Wait for etcd
-timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
-timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
+timeout 180 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'

 rm -f kubeconfig
 talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
@@ -198,7 +190,7 @@ export KUBECONFIG=$PWD/kubeconfig

 # Wait for kubernetes nodes appear
 timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
-kubectl create ns cozy-system -o yaml | kubectl apply -f -
+kubectl create ns cozy-system
 kubectl create -f - <<\EOT
 apiVersion: v1
 kind: ConfigMap
@@ -211,8 +203,6 @@ data:
   ipv4-pod-gateway: "10.244.0.1"
   ipv4-svc-cidr: "10.96.0.0/16"
   ipv4-join-cidr: "100.64.0.0/16"
-  root-host: example.org
-  api-server-endpoint: https://192.168.123.10:6443
 EOT

 #
@@ -229,7 +219,6 @@ sleep 5
 kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

 # Wait for Cluster-API providers
-timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
 kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

 # Wait for linstor controller
@@ -298,16 +287,13 @@ spec:
   avoidBuggyIPs: false
 EOT

-# Wait for cozystack-api
-kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
-
-kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
+kubectl patch -n tenant-root hr/tenant-root --type=merge -p '{"spec":{ "values":{
   "host": "example.org",
   "ingress": true,
   "monitoring": true,
   "etcd": true,
   "isolated": true
-}}'
+}}}'

 # Wait for HelmRelease be created
 timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
@@ -315,9 +301,9 @@ timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring te
 # Wait for HelmReleases be installed
 kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root

-kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
+kubectl patch -n tenant-root hr/ingress --type=merge -p '{"spec":{ "values":{
   "dashboard": true
-}}'
+}}}'

 # Wait for nginx-ingress-controller
 timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
@@ -327,7 +313,7 @@ kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-i
 kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd

 # Wait for Victoria metrics
-kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
+kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-longterm vmalert/vmalert-shortterm vmalertmanager/alertmanager
 kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
 kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
@@ -340,12 +326,3 @@ ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.statu

 # Check Grafana
 curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
-
-
-# Test OIDC
-kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
-  "oidc-enabled": "true"
-}}'
-
-timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
-kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
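The hunks above were reconstructed from a side-by-side rendering; the minus/plus assignment follows the hunk line counts. The main functional difference is how tenant-root and ingress options are set: one side patches the aggregated cozystack API objects (tenants.apps.cozystack.io, ingresses.apps.cozystack.io), the other patches HelmRelease values directly. A hedged way to check which form a given cluster serves, not part of the diff itself:

    # Is the aggregated cozystack API available (needed for the tenants.apps.cozystack.io form)?
    kubectl get apiservices v1alpha1.apps.cozystack.io

    # Inspect the values that the HelmRelease-patch form writes:
    kubectl get hr tenant-root -n tenant-root -o jsonpath='{.spec.values}'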
@@ -1,23 +0,0 @@
#!/bin/bash

YQ_VERSION="v4.35.1"
RED='\033[31m'
RESET='\033[0m'

check-yq-version() {
  current_version=$(yq -V | awk '$(NF-1) == "version" {print $NF}')
  if [ -z "$current_version" ]; then
    echo "yq is not installed or version cannot be determined."
    exit 1
  fi
  echo "Current yq version: $current_version"

  if [ "$(printf '%s\n' "$YQ_VERSION" "$current_version" | sort -V | head -n1)" = "$YQ_VERSION" ]; then
    echo "Greater than or equal to $YQ_VERSION"
  else
    echo -e "${RED}ERROR: yq version less than $YQ_VERSION${RESET}"
    exit 1
  fi
}

check-yq-version
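The version gate above relies on the sort -V idiom: the required and current versions are sorted semantically, and the check passes when the required version sorts first (current is greater or equal). A small sketch of the idiom with arbitrary example versions:

    # head -n1 yields the smaller of the two version strings under version sort.
    printf '%s\n' v4.35.1 v4.40.5 | sort -V | head -n1   # prints v4.35.1 -> check passes
    printf '%s\n' v4.35.1 v4.30.0 | sort -V | head -n1   # prints v4.30.0 -> check fails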
1
hack/testdata/http-cache/check.sh
vendored
@@ -1 +0,0 @@
return 0
2
hack/testdata/http-cache/values.yaml
vendored
@@ -1,2 +0,0 @@
endpoints:
- 8.8.8.8:443
1
hack/testdata/kubernetes/check.sh
vendored
@@ -1 +0,0 @@
return 0
62
hack/testdata/kubernetes/values.yaml
vendored
@@ -1,62 +0,0 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
    - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificate
  ##
  certManager:
    ## @param addons.certManager.enabled Enables the cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
    ## e.g:
    ## hosts:
    ## - example.org
    ## - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}
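These are the default test values for the kubernetes chart. For illustration, a hedged sketch of overriding a node group through the same HelmRelease mechanism the e2e helper uses; the repository name cozystack-apps and namespace cozy-public come from the script above, while the release name, namespace, and value overrides are made up for the example:

    kubectl apply -f - <<\EOT
    apiVersion: helm.toolkit.fluxcd.io/v2
    kind: HelmRelease
    metadata:
      name: kubernetes-e2e
      namespace: tenant-e2e
    spec:
      chart:
        spec:
          chart: kubernetes
          sourceRef:
            kind: HelmRepository
            name: cozystack-apps
            namespace: cozy-public
          version: '*'
      interval: 1m0s
      values:
        controlPlane:
          replicas: 3
        nodeGroups:
          md0:
            minReplicas: 1
            maxReplicas: 5
    EOT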
1
hack/testdata/nats/check.sh
vendored
@@ -1 +0,0 @@
return 0
10
hack/testdata/nats/values.yaml
vendored
@@ -1,10 +0,0 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Persistent Volume size for NATS
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""
1
hack/testdata/tenant/check.sh
vendored
@@ -1 +0,0 @@
return 0
6
hack/testdata/tenant/values.yaml
vendored
@@ -1,6 +0,0 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true
@@ -1,52 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 The Cozystack Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"

source "${CODEGEN_PKG}/kube_codegen.sh"

THIS_PKG="k8s.io/sample-apiserver"

kube::codegen::gen_helpers \
    --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
    "${SCRIPT_ROOT}/pkg/apis"

if [[ -n "${API_KNOWN_VIOLATIONS_DIR:-}" ]]; then
    report_filename="${API_KNOWN_VIOLATIONS_DIR}/cozystack_api_violation_exceptions.list"
    if [[ "${UPDATE_API_KNOWN_VIOLATIONS:-}" == "true" ]]; then
        update_report="--update-report"
    fi
fi

kube::codegen::gen_openapi \
    --extra-pkgs "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" \
    --output-dir "${SCRIPT_ROOT}/pkg/generated/openapi" \
    --output-pkg "${THIS_PKG}/pkg/generated/openapi" \
    --report-filename "${report_filename:-"/dev/null"}" \
    ${update_report:+"${update_report}"} \
    --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
    "${SCRIPT_ROOT}/pkg/apis"

$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=packages/system/cozystack-controller/templates/crds
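A hedged usage sketch for the code-generation script above; the script filename is not visible in this compare view, so the path below is an assumption:

    # Regenerate helpers, OpenAPI definitions, deepcopy code and CRDs
    # without rewriting the known-violations list:
    UPDATE_API_KNOWN_VIOLATIONS=false ./hack/update-codegen.sh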
@@ -1,96 +0,0 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"
    "fmt"
    "path/filepath"
    "runtime"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc

func TestControllers(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
        ErrorIfCRDPathMissing: true,

        // The BinaryAssetsDirectory is only required if you want to run the tests directly
        // without call the makefile target test. If not informed it will look for the
        // default path defined in controller-runtime which is /usr/local/kubebuilder/.
        // Note that you must have the required binaries setup under the bin directory to perform
        // the tests directly. When we run make test it will be setup and used automatically.
        BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
            fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
    }

    var err error
    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    err = cozystackiov1alpha1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())

})

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    cancel()
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})
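The suite above expects envtest binaries under bin/k8s/1.31.0-<os>-<arch> relative to the repo root when run outside the Makefile. A hedged sketch of preparing them with the controller-runtime setup-envtest tool; the Go package path in the test command is an assumption, since the file's location is not shown in this view:

    # Download Kubernetes 1.31 test binaries into ./bin/k8s/<version>-<os>-<arch>:
    go run sigs.k8s.io/controller-runtime/tools/setup-envtest@latest use 1.31.0 --bin-dir ./bin

    # Run the Ginkgo suite directly (package path assumed):
    go test ./internal/controller/... -run TestControllers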
@@ -1,273 +0,0 @@
|
|||||||
package controller
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"k8s.io/utils/pointer"
|
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
|
|
||||||
cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
|
|
||||||
type WorkloadMonitorReconciler struct {
|
|
||||||
client.Client
|
|
||||||
Scheme *runtime.Scheme
|
|
||||||
}
|
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
|
|
||||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors/status,verbs=get;update;patch
|
|
||||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
|
|
||||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
|
|
||||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
|
|
||||||
|
|
||||||
// isPodReady checks if the Pod is in the Ready condition.
|
|
||||||
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
|
|
||||||
for _, c := range pod.Status.Conditions {
|
|
||||||
if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateOwnerReferences adds the given monitor as a new owner reference to the object if not already present.
|
|
||||||
// It then sorts the owner references to enforce a consistent order.
|
|
||||||
func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
|
|
||||||
// Retrieve current owner references
|
|
||||||
owners := obj.GetOwnerReferences()
|
|
||||||
|
|
||||||
// Check if current monitor is already in owner references
|
|
||||||
var alreadyOwned bool
|
|
||||||
for _, ownerRef := range owners {
|
|
||||||
if ownerRef.UID == monitor.GetUID() {
|
|
||||||
alreadyOwned = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
runtimeObj, ok := monitor.(runtime.Object)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
gvk := runtimeObj.GetObjectKind().GroupVersionKind()
|
|
||||||
|
|
||||||
// If not already present, add new owner reference without controller flag
|
|
||||||
if !alreadyOwned {
|
|
||||||
newOwnerRef := metav1.OwnerReference{
|
|
||||||
APIVersion: gvk.GroupVersion().String(),
|
|
||||||
Kind: gvk.Kind,
|
|
||||||
Name: monitor.GetName(),
|
|
||||||
UID: monitor.GetUID(),
|
|
||||||
// Set Controller to false to avoid conflict as multiple controllers are not allowed
|
|
||||||
Controller: pointer.BoolPtr(false),
|
|
||||||
BlockOwnerDeletion: pointer.BoolPtr(true),
|
|
||||||
}
|
|
||||||
owners = append(owners, newOwnerRef)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort owner references to enforce a consistent order by UID
|
|
||||||
sort.SliceStable(owners, func(i, j int) bool {
|
|
||||||
return owners[i].UID < owners[j].UID
|
|
||||||
})
|
|
||||||
|
|
||||||
// Update the owner references of the object
|
|
||||||
obj.SetOwnerReferences(owners)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
|
|
||||||
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
|
|
||||||
ctx context.Context,
|
|
||||||
monitor *cozyv1alpha1.WorkloadMonitor,
|
|
||||||
pod corev1.Pod,
|
|
||||||
) error {
|
|
||||||
logger := log.FromContext(ctx)
|
|
||||||
|
|
||||||
// Combine both init containers and normal containers to sum resources properly
|
|
||||||
combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
|
|
||||||
|
|
||||||
// totalResources will store the sum of all container resource limits
|
|
||||||
totalResources := make(map[string]resource.Quantity)
|
|
||||||
|
|
||||||
// Iterate over all containers to aggregate their Limits
|
|
||||||
for _, container := range combinedContainers {
|
|
||||||
for name, qty := range container.Resources.Limits {
|
|
||||||
if existing, exists := totalResources[name.String()]; exists {
|
|
||||||
existing.Add(qty)
|
|
||||||
totalResources[name.String()] = existing
|
|
||||||
} else {
|
|
||||||
totalResources[name.String()] = qty.DeepCopy()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If annotation "workload.cozystack.io/resources" is present, parse and merge
|
|
||||||
	if resourcesStr, ok := pod.Annotations["workload.cozystack.io/resources"]; ok {
		annRes := map[string]string{}
		if err := json.Unmarshal([]byte(resourcesStr), &annRes); err != nil {
			logger.Error(err, "Failed to parse resources annotation", "pod", pod.Name)
		} else {
			for k, v := range annRes {
				parsed, err := resource.ParseQuantity(v)
				if err != nil {
					logger.Error(err, "Failed to parse resource quantity from annotation", "key", k, "value", v)
					continue
				}
				totalResources[k] = parsed
			}
		}
	}

	workload := &cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
	}

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
		// Update owner references with the new monitor
		updateOwnerReferences(workload.GetObjectMeta(), monitor)

		// Copy labels from the Pod if needed
		workload.Labels = pod.Labels

		// Fill Workload status fields:
		workload.Status.Kind = monitor.Spec.Kind
		workload.Status.Type = monitor.Spec.Type
		workload.Status.Resources = totalResources
		workload.Status.Operational = r.isPodReady(&pod)

		return nil
	})
	if err != nil {
		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
		return err
	}

	return nil
}

// Reconcile is the main reconcile loop.
// 1. It reconciles WorkloadMonitor objects themselves (create/update/delete).
// 2. It also reconciles Pod events mapped to WorkloadMonitor via label selector.
func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	// Fetch the WorkloadMonitor object if it exists
	monitor := &cozyv1alpha1.WorkloadMonitor{}
	err := r.Get(ctx, req.NamespacedName, monitor)
	if err != nil {
		// If the resource is not found, it may be a Pod event (mapFunc).
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Unable to fetch WorkloadMonitor")
		return ctrl.Result{}, err
	}

	// List Pods that match the WorkloadMonitor's selector
	podList := &corev1.PodList{}
	if err := r.List(
		ctx,
		podList,
		client.InNamespace(monitor.Namespace),
		client.MatchingLabels(monitor.Spec.Selector),
	); err != nil {
		logger.Error(err, "Unable to list Pods for WorkloadMonitor", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	var observedReplicas, availableReplicas int32

	// For each matching Pod, reconcile the corresponding Workload
	for _, pod := range podList.Items {
		observedReplicas++
		if err := r.reconcilePodForMonitor(ctx, monitor, pod); err != nil {
			logger.Error(err, "Failed to reconcile Workload for Pod", "pod", pod.Name)
			continue
		}
		if r.isPodReady(&pod) {
			availableReplicas++
		}
	}

	// Update WorkloadMonitor status based on observed pods
	monitor.Status.ObservedReplicas = observedReplicas
	monitor.Status.AvailableReplicas = availableReplicas

	// Default to operational = true, but check MinReplicas if set
	monitor.Status.Operational = pointer.Bool(true)
	if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
		monitor.Status.Operational = pointer.Bool(false)
	}

	// Update the WorkloadMonitor status in the cluster
	if err := r.Status().Update(ctx, monitor); err != nil {
		logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	// Return without requeue if we want purely event-driven reconciliations
	return ctrl.Result{}, nil
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch WorkloadMonitor objects
		For(&cozyv1alpha1.WorkloadMonitor{}).
		// Also watch Pod objects and map them back to WorkloadMonitor if labels match
		Watches(
			&corev1.Pod{},
			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
				pod, ok := obj.(*corev1.Pod)
				if !ok {
					return nil
				}

				var monitorList cozyv1alpha1.WorkloadMonitorList
				// List all WorkloadMonitors in the same namespace
				if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
					return nil
				}

				// Match each monitor's selector with the Pod's labels
				var requests []reconcile.Request
				for _, m := range monitorList.Items {
					matches := true
					for k, v := range m.Spec.Selector {
						if podVal, exists := pod.Labels[k]; !exists || podVal != v {
							matches = false
							break
						}
					}
					if matches {
						requests = append(requests, reconcile.Request{
							NamespacedName: types.NamespacedName{
								Namespace: m.Namespace,
								Name:      m.Name,
							},
						})
					}
				}
				return requests
			}),
		).
		// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
		Owns(&cozyv1alpha1.Workload{}).
		Complete(r)
}
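For orientation, here is a minimal sketch of how a reconciler like the one above is typically wired into a controller-runtime manager, and of the WorkloadMonitor spec fields it consumes. This code is not taken from either branch: the package name, the `Client` field on the reconciler (assumed to embed `client.Client` as in standard kubebuilder scaffolding), the `AddToScheme` helper, and the example label are assumptions.

```go
// Hypothetical package name; must match the package that defines WorkloadMonitorReconciler.
package controller

import (
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"

	cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// RegisterWorkloadMonitorController registers the reconciler shown above with a
// controller-runtime manager. It assumes the reconciler embeds client.Client
// (field name "Client" is an assumption from kubebuilder scaffolding) and that
// the v1alpha1 package exposes the usual generated AddToScheme helper.
func RegisterWorkloadMonitorController(mgr ctrl.Manager) error {
	// Make the WorkloadMonitor and Workload types known to the manager's scheme.
	if err := cozyv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
		return err
	}
	return (&WorkloadMonitorReconciler{Client: mgr.GetClient()}).SetupWithManager(mgr)
}

// exampleMonitor shows the spec fields the reconciler reads: Pods matching
// Spec.Selector are counted, and Status.Operational flips to false when fewer
// than Spec.MinReplicas of them are ready. The selector label is illustrative.
func exampleMonitor(name, namespace string) *cozyv1alpha1.WorkloadMonitor {
	m := &cozyv1alpha1.WorkloadMonitor{}
	m.Name = name
	m.Namespace = namespace
	m.Spec.Kind = "kubernetes"
	m.Spec.Type = "control-plane"
	m.Spec.Selector = map[string]string{"app.kubernetes.io/instance": name}
	m.Spec.MinReplicas = pointer.Int32(1)
	return m
}
```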
@@ -1,292 +0,0 @@
package telemetry

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// Collector handles telemetry data collection and sending
type Collector struct {
	client          client.Client
	discoveryClient discovery.DiscoveryInterface
	config          *Config
	ticker          *time.Ticker
	stopCh          chan struct{}
}

// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create discovery client: %w", err)
	}
	return &Collector{
		client:          client,
		discoveryClient: discoveryClient,
		config:          config,
	}, nil
}

// Start implements manager.Runnable
func (c *Collector) Start(ctx context.Context) error {
	if c.config.Disabled {
		return nil
	}

	c.ticker = time.NewTicker(c.config.Interval)
	c.stopCh = make(chan struct{})

	// Initial collection
	c.collect(ctx)

	for {
		select {
		case <-ctx.Done():
			c.ticker.Stop()
			close(c.stopCh)
			return nil
		case <-c.ticker.C:
			c.collect(ctx)
		}
	}
}

// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
	// Only run telemetry collector on the leader
	return true
}

// Stop halts telemetry collection
func (c *Collector) Stop() {
	close(c.stopCh)
}

// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
	gb := size.Value() / (1024 * 1024 * 1024)
	switch {
	case gb <= 1:
		return "1Gi"
	case gb <= 5:
		return "5Gi"
	case gb <= 10:
		return "10Gi"
	case gb <= 25:
		return "25Gi"
	case gb <= 50:
		return "50Gi"
	case gb <= 100:
		return "100Gi"
	case gb <= 250:
		return "250Gi"
	case gb <= 500:
		return "500Gi"
	case gb <= 1024:
		return "1Ti"
	case gb <= 2048:
		return "2Ti"
	case gb <= 5120:
		return "5Ti"
	default:
		return "10Ti"
	}
}

// collect gathers and sends telemetry data
func (c *Collector) collect(ctx context.Context) {
	logger := log.FromContext(ctx).V(1)

	// Get cluster ID from kube-system namespace
	var kubeSystemNS corev1.Namespace
	if err := c.client.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
		logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
		return
	}

	clusterID := string(kubeSystemNS.UID)

	var cozystackCM corev1.ConfigMap
	if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
		logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
		return
	}

	oidcEnabled := cozystackCM.Data["oidc-enabled"]
	bundle := cozystackCM.Data["bundle-name"]
	bundleEnable := cozystackCM.Data["bundle-enable"]
	bundleDisable := cozystackCM.Data["bundle-disable"]

	// Get Kubernetes version from nodes
	var nodeList corev1.NodeList
	if err := c.client.List(ctx, &nodeList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
		return
	}

	// Create metrics buffer
	var metrics strings.Builder

	// Add Cozystack info metric
	if len(nodeList.Items) > 0 {
		k8sVersion, _ := c.discoveryClient.ServerVersion()
		metrics.WriteString(fmt.Sprintf(
			"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
			c.config.CozystackVersion,
			k8sVersion,
			oidcEnabled,
			bundle,
			bundleEnable,
			bundleDisable,
		))
	}

	// Collect node metrics
	nodeOSCount := make(map[string]int)
	for _, node := range nodeList.Items {
		key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
		nodeOSCount[key] = nodeOSCount[key] + 1
	}

	for osKey, count := range nodeOSCount {
		metrics.WriteString(fmt.Sprintf(
			"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
			osKey,
			nodeList.Items[0].Status.NodeInfo.KernelVersion,
			count,
		))
	}

	// Collect LoadBalancer services metrics
	var serviceList corev1.ServiceList
	if err := c.client.List(ctx, &serviceList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
	} else {
		lbCount := 0
		for _, svc := range serviceList.Items {
			if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
				lbCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
	}

	// Count tenant namespaces
	var nsList corev1.NamespaceList
	if err := c.client.List(ctx, &nsList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
	} else {
		tenantCount := 0
		for _, ns := range nsList.Items {
			if strings.HasPrefix(ns.Name, "tenant-") {
				tenantCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
	}

	// Collect PV metrics grouped by driver and size
	var pvList corev1.PersistentVolumeList
	if err := c.client.List(ctx, &pvList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
	} else {
		// Map to store counts by size and driver
		pvMetrics := make(map[string]map[string]int)

		for _, pv := range pvList.Items {
			if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
				sizeGroup := getSizeGroup(capacity)

				// Get the CSI driver name
				driver := "unknown"
				if pv.Spec.CSI != nil {
					driver = pv.Spec.CSI.Driver
				} else if pv.Spec.HostPath != nil {
					driver = "hostpath"
				} else if pv.Spec.NFS != nil {
					driver = "nfs"
				}

				// Initialize nested map if needed
				if _, exists := pvMetrics[sizeGroup]; !exists {
					pvMetrics[sizeGroup] = make(map[string]int)
				}

				// Increment count for this size/driver combination
				pvMetrics[sizeGroup][driver]++
			}
		}

		// Write metrics
		for size, drivers := range pvMetrics {
			for driver, count := range drivers {
				metrics.WriteString(fmt.Sprintf(
					"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
					driver,
					size,
					count,
				))
			}
		}
	}

	// Collect workload metrics
	var monitorList cozyv1alpha1.WorkloadMonitorList
	if err := c.client.List(ctx, &monitorList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
		return
	}

	for _, monitor := range monitorList.Items {
		metrics.WriteString(fmt.Sprintf(
			"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
			monitor.UID,
			monitor.Spec.Kind,
			monitor.Spec.Type,
			monitor.Spec.Version,
			monitor.Status.ObservedReplicas,
		))
	}

	// Send metrics
	if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
		logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
	}
}

// sendMetrics sends collected metrics to the configured endpoint
func (c *Collector) sendMetrics(clusterID, metrics string) error {
	req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "text/plain")
	req.Header.Set("X-Cluster-ID", clusterID)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	return nil
}
@@ -1,27 +0,0 @@
package telemetry

import (
	"time"
)

// Config holds telemetry configuration
type Config struct {
	// Disable telemetry collection if set to true
	Disabled bool
	// Endpoint to send telemetry data to
	Endpoint string
	// Interval between telemetry data collection
	Interval time.Duration
	// CozystackVersion represents the current version of Cozystack
	CozystackVersion string
}

// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
	return &Config{
		Disabled:         false,
		Endpoint:         "https://telemetry.cozystack.io",
		Interval:         15 * time.Minute,
		CozystackVersion: "unknown",
	}
}
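For orientation, here is a minimal sketch (not taken from either branch) of how the Config and Collector above fit together: the collector is built from the manager's client and REST config and added as a runnable, so its Start loop runs only on the elected leader at the configured interval. The import path and the function name are assumptions.

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	// Hypothetical import path; adjust to wherever the telemetry package lives.
	"github.com/aenix-io/cozystack/pkg/telemetry"
)

// addTelemetry wires the telemetry Collector into a controller-runtime manager.
func addTelemetry(mgr ctrl.Manager, cozystackVersion string) error {
	cfg := telemetry.DefaultConfig()        // 15m interval, telemetry.cozystack.io endpoint
	cfg.CozystackVersion = cozystackVersion // reported in the cozy_cluster_info metric

	collector, err := telemetry.NewCollector(mgr.GetClient(), cfg, mgr.GetConfig())
	if err != nil {
		return err
	}
	// mgr.Add runs Start with the manager's context; because NeedLeaderElection
	// returns true, only the leader instance sends telemetry.
	return mgr.Add(collector)
}
```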
@@ -68,7 +68,7 @@ spec:
       serviceAccountName: cozystack
       containers:
       - name: cozystack
-        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
+        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.16.4"
         env:
         - name: KUBERNETES_SERVICE_HOST
           value: localhost
@@ -87,7 +87,7 @@ spec:
             fieldRef:
               fieldPath: metadata.name
       - name: darkhttpd
-        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
+        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.16.4"
         command:
         - /usr/bin/darkhttpd
         - /cozystack/assets
@@ -1,9 +0,0 @@
### How to test packages local

```bash
cd packages/core/installer
make image-cozystack REGISTRY=YOUR_CUSTOM_REGISTRY
make apply
kubectl delete pod dashboard-redis-master-0 -n cozy-dashboard
kubectl delete po -l app=source-controller -n cozy-fluxcd
```
@@ -16,10 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.1
+version: 0.5.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "24.9.2"
+appVersion: "24.3.0"
@@ -19,14 +19,12 @@ more details:
 
 ### Common parameters
 
 | Name | Description | Value |
-| ---------------- | ----------------------------------- | ------ |
+| -------------- | ----------------------------------- | ------ |
 | `size` | Persistent Volume size | `10Gi` |
-| `logStorageSize` | Persistent Volume for logs size | `2Gi` |
 | `shards` | Number of Clickhouse replicas | `1` |
 | `replicas` | Number of Clickhouse shards | `2` |
 | `storageClass` | StorageClass used to store the data | `""` |
-| `logTTL` | for query_log and query_thread_log | `15` |
 
 ### Configuration parameters
 
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c
+ghcr.io/aenix-io/cozystack/clickhouse-backup:0.5.0@sha256:dda84420cb8648721299221268a00d72a05c7af5b7fb452619bac727068b9e61
@@ -32,12 +32,11 @@ kind: "ClickHouseInstallation"
 metadata:
   name: "{{ .Release.Name }}"
 spec:
-  namespaceDomainPattern: "%s.svc.cozy.local"
+  {{- with .Values.size }}
   defaults:
     templates:
       dataVolumeClaimTemplate: data-volume-template
-      podTemplate: clickhouse-per-host
-      serviceTemplate: svc-template
+  {{- end }}
   configuration:
     {{- with $users }}
     users:
@@ -47,41 +46,6 @@ spec:
         {{ $name }}/networks/ip: ["::/0"]
         {{- end }}
       {{- end }}
-    files:
-      config.d/z_log_disable.xml: |
-        <clickhouse>
-          <asynchronous_metric_log remove="1"/>
-          <metric_log remove="1"/>
-          <query_views_log remove="1" />
-          <part_log remove="1"/>
-          <session_log remove="1"/>
-          <text_log remove="1" />
-          <trace_log remove="1"/>
-          <crash_log remove="1"/>
-          <opentelemetry_span_log remove="1"/>
-          <processors_profile_log remove="1"/>
-        </clickhouse>
-      config.d/query_log_ttl.xml: |
-        <clickhouse>
-          <query_log replace="1">
-            <database>system</database>
-            <table>query_log</table>
-            <engine>ENGINE = MergeTree PARTITION BY (event_date)
-              ORDER BY (event_time)
-              TTL event_date + INTERVAL {{ .Values.logTTL }} DAY DELETE
-            </engine>
-            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-          </query_log>
-          <query_thread_log replace="1">
-            <database>system</database>
-            <table>query_thread_log</table>
-            <engine>ENGINE = MergeTree PARTITION BY (event_date)
-              ORDER BY (event_time)
-              TTL event_date + INTERVAL {{ .Values.logTTL }} DAY DELETE
-            </engine>
-            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-          </query_thread_log>
-        </clickhouse>
     profiles:
       readonly/readonly: "1"
     clusters:
@@ -89,49 +53,17 @@ spec:
       layout:
         shardsCount: {{ .Values.shards }}
         replicasCount: {{ .Values.replicas }}
+  {{- with .Values.size }}
   templates:
     volumeClaimTemplates:
       - name: data-volume-template
         spec:
           accessModes:
             - ReadWriteOnce
+          {{- with $.Values.storageClass }}
+          storageClassName: {{ . }}
+          {{- end }}
           resources:
             requests:
-              storage: {{ .Values.size }}
-      - name: log-volume-template
-        spec:
-          accessModes:
-            - ReadWriteOnce
-          resources:
-            requests:
-              storage: {{ .Values.logStorageSize }}
-    podTemplates:
-      - name: clickhouse-per-host
-        spec:
-          affinity:
-            podAntiAffinity:
-              requiredDuringSchedulingIgnoredDuringExecution:
-                - labelSelector:
-                    matchExpressions:
-                      - key: "clickhouse.altinity.com/chi"
-                        operator: In
-                        values:
-                          - "{{ .Release.Name }}"
-                  topologyKey: "kubernetes.io/hostname"
-          containers:
-            - name: clickhouse
-              image: clickhouse/clickhouse-server:24.9.2.42
-              volumeMounts:
-                - name: data-volume-template
-                  mountPath: /var/lib/clickhouse
-                - name: log-volume-template
-                  mountPath: /var/log/clickhouse-server
-    serviceTemplates:
-      - name: svc-template
-        generateName: chendpoint-{chi}
-        spec:
-          ports:
-            - name: http
-              port: 8123
-            - name: tcp
-              port: 9000
+              storage: {{ . }}
+  {{- end }}
@@ -8,7 +8,7 @@ rules:
   resources:
   - services
  resourceNames:
-  - chendpoint-{{ .Release.Name }}
+  - chi-clickhouse-test-clickhouse-0-0
   verbs: ["get", "list", "watch"]
 - apiGroups:
   - ""
@@ -7,11 +7,6 @@
       "description": "Persistent Volume size",
       "default": "10Gi"
     },
-    "logStorageSize": {
-      "type": "string",
-      "description": "Persistent Volume for logs size",
-      "default": "2Gi"
-    },
     "shards": {
       "type": "number",
       "description": "Number of Clickhouse replicas",
@@ -27,11 +22,6 @@
       "description": "StorageClass used to store the data",
       "default": ""
     },
-    "logTTL": {
-      "type": "number",
-      "description": "for query_log and query_thread_log",
-      "default": 15
-    },
     "backup": {
       "type": "object",
       "properties": {
@@ -1,18 +1,14 @@
 ## @section Common parameters
 
 ## @param size Persistent Volume size
-## @param logStorageSize Persistent Volume for logs size
 ## @param shards Number of Clickhouse replicas
 ## @param replicas Number of Clickhouse shards
 ## @param storageClass StorageClass used to store the data
-## @param logTTL for query_log and query_thread_log
 ##
 size: 10Gi
-logStorageSize: 2Gi
 shards: 1
 replicas: 2
 storageClass: ""
-logTTL: 15
 
 ## @section Configuration parameters
 
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.1
+version: 0.4.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:6a8ec7e7052f2d02ec5457d7cbac6ee52b3ed93a883988a192d1394fc7c88117
+ghcr.io/aenix-io/cozystack/postgres-backup:0.7.0@sha256:d2015c6dba92293bda652d055e97d1be80e8414c2dc78037c12812d1a2e2cba1
@@ -34,9 +34,6 @@ stringData:
   init.sh: |
     #!/bin/bash
     set -e
-
-    until pg_isready ; do sleep 5; done
-
     echo "== create users"
     {{- if .Values.users }}
     psql -v ON_ERROR_STOP=1 <<\EOT
@@ -63,7 +60,7 @@ stringData:
     DROP USER $user;
     EOT
     done
 
     echo "== create roles"
     psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
     SELECT 'CREATE ROLE app_admin NOINHERIT;'
@@ -83,7 +80,7 @@ stringData:
     FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
       -- Changing Schema Ownership
       EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
 
       -- Add rights for the admin role
       EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
       EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:a3c25199acb8e8426e6952658ccc4acaadb50fe2cfa6359743b64e5166b3fc70
+ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:cd744b2d1d50191f4908f2db83079b32973d1c009fe9468627be72efbfa0a107
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.1
+version: 0.3.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1,19 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Release.Name }}-dashboard-resources
rules:
- apiGroups:
  - ""
  resources:
  - services
  resourceNames:
  - {{ .Release.Name }}-kafka-bootstrap
  verbs: ["get", "list", "watch"]
- apiGroups:
  - ""
  resources:
  - secrets
  resourceNames:
  - {{ .Release.Name }}-clients-ca
  verbs: ["get", "list", "watch"]
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.15.0
+version: 0.12.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -27,181 +27,26 @@ How to access to deployed cluster:
 kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
 ```
 
-# Series
-
-<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->
-
-. | U | O | CX | M | RT
-----------------------------|-----|-----|------|-----|------
-*Has GPUs* | | | | |
-*Hugepages* | | | ✓ | ✓ | ✓
-*Overcommitted Memory* | | ✓ | | |
-*Dedicated CPU* | | | ✓ | | ✓
-*Burstable CPU performance* | ✓ | ✓ | | ✓ |
-*Isolated emulator threads* | | | ✓ | | ✓
-*vNUMA* | | | ✓ | | ✓
-*vCPU-To-Memory Ratio* | 1:4 | 1:4 | 1:2 | 1:8 | 1:4
-
-## U Series
-
-The U Series is quite neutral and provides resources for
-general purpose applications.
-
-*U* is the abbreviation for "Universal", hinting at the universal
-attitude towards workloads.
-
-VMs of instance types will share physical CPU cores on a
-time-slice basis with other VMs.
-
-### U Series Characteristics
-
-Specific characteristics of this series are:
-- *Burstable CPU performance* - The workload has a baseline compute
-  performance but is permitted to burst beyond this baseline, if
-  excess compute resources are available.
-- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
-  noise per node.
-
-## O Series
-
-The O Series is based on the U Series, with the only difference
-being that memory is overcommitted.
-
-*O* is the abbreviation for "Overcommitted".
-
-### UO Series Characteristics
-
-Specific characteristics of this series are:
-- *Burstable CPU performance* - The workload has a baseline compute
-  performance but is permitted to burst beyond this baseline, if
-  excess compute resources are available.
-- *Overcommitted Memory* - Memory is over-committed in order to achieve
-  a higher workload density.
-- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
-  noise per node.
-
-## CX Series
-
-The CX Series provides exclusive compute resources for compute
-intensive applications.
-
-*CX* is the abbreviation of "Compute Exclusive".
-
-The exclusive resources are given to the compute threads of the
-VM. In order to ensure this, some additional cores (depending
-on the number of disks and NICs) will be requested to offload
-the IO threading from cores dedicated to the workload.
-In addition, in this series, the NUMA topology of the used
-cores is provided to the VM.
-
-### CX Series Characteristics
-
-Specific characteristics of this series are:
-- *Hugepages* - Hugepages are used in order to improve memory
-  performance.
-- *Dedicated CPU* - Physical cores are exclusively assigned to every
-  vCPU in order to provide fixed and high compute guarantees to the
-  workload.
-- *Isolated emulator threads* - Hypervisor emulator threads are isolated
-  from the vCPUs in order to reduce emaulation related impact on the
-  workload.
-- *vNUMA* - Physical NUMA topology is reflected in the guest in order to
-  optimize guest sided cache utilization.
-- *vCPU-To-Memory Ratio (1:2)* - A vCPU-to-Memory ratio of 1:2.
-
-## M Series
-
-The M Series provides resources for memory intensive
-applications.
-
-*M* is the abbreviation of "Memory".
-
-### M Series Characteristics
-
-Specific characteristics of this series are:
-- *Hugepages* - Hugepages are used in order to improve memory
-  performance.
-- *Burstable CPU performance* - The workload has a baseline compute
-  performance but is permitted to burst beyond this baseline, if
-  excess compute resources are available.
-- *vCPU-To-Memory Ratio (1:8)* - A vCPU-to-Memory ratio of 1:8, for much
-  less noise per node.
-
-## RT Series
-
-The RT Series provides resources for realtime applications, like Oslat.
-
-*RT* is the abbreviation for "realtime".
-
-This series of instance types requires nodes capable of running
-realtime applications.
-
-### RT Series Characteristics
-
-Specific characteristics of this series are:
-- *Hugepages* - Hugepages are used in order to improve memory
-  performance.
-- *Dedicated CPU* - Physical cores are exclusively assigned to every
-  vCPU in order to provide fixed and high compute guarantees to the
-  workload.
-- *Isolated emulator threads* - Hypervisor emulator threads are isolated
-  from the vCPUs in order to reduce emaulation related impact on the
-  workload.
-- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
-  the medium size.
-
-## Resources
-
-The following instancetype resources are provided by Cozystack:
-
-Name | vCPUs | Memory
------|-------|-------
-cx1.2xlarge | 8 | 16Gi
-cx1.4xlarge | 16 | 32Gi
-cx1.8xlarge | 32 | 64Gi
-cx1.large | 2 | 4Gi
-cx1.medium | 1 | 2Gi
-cx1.xlarge | 4 | 8Gi
-gn1.2xlarge | 8 | 32Gi
-gn1.4xlarge | 16 | 64Gi
-gn1.8xlarge | 32 | 128Gi
-gn1.xlarge | 4 | 16Gi
-m1.2xlarge | 8 | 64Gi
-m1.4xlarge | 16 | 128Gi
-m1.8xlarge | 32 | 256Gi
-m1.large | 2 | 16Gi
-m1.xlarge | 4 | 32Gi
-n1.2xlarge | 16 | 32Gi
-n1.4xlarge | 32 | 64Gi
-n1.8xlarge | 64 | 128Gi
-n1.large | 4 | 8Gi
-n1.medium | 4 | 4Gi
-n1.xlarge | 8 | 16Gi
-o1.2xlarge | 8 | 32Gi
-o1.4xlarge | 16 | 64Gi
-o1.8xlarge | 32 | 128Gi
-o1.large | 2 | 8Gi
-o1.medium | 1 | 4Gi
-o1.micro | 1 | 1Gi
-o1.nano | 1 | 512Mi
-o1.small | 1 | 2Gi
-o1.xlarge | 4 | 16Gi
-rt1.2xlarge | 8 | 32Gi
-rt1.4xlarge | 16 | 64Gi
-rt1.8xlarge | 32 | 128Gi
-rt1.large | 2 | 8Gi
-rt1.medium | 1 | 4Gi
-rt1.micro | 1 | 1Gi
-rt1.small | 1 | 2Gi
-rt1.xlarge | 4 | 16Gi
-u1.2xlarge | 8 | 32Gi
-u1.2xmedium | 2 | 4Gi
-u1.4xlarge | 16 | 64Gi
-u1.8xlarge | 32 | 128Gi
-u1.large | 2 | 8Gi
-u1.medium | 1 | 4Gi
-u1.micro | 1 | 1Gi
-u1.nano | 1 | 512Mi
-u1.small | 1 | 2Gi
-u1.xlarge | 4 | 16Gi
+## Parameters
+
+### Common parameters
+
+| Name | Description | Value |
+| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
+| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
+| `controlPlane.replicas` | Number of replicas for Kubernetes contorl-plane components | `2` |
+| `storageClass` | StorageClass used to store user data | `replicated` |
+| `nodeGroups` | nodeGroups configuration | `{}` |
+
+### Cluster Addons
+
+| Name | Description | Value |
+| ------------------------------------ | ---------------------------------------------------------------------------------- | ------- |
+| `addons.certManager.enabled` | Enables the cert-manager | `false` |
+| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
+| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
+| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
+| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
+| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
+| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.0@sha256:538ee308f16c9e627ed16ee7c4aaa65919c2e6c4c2778f964a06e4797610d1cd
+ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.12.0@sha256:7f617de5a24de790a15d9e97c6287ff2b390922e6e74c7a665cbf498f634514d
@@ -1,14 +1,12 @@
 # Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
-ARG builder_image=docker.io/library/golang:1.23.4
+ARG builder_image=docker.io/library/golang:1.22.5
 ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
 FROM ${builder_image} AS builder
 RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
  && cd /src/autoscaler/cluster-autoscaler \
- && git checkout cluster-autoscaler-1.32.0
+ && git checkout cluster-autoscaler-1.31.0
 
 WORKDIR /src/autoscaler/cluster-autoscaler
-COPY fix-downscale.diff /fix-downscale.diff
-RUN git apply /fix-downscale.diff
 RUN make build
 
 FROM $BASEIMAGE
@@ -1,13 +0,0 @@
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
index 4eec0e4bf..f28fd9241 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
@@ -106,8 +106,6 @@ func (r unstructuredScalableResource) Replicas() (int, error) {

func (r unstructuredScalableResource) SetSize(nreplicas int) error {
	switch {
-	case nreplicas > r.maxSize:
-		return fmt.Errorf("size increase too large - desired:%d max:%d", nreplicas, r.maxSize)
	case nreplicas < r.minSize:
		return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize)
	}
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.0@sha256:7716c88947d13dc90ccfcc3e60bfdd6e6fa9b201339a75e9c84bf825c76e2b1f
+ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.12.0@sha256:df4a937b6fb2b345110174227170691d48189ffe1900c3f848cd5085990a58df
@@ -3,14 +3,13 @@ FROM --platform=linux/amd64 golang:1.20.6 AS builder
 
 RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
  && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
- && git checkout da9e0cf
+ && git checkout adbd6c27468b86b020cf38490e84f124ef24ab62
 
 WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt
 
-# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/335
-# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/336
+# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/291
 ADD patches /patches
-RUN git apply /patches/*.diff
+RUN git apply /patches/external-traffic-policy-local.diff
 RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
 RUN go mod tidy
 RUN go mod vendor
@@ -1,20 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..95c31438 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -412,11 +412,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
// Create the desired port configuration
var desiredPorts []discovery.EndpointPort

- for _, port := range service.Spec.Ports {
+ for i := range service.Spec.Ports {
desiredPorts = append(desiredPorts, discovery.EndpointPort{
- Port: &port.TargetPort.IntVal,
- Protocol: &port.Protocol,
- Name: &port.Name,
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
+ Protocol: &service.Spec.Ports[i].Protocol,
+ Name: &service.Spec.Ports[i].Name,
})
}

@@ -1,129 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..6f6e3d32 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -108,32 +108,24 @@ func newRequest(reqType ReqType, obj interface{}, oldObj interface{}) *Request {
}

func (c *Controller) Init() error {
-
- // Act on events from Services on the infra cluster. These are created by the EnsureLoadBalancer function.
- // We need to watch for these events so that we can update the EndpointSlices in the infra cluster accordingly.
+ // Existing Service event handlers...
_, err := c.infraFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- // cast obj to Service
svc := obj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service added: %v/%v", svc.Namespace, svc.Name)
c.queue.Add(newRequest(AddReq, obj, nil))
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
- // cast obj to Service
newSvc := newObj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if newSvc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service updated: %v/%v", newSvc.Namespace, newSvc.Name)
c.queue.Add(newRequest(UpdateReq, newObj, oldObj))
}
},
DeleteFunc: func(obj interface{}) {
- // cast obj to Service
svc := obj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service deleted: %v/%v", svc.Namespace, svc.Name)
c.queue.Add(newRequest(DeleteReq, obj, nil))
@@ -144,7 +136,7 @@ func (c *Controller) Init() error {
return err
}

- // Monitor endpoint slices that we are interested in based on known services in the infra cluster
+ // Existing EndpointSlice event handlers in tenant cluster...
_, err = c.tenantFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eps := obj.(*discovery.EndpointSlice)
@@ -194,10 +186,80 @@ func (c *Controller) Init() error {
return err
}

- //TODO: Add informer for EndpointSlices in the infra cluster to watch for (unwanted) changes
+ // Add an informer for EndpointSlices in the infra cluster
+ _, err = c.infraFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ eps := obj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice added: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(AddReq, svc, nil))
+ }
+ }
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ eps := newObj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice updated: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(UpdateReq, svc, nil))
+ }
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ eps := obj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s on delete: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice deleted: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(DeleteReq, svc, nil))
+ }
+ }
+ },
+ })
+ if err != nil {
+ return err
+ }
+
return nil
}

+// getInfraServiceForEPS returns the Service in the infra cluster associated with the given EndpointSlice.
+// It does this by reading the "kubernetes.io/service-name" label from the EndpointSlice, which should correspond
+// to the Service name. If not found or if the Service doesn't exist, it returns nil.
+func (c *Controller) getInfraServiceForEPS(ctx context.Context, eps *discovery.EndpointSlice) (*v1.Service, error) {
+ svcName := eps.Labels[discovery.LabelServiceName]
+ if svcName == "" {
+ // No service name label found, can't determine infra service.
+ return nil, nil
+ }
+
+ svc, err := c.infraClient.CoreV1().Services(c.infraNamespace).Get(ctx, svcName, metav1.GetOptions{})
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ // Service doesn't exist
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ return svc, nil
+}
+
// Run starts an asynchronous loop that monitors and updates GKENetworkParamSet in the cluster.
func (c *Controller) Run(numWorkers int, stopCh <-chan struct{}, controllerManagerMetrics *controllersmetrics.ControllerManagerMetrics) {
defer utilruntime.HandleCrash()
File diff suppressed because it is too large
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.0@sha256:be5e0eef92dada3ace5cddda5c68b30c9fe4682774c5e6e938ed31efba11ebbf
+ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.12.0@sha256:86029548078960feecca116087b2135230d676b83c503f292eb50e1199be2790
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:8392f00a7182294ce6fd417d254f7c2aa09fb9203d829dec70344a8050369430
+ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:1f249fbe52821a62f706c6038b13401234e1b758ac498e53395b8f9a642b015f
@@ -30,12 +30,6 @@ spec:
       - /cluster-autoscaler
       args:
       - --cloud-provider=clusterapi
-      - --enforce-node-group-min-size=true
-      - --ignore-daemonsets-utilization=true
-      - --ignore-mirror-pods-utilization=true
-      - --scale-down-unneeded-time=30s
-      - --scan-interval=25s
-      - --force-delete-unregistered-nodes=true
      - --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
      - --clusterapi-cloud-config-authoritative
      - --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}
@@ -15,11 +15,6 @@ spec:
|
|||||||
node-role.kubernetes.io/{{ . }}: ""
|
node-role.kubernetes.io/{{ . }}: ""
|
||||||
{{- end }}
|
{{- end }}
|
||||||
spec:
|
spec:
|
||||||
{{- with .group.instanceType }}
|
|
||||||
instancetype:
|
|
||||||
kind: VirtualMachineClusterInstancetype
|
|
||||||
-name: {{ . }}
-{{- end }}
 runStrategy: Always
 template:
 metadata:
@@ -29,15 +24,12 @@ spec:
 {{- range .group.roles }}
 node-role.kubernetes.io/{{ . }}: ""
 {{- end }}
-cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
 spec:
 domain:
-{{- if and .group.resources .group.resources.cpu }}
 cpu:
 threads: 1
 cores: {{ .group.resources.cpu }}
 sockets: 1
-{{- end }}
 devices:
 disks:
 - name: system
@@ -51,10 +43,8 @@ spec:
 interfaces:
 - name: default
 bridge: {}
-{{- if and .group.resources .group.resources.memory }}
 memory:
 guest: {{ .group.resources.memory }}
-{{- end }}
 evictionStrategy: External
 volumes:
 - name: system
@@ -127,21 +117,6 @@ spec:
 replicas: 2
 version: 1.30.1
 ---
-apiVersion: cozystack.io/v1alpha1
-kind: WorkloadMonitor
-metadata:
-name: {{ .Release.Name }}
-namespace: {{ .Release.Namespace }}
-spec:
-replicas: 2
-minReplicas: 1
-kind: kubernetes
-type: control-plane
-selector:
-kamaji.clastix.io/component: deployment
-kamaji.clastix.io/name: {{ .Release.Name }}
-version: {{ $.Chart.Version }}
----
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
 kind: KubevirtCluster
 metadata:
@@ -188,7 +163,6 @@ spec:
 ---
 {{- $context := deepCopy $ }}
 {{- $_ := set $context "group" $group }}
-{{- $_ := set $context "groupName" $groupName }}
 {{- $kubevirtmachinetemplate := include "kubevirtmachinetemplate" $context }}
 {{- $kubevirtmachinetemplateHash := $kubevirtmachinetemplate | sha256sum | trunc 6 }}
 {{- $kubevirtmachinetemplateName := printf "%s-%s-%s" $.Release.Name $groupName $kubevirtmachinetemplateHash }}
@@ -202,14 +176,6 @@ spec:
 template:
 {{- $kubevirtmachinetemplate | nindent 4 }}
 ---
-{{- $instanceType := dict }}
-{{- if $group.instanceType }}
-{{- $instanceType = (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterInstancetype" "" $group.instanceType) }}
-{{- if not $instanceType }}
-{{- fail (printf "Specified instancetype not exists in cluster: %s" $group.instanceType) }}
-{{- end }}
-{{- end }}
-
 apiVersion: cluster.x-k8s.io/v1beta1
 kind: MachineDeployment
 metadata:
@@ -218,16 +184,8 @@ metadata:
 annotations:
 cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ $group.minReplicas }}"
 cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ $group.maxReplicas }}"
-{{- if and $group.resources $group.resources.memory }}
 capacity.cluster-autoscaler.kubernetes.io/memory: "{{ $group.resources.memory }}"
-{{- else }}
-capacity.cluster-autoscaler.kubernetes.io/memory: "{{ $instanceType.spec.memory.guest }}"
-{{- end }}
-{{- if and $group.resources $group.resources.cpu }}
 capacity.cluster-autoscaler.kubernetes.io/cpu: "{{ $group.resources.cpu }}"
-{{- else }}
-capacity.cluster-autoscaler.kubernetes.io/cpu: "{{ $instanceType.spec.cpu.guest }}"
-{{- end }}
 spec:
 clusterName: {{ $.Release.Name }}
 template:
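For orientation, a minimal sketch of what the capacity annotations in the hunk above render to, under two illustrative node-group declarations. The `md0` group name and `u1.medium` instancetype appear in the values.yaml hunk later in this diff; the guest CPU/memory figures below are assumptions for illustration, not values taken from this diff.

```yaml
# Sketch only. Node group declared with explicit resources
# (md0: { resources: { cpu: 2, memory: 1024Mi } }) renders on both sides:
annotations:
  capacity.cluster-autoscaler.kubernetes.io/cpu: "2"
  capacity.cluster-autoscaler.kubernetes.io/memory: "1024Mi"

# Node group declared with instanceType: "u1.medium" only. The removed
# {{- else }} branches read the guest sizing from the
# VirtualMachineClusterInstancetype looked up at template time;
# the numbers below are illustrative.
annotations:
  capacity.cluster-autoscaler.kubernetes.io/cpu: "1"
  capacity.cluster-autoscaler.kubernetes.io/memory: "4Gi"
```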
@@ -271,22 +229,7 @@ spec:
 timeout: 30s
 - type: Ready
 status: "False"
-timeout: 300s
+timeout: 30s
----
-apiVersion: cozystack.io/v1alpha1
-kind: WorkloadMonitor
-metadata:
-name: {{ $.Release.Name }}-{{ $groupName }}
-namespace: {{ $.Release.Namespace }}
-spec:
-minReplicas: {{ $group.minReplicas }}
-kind: kubernetes
-type: worker
-selector:
-cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
-cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ $groupName }}
-cluster.x-k8s.io/role: worker
-version: {{ $.Chart.Version }}
 {{- end }}
 ---
 {{- /*
@@ -24,13 +24,3 @@ rules:
 resourceNames:
 - {{ .Release.Name }}
 verbs: ["get", "list", "watch"]
-- apiGroups:
-- cozystack.io
-resources:
-- workloadmonitors
-resourceNames:
-- {{ .Release.Name }}
-{{- range $groupName, $group := .Values.nodeGroups }}
-- {{ $.Release.Name }}-{{ $groupName }}
-{{- end }}
-verbs: ["get", "list", "watch"]
@@ -1,54 +0,0 @@
-apiVersion: helm.toolkit.fluxcd.io/v2
-kind: HelmRelease
-metadata:
-name: {{ .Release.Name }}-cert-manager-crds
-labels:
-cozystack.io/repository: system
-coztstack.io/target-cluster-name: {{ .Release.Name }}
-spec:
-interval: 5m
-releaseName: cert-manager-crds
-chart:
-spec:
-chart: cozy-cert-manager-crds
-reconcileStrategy: Revision
-sourceRef:
-kind: HelmRepository
-name: cozystack-system
-namespace: cozy-system
-kubeConfig:
-secretRef:
-name: {{ .Release.Name }}-kubeconfig
-targetNamespace: cozy-cert-manager-crds
-storageNamespace: cozy-cert-manager-crds
-install:
-createNamespace: true
-remediation:
-retries: -1
-upgrade:
-remediation:
-retries: -1
-{{- if .Values.addons.certManager.valuesOverride }}
-valuesFrom:
-- kind: Secret
-name: {{ .Release.Name }}-cert-manager-crds-values-override
-valuesKey: values
-{{- end }}
-
-dependsOn:
-{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
-- name: {{ .Release.Name }}
-namespace: {{ .Release.Namespace }}
-{{- end }}
-- name: {{ .Release.Name }}-cilium
-namespace: {{ .Release.Namespace }}
-{{- if .Values.addons.certManager.valuesOverride }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-name: {{ .Release.Name }}-cert-manager-crds-values-override
-stringData:
-values: |
-{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
-{{- end }}
@@ -43,8 +43,6 @@ spec:
 {{- end }}
 - name: {{ .Release.Name }}-cilium
 namespace: {{ .Release.Namespace }}
-- name: {{ .Release.Name }}-cert-manager-crds
-namespace: {{ .Release.Namespace }}
 {{- end }}
 {{- if .Values.addons.certManager.valuesOverride }}
 ---
@@ -1,104 +0,0 @@
-{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
-{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
-{{- if .Values.addons.monitoringAgents.enabled }}
-apiVersion: helm.toolkit.fluxcd.io/v2
-kind: HelmRelease
-metadata:
-name: {{ .Release.Name }}-monitoring-agents
-labels:
-cozystack.io/repository: system
-coztstack.io/target-cluster-name: {{ .Release.Name }}
-spec:
-interval: 5m
-releaseName: cozy-monitoring-agents
-chart:
-spec:
-chart: cozy-monitoring-agents
-reconcileStrategy: Revision
-sourceRef:
-kind: HelmRepository
-name: cozystack-system
-namespace: cozy-system
-kubeConfig:
-secretRef:
-name: {{ .Release.Name }}-kubeconfig
-targetNamespace: cozy-monitoring-agents
-storageNamespace: cozy-monitoring-agents
-install:
-createNamespace: true
-timeout: "300s"
-remediation:
-retries: -1
-upgrade:
-remediation:
-retries: -1
-dependsOn:
-{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
-- name: {{ .Release.Name }}
-namespace: {{ .Release.Namespace }}
-{{- end }}
-- name: {{ .Release.Name }}-cilium
-namespace: {{ .Release.Namespace }}
-- name: {{ .Release.Name }}-cozy-victoria-metrics-operator
-namespace: {{ .Release.Namespace }}
-values:
-vmagent:
-externalLabels:
-cluster: {{ .Release.Name }}
-tenant: {{ .Release.Namespace }}
-remoteWrite:
-url: http://vminsert-shortterm.{{ $targetTenant }}.svc:8480/insert/0/prometheus
-fluent-bit:
-readinessProbe:
-httpGet:
-path: /
-daemonSetVolumes:
-- name: varlog
-hostPath:
-path: /var/log
-- name: varlibdockercontainers
-hostPath:
-path: /var/lib/docker/containers
-daemonSetVolumeMounts:
-- name: varlog
-mountPath: /var/log
-- name: varlibdockercontainers
-mountPath: /var/lib/docker/containers
-readOnly: true
-config:
-outputs: |
-[OUTPUT]
-Name http
-Match kube.*
-Host vlogs-generic.{{ $targetTenant }}.svc
-port 9428
-compress gzip
-uri /insert/jsonline?_stream_fields=stream,kubernetes_pod_name,kubernetes_container_name,kubernetes_namespace_name&_msg_field=log&_time_field=date
-format json_lines
-json_date_format iso8601
-header AccountID 0
-header ProjectID 0
-filters: |
-[FILTER]
-Name kubernetes
-Match kube.*
-Merge_Log On
-Keep_Log On
-K8S-Logging.Parser On
-K8S-Logging.Exclude On
-[FILTER]
-Name nest
-Match *
-Wildcard pod_name
-Operation lift
-Nested_under kubernetes
-Add_prefix kubernetes_
-[FILTER]
-Name modify
-Match *
-Add tenant {{ .Release.Namespace }}
-[FILTER]
-Name modify
-Match *
-Add cluster {{ .Release.Name }}
-{{- end }}
@@ -1,41 +0,0 @@
-{{- if .Values.addons.monitoringAgents.enabled }}
-apiVersion: helm.toolkit.fluxcd.io/v2
-kind: HelmRelease
-metadata:
-name: {{ .Release.Name }}-cozy-victoria-metrics-operator
-labels:
-cozystack.io/repository: system
-coztstack.io/target-cluster-name: {{ .Release.Name }}
-spec:
-interval: 5m
-releaseName: cozy-victoria-metrics-operator
-chart:
-spec:
-chart: cozy-victoria-metrics-operator
-reconcileStrategy: Revision
-sourceRef:
-kind: HelmRepository
-name: cozystack-system
-namespace: cozy-system
-kubeConfig:
-secretRef:
-name: {{ .Release.Name }}-kubeconfig
-targetNamespace: cozy-victoria-metrics-operator
-storageNamespace: cozy-victoria-metrics-operator
-install:
-createNamespace: true
-remediation:
-retries: -1
-upgrade:
-remediation:
-retries: -1
-dependsOn:
-{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
-- name: {{ .Release.Name }}
-namespace: {{ .Release.Namespace }}
-{{- end }}
-- name: {{ .Release.Name }}-cilium
-namespace: {{ .Release.Namespace }}
-- name: {{ .Release.Name }}-cert-manager-crds
-namespace: {{ .Release.Namespace }}
-{{- end }}
@@ -75,23 +75,8 @@
 "default": {}
 }
 }
-},
-"monitoringAgents": {
-"type": "object",
-"properties": {
-"enabled": {
-"type": "boolean",
-"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
-"default": false
-},
-"valuesOverride": {
-"type": "object",
-"description": "Custom values to override",
-"default": {}
-}
-}
 }
 }
 }
 }
 }
@@ -15,15 +15,13 @@ nodeGroups:
 md0:
 minReplicas: 0
 maxReplicas: 10
-instanceType: "u1.medium"
+resources:
+cpu: 2
+memory: 1024Mi
 ephemeralStorage: 20Gi
 roles:
 - ingress-nginx

-resources:
-cpu: ""
-memory: ""
-
 ## @section Cluster Addons
 ##
 addons:
@@ -60,12 +58,3 @@ addons:
 ##
 enabled: false
 valuesOverride: {}
-
-## MonitoringAgents
-##
-monitoringAgents:
-## @param addons.monitoringAgents.enabled Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage
-## @param addons.monitoringAgents.valuesOverride Custom values to override
-##
-enabled: false
-valuesOverride: {}
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.2
+version: 0.5.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.2@sha256:4bbfbb397bd7ecea45507ca47989c51429c4a24f40853ac92583e5b5b352fbea
+ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.1@sha256:793edb25a29cbc00781e40af883815ca36937e736e2b0d202ea9c9619fb6ca11
@@ -13,7 +13,6 @@ spec:
 port: 3306

 replicas: {{ .Values.replicas }}
-replicasAllowEvenNumber: true
 affinity:
 podAntiAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:
@@ -8,7 +8,7 @@
 {{- end }}

 {{- $usersWithRoot := .Values.users }}
-{{- if not (and .Values.users.root .Values.users.root.password) }}
+{{- if (and .Values.users.root .Values.users.root.password) }}
 {{- $_ := set $usersWithRoot "root" dict }}
 {{- end }}

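A note on the one-word difference in the hunk above: with `not`, the root entry is added to `$usersWithRoot` only when no explicit root password is supplied, so an operator-provided password survives; without `not`, a supplied root password is exactly what triggers the reset to an empty dict. A minimal sketch with illustrative values (not part of this diff):

```yaml
# Sketch only (illustrative values).
users:
  root:
    password: my-secret
# With `if not (and .Values.users.root .Values.users.root.password)`:
#   the condition is false, root stays as provided -> password kept.
# With `if (and .Values.users.root .Values.users.root.password)`:
#   the condition is true, root is reset to an empty dict -> password dropped.
```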
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.0
+version: 0.2.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -4,13 +4,9 @@

 ### Common parameters

 | Name | Description | Value |
-| ------------------- | -------------------------------------------------- | ------- |
+| -------------- | ----------------------------------------------- | ------- |
 | `external` | Enable external access from outside the cluster | `false` |
 | `replicas` | Persistent Volume size for NATS | `2` |
 | `storageClass` | StorageClass used to store the data | `""` |
-| `users` | Users configuration | `{}` |
-| `jetstream.size` | Jetstream persistent storage size | `10Gi` |
-| `jetstream.enabled` | Enable or disable Jetstream | `true` |
-| `config.merge` | Additional configuration to merge into NATS config | `{}` |
-| `config.resolver` | Additional configuration to merge into NATS config | `{}` |
@@ -1,25 +1,3 @@
-{{- $passwords := dict }}
-{{- range $user, $u := .Values.users }}
-{{- if $u.password }}
-{{- $_ := set $passwords $user $u.password }}
-{{- else if not (index $passwords $user) }}
-{{- $_ := set $passwords $user (randAlphaNum 16) }}
-{{- end }}
-{{- end }}
-
-{{- if .Values.users }}
-apiVersion: v1
-kind: Secret
-metadata:
-name: {{ .Release.Name }}-credentials
-stringData:
-{{- range $user, $u := .Values.users }}
-{{ quote $user }}: {{ quote (index $passwords $user) }}
-{{- end }}
-{{- end }}
-
----
-
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
 metadata:
@@ -40,25 +18,6 @@ spec:
 nats:
 fullnameOverride: {{ .Release.Name }}
 config:
-{{- if or (gt (len $passwords) 0) (gt (len .Values.config.merge) 0) }}
-merge:
-{{- if gt (len $passwords) 0 }}
-accounts:
-A:
-users:
-{{- range $username, $password := $passwords }}
-- user: "{{ $username }}"
-password: "{{ $password }}"
-{{- end }}
-{{- end }}
-{{- if and .Values.config (hasKey .Values.config "merge") }}
-{{ toYaml .Values.config.merge | nindent 12 }}
-{{- end }}
-{{- end }}
-{{- if and .Values.config (hasKey .Values.config "resolver") }}
-resolver:
-{{ toYaml .Values.config.resolver | nindent 12 }}
-{{- end }}
 cluster:
 enabled: true
 replicas: {{ .Values.replicas }}
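For readers following the removed `$passwords` logic above: given user entries in values (the `user1`/`user2` shapes come from the values.yaml documentation removed later in this diff), the block rendered a NATS `accounts` section and merged any `config.merge` overrides under the same key. A rough sketch of the rendered result; the generated password shown is only an example of the `randAlphaNum 16` output:

```yaml
# Sketch only. Input values (illustrative):
#   users:
#     user1:
#       password: strongpassword
#     user2: {}        # no password -> a randAlphaNum 16 value is generated
#   config:
#     merge:
#       server_name: nats
#
# Roughly rendered into the bundled nats chart values:
config:
  merge:
    accounts:
      A:
        users:
        - user: "user1"
          password: "strongpassword"
        - user: "user2"
          password: "k3XbPq9ZtL1mR7Yw"   # example of a generated value
    server_name: nats
```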
@@ -67,10 +26,10 @@ spec:
 jetstream:
 enabled: true
 fileStore:
-enabled: {{ .Values.jetstream.enabled }}
+enabled: true
 pvc:
 enabled: true
-size: {{ .Values.jetstream.size }}
+size: 10Gi
 {{- with .Values.storageClass }}
 storageClassName: {{ . }}
 {{- end }}
@@ -1,19 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-name: {{ .Release.Name }}-dashboard-resources
-rules:
-- apiGroups:
-- ""
-resources:
-- services
-resourceNames:
-- {{ .Release.Name }}
-verbs: ["get", "list", "watch"]
-- apiGroups:
-- ""
-resources:
-- secrets
-resourceNames:
-- {{ .Release.Name }}-credentials
-verbs: ["get", "list", "watch"]
@@ -16,36 +16,6 @@
 "type": "string",
 "description": "StorageClass used to store the data",
 "default": ""
-},
-"jetstream": {
-"type": "object",
-"properties": {
-"size": {
-"type": "string",
-"description": "Jetstream persistent storage size",
-"default": "10Gi"
-},
-"enabled": {
-"type": "boolean",
-"description": "Enable or disable Jetstream",
-"default": true
-}
-}
-},
-"config": {
-"type": "object",
-"properties": {
-"merge": {
-"type": "object",
-"description": "Additional configuration to merge into NATS config",
-"default": {}
-},
-"resolver": {
-"type": "object",
-"description": "Additional configuration to merge into NATS config",
-"default": {}
-}
-}
 }
 }
 }
@@ -8,56 +8,3 @@
 external: false
 replicas: 2
 storageClass: ""
-## @param users [object] Users configuration
-## Example:
-## users:
-##   user1:
-##     password: strongpassword
-##   user2: {}
-users: {}
-
-jetstream:
-## @param jetstream.size Jetstream persistent storage size
-## Specifies the size of the persistent storage for Jetstream (message store).
-## Default: 10Gi
-size: 10Gi
-
-## @param jetstream.enabled Enable or disable Jetstream
-## Set to true to enable Jetstream for persistent messaging in NATS.
-## Default: true
-enabled: true
-
-config:
-## @param config.merge Additional configuration to merge into NATS config
-## Allows you to customize NATS server settings by merging additional configurations.
-## For example, you can add extra parameters, configure authentication, or set custom settings.
-## Default: {}
-## example:
-##
-##   merge:
-##     $include: ./my-config.conf
-##     zzz$include: ./my-config-last.conf
-##     server_name: nats
-##     authorization:
-##       token: << $TOKEN >>
-##     jetstream:
-##       max_memory_store: << 1GB >>
-##
-## will yield the config:
-## {
-##   include ./my-config.conf;
-##   "authorization": {
-##     "token": $TOKEN
-##   },
-##   "jetstream": {
-##     "max_memory_store": 1GB
-##   },
-##   "server_name": "nats",
-##   include ./my-config-last.conf;
-## }
-merge: {}
-## @param config.resolver Additional configuration to merge into NATS config
-## Allows you to customize NATS server settings by merging resolver configurations.
-## Default: {}
-## Example see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml#L247
-resolver: {}
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.0
+version: 0.7.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -6,34 +6,30 @@ PostgreSQL is currently the leading choice among relational databases, known fo

 This managed service is controlled by the CloudNativePG operator, ensuring efficient management and seamless operation.

-- Docs: <https://cloudnative-pg.io/docs/>
+- Docs: https://cloudnative-pg.io/docs/
-- Github: <https://github.com/cloudnative-pg/cloudnative-pg>
+- Github: https://github.com/cloudnative-pg/cloudnative-pg

 ## HowTos

 ### How to switch master/slave replica

 See:
+- https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised

-- <https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised>
+### How to restore backup:

-### How to restore backup

 find snapshot:
+```
-```bash
 restic -r s3:s3.example.org/postgres-backups/database_name snapshots
 ```

 restore:
+```
-```bash
 restic -r s3:s3.example.org/postgres-backups/database_name restore latest --target /tmp/
 ```

 more details:
+- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
-- <https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1>

 ## Parameters

@@ -68,3 +64,5 @@ more details:
 | `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
 | `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
 | `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
+
+
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:6a8ec7e7052f2d02ec5457d7cbac6ee52b3ed93a883988a192d1394fc7c88117
+ghcr.io/aenix-io/cozystack/postgres-backup:0.7.0@sha256:d2015c6dba92293bda652d055e97d1be80e8414c2dc78037c12812d1a2e2cba1
@@ -19,10 +19,3 @@ rules:
 resourceNames:
 - {{ .Release.Name }}-credentials
 verbs: ["get", "list", "watch"]
-- apiGroups:
-- cozystack.io
-resources:
-- workloadmonitors
-resourceNames:
-- {{ .Release.Name }}
-verbs: ["get", "list", "watch"]
@@ -29,17 +29,3 @@ spec:
 inheritedMetadata:
 labels:
 policy.cozystack.io/allow-to-apiserver: "true"
----
-apiVersion: cozystack.io/v1alpha1
-kind: WorkloadMonitor
-metadata:
-name: {{ $.Release.Name }}
-spec:
-replicas: {{ .Values.replicas }}
-minReplicas: 1
-kind: postgres
-type: postgres
-selector:
-cnpg.io/cluster: {{ .Release.Name }}
-cnpg.io/podRole: instance
-version: {{ $.Chart.Version }}
@@ -34,9 +34,6 @@ stringData:
 init.sh: |
 #!/bin/bash
 set -e
-
-until pg_isready ; do sleep 5; done
-
 echo "== create users"
 {{- if .Values.users }}
 psql -v ON_ERROR_STOP=1 <<\EOT
@@ -63,7 +60,7 @@ stringData:
 DROP USER $user;
 EOT
 done

 echo "== create databases and roles"
 {{- if .Values.databases }}
 psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
@@ -95,7 +92,7 @@ stringData:
 FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
 -- Changing Schema Ownership
 EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, '{{ $database }}_admin');

 -- Add rights for the admin role
 EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, '{{ $database }}_admin');
 EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, '{{ $database }}_admin');
@@ -104,7 +101,7 @@ stringData:
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, '{{ $database }}_admin');
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, '{{ $database }}_admin');
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, '{{ $database }}_admin');

 -- Add rights for the readonly role
 EXECUTE format('GRANT USAGE ON SCHEMA %I TO %I', schema_record.schema_name, '{{ $database }}_readonly');
 EXECUTE format('GRANT SELECT ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, '{{ $database }}_readonly');
@@ -122,9 +119,9 @@ stringData:
 CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
 RETURNS event_trigger LANGUAGE plpgsql AS $$
 DECLARE
 obj record;
 BEGIN
 FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
 EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, '{{ $database }}_admin');
 EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, '{{ $database }}_admin');
 EXECUTE format('GRANT USAGE ON SCHEMA %I TO %I', obj.object_identity, '{{ $database }}_readonly');
@@ -149,7 +146,7 @@ stringData:
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT SELECT ON TABLES TO %I', obj.object_identity, '{{ $database }}_readonly');
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT USAGE ON SEQUENCES TO %I', obj.object_identity, '{{ $database }}_readonly');
 EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT EXECUTE ON FUNCTIONS TO %I', obj.object_identity, '{{ $database }}_readonly');
 END LOOP;
 END;
 $$;

@@ -103,4 +103,4 @@
 }
 }
 }
 }
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.3
+version: 0.4.2

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -18,7 +18,6 @@ spec:
 template:
 spec:
 enableServiceLinks: false
-containers: []
 metadata:
 labels:
 policy.cozystack.io/allow-to-apiserver: "true"
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.0
+version: 0.3.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -19,6 +19,5 @@ Service utilizes the Spotahome Redis Operator for efficient management and orche
 | `size` | Persistent Volume size | `1Gi` |
 | `replicas` | Number of Redis replicas | `2` |
 | `storageClass` | StorageClass used to store the data | `""` |
-| `authEnabled` | Enable password generation | `true` |

@@ -1,30 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-name: {{ .Release.Name }}-dashboard-resources
-rules:
-- apiGroups:
-- ""
-resources:
-- services
-resourceNames:
-- rfs-{{ .Release.Name }}
-- rfrm-{{ .Release.Name }}
-- rfrs-{{ .Release.Name }}
-- "{{ .Release.Name }}-external-lb"
-verbs: ["get", "list", "watch"]
-- apiGroups:
-- ""
-resources:
-- secrets
-resourceNames:
-- "{{ .Release.Name }}-auth"
-verbs: ["get", "list", "watch"]
-- apiGroups:
-- cozystack.io
-resources:
-- workloadmonitors
-resourceNames:
-- {{ .Release.Name }}-redis
-- {{ .Release.Name }}-sentinel
-verbs: ["get", "list", "watch"]
@@ -1,20 +1,3 @@
-{{- if .Values.authEnabled }}
-{{- $existingPassword := lookup "v1" "Secret" .Release.Namespace (printf "%s-auth" .Release.Name) }}
-{{- $password := randAlphaNum 32 | b64enc }}
-{{- if $existingPassword }}
-{{- $password = index $existingPassword.data "password" }}
-{{- end }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-name: {{ .Release.Name }}-auth
-data:
-password: {{ $password }}
-{{- end }}
-
----
-
 apiVersion: databases.spotahome.com/v1
 kind: RedisFailover
 metadata:
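The removed block above follows a common Helm pattern worth noting: generate a random password once, then reuse the value already stored in the cluster on later renders so upgrades do not rotate credentials. A minimal sketch of what it produces for a release named `myredis` (the release name is illustrative, not from this diff):

```yaml
# Sketch only: rendered result of the removed template when authEnabled is true.
apiVersion: v1
kind: Secret
metadata:
  name: myredis-auth   # "<release>-auth"
data:
  password: "<existing value if the Secret was already present, otherwise base64 of a fresh randAlphaNum 32>"
---
# and, further down in the RedisFailover spec (also removed in this diff):
auth:
  secretPath: myredis-auth
```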
@@ -37,6 +20,7 @@ spec:
 cpu: 150m
 memory: 400Mi
 limits:
+cpu: 2
 memory: 1000Mi
 {{- with .Values.size }}
 storage:
@@ -53,7 +37,7 @@ spec:
 storageClassName: {{ . }}
 {{- end }}
 {{- end }}
 exporter:
 enabled: true
 image: oliver006/redis_exporter:v1.55.0-alpine
 args:
@@ -69,38 +53,3 @@ spec:
 - appendonly no
 - save ""
 {{- end }}
-{{- if .Values.authEnabled }}
-auth:
-secretPath: {{ .Release.Name }}-auth
-{{- end }}
-
----
-apiVersion: cozystack.io/v1alpha1
-kind: WorkloadMonitor
-metadata:
-name: {{ $.Release.Name }}-redis
-namespace: {{ $.Release.Namespace }}
-spec:
-minReplicas: 1
-replicas: {{ .Values.replicas }}
-kind: redis
-type: redis
-selector:
-app.kubernetes.io/component: redis
-app.kubernetes.io/instance: {{ $.Release.Name }}
-version: {{ $.Chart.Version }}
----
-apiVersion: cozystack.io/v1alpha1
-kind: WorkloadMonitor
-metadata:
-name: {{ $.Release.Name }}-sentinel
-namespace: {{ $.Release.Namespace }}
-spec:
-minReplicas: 2
-replicas: 3
-kind: redis
-type: sentinel
-selector:
-app.kubernetes.io/component: sentinel
-app.kubernetes.io/instance: {{ $.Release.Name }}
-version: {{ $.Chart.Version }}
@@ -21,11 +21,6 @@
 "type": "string",
 "description": "StorageClass used to store the data",
 "default": ""
-},
-"authEnabled": {
-"type": "boolean",
-"description": "Enable password generation",
-"default": true
 }
 }
 }
@@ -4,10 +4,8 @@
 ## @param size Persistent Volume size
 ## @param replicas Number of Redis replicas
 ## @param storageClass StorageClass used to store the data
-## @param authEnabled Enable password generation
 ##
 external: false
 size: 1Gi
 replicas: 2
 storageClass: ""
-authEnabled: true
@@ -4,4 +4,4 @@ description: Separated tenant namespace
 icon: /logos/tenant.svg

 type: application
-version: 1.6.7
+version: 1.4.0
@@ -1,13 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-name: {{ include "tenant.name" . }}-dashboard-resources
-namespace: {{ .Release.namespace }}
-rules:
-- apiGroups:
-- ""
-resources:
-- secrets
-resourceNames:
-- kubeconfig-{{ include "tenant.name" . }}
-verbs: ["get", "list", "watch"]
@@ -1,53 +0,0 @@
-{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
-{{- $oidcEnabled := index $cozyConfig.data "oidc-enabled" }}
-{{- if $oidcEnabled }}
-apiVersion: v1.edp.epam.com/v1
-kind: KeycloakRealmGroup
-metadata:
-name: {{ include "tenant.name" . }}-view
-namespace: {{ include "tenant.name" . }}
-spec:
-name: {{ include "tenant.name" . }}-view
-realmRef:
-name: keycloakrealm-cozy
-kind: ClusterKeycloakRealm
-
----
-
-apiVersion: v1.edp.epam.com/v1
-kind: KeycloakRealmGroup
-metadata:
-name: {{ include "tenant.name" . }}-use
-namespace: {{ include "tenant.name" . }}
-spec:
-name: {{ include "tenant.name" . }}-use
-realmRef:
-name: keycloakrealm-cozy
-kind: ClusterKeycloakRealm
-
----
-
-apiVersion: v1.edp.epam.com/v1
-kind: KeycloakRealmGroup
-metadata:
-name: {{ include "tenant.name" . }}-admin
-namespace: {{ include "tenant.name" . }}
-spec:
-name: {{ include "tenant.name" . }}-admin
-realmRef:
-name: keycloakrealm-cozy
-kind: ClusterKeycloakRealm
-
----
-
-apiVersion: v1.edp.epam.com/v1
-kind: KeycloakRealmGroup
-metadata:
-name: {{ include "tenant.name" . }}-super-admin
-namespace: {{ include "tenant.name" . }}
-spec:
-name: {{ include "tenant.name" . }}-super-admin
-realmRef:
-name: keycloakrealm-cozy
-kind: ClusterKeycloakRealm
-{{- end }}
@@ -1,48 +0,0 @@
-{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
-{{- $host := index $cozyConfig.data "root-host" }}
-{{- $k8sClientSecret := lookup "v1" "Secret" "cozy-keycloak" "k8s-client" }}
-
-{{- if $k8sClientSecret }}
-{{- $apiServerEndpoint := index $cozyConfig.data "api-server-endpoint" }}
-{{- $managementKubeconfigEndpoint := default "" (get $cozyConfig.data "management-kubeconfig-endpoint") }}
-{{- if and $managementKubeconfigEndpoint (ne $managementKubeconfigEndpoint "") }}
-{{- $apiServerEndpoint = $managementKubeconfigEndpoint }}
-{{- end }}
-{{- $k8sClient := index $k8sClientSecret.data "client-secret-key" | b64dec }}
-{{- $rootSaConfigMap := lookup "v1" "ConfigMap" "kube-system" "kube-root-ca.crt" }}
-{{- $k8sCa := index $rootSaConfigMap.data "ca.crt" | b64enc }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-name: kubeconfig-{{ include "tenant.name" . }}
-namespace: tenant-root
-stringData:
-kubeconfig: |
-apiVersion: v1
-clusters:
-- cluster:
-server: {{ $apiServerEndpoint }}
-certificate-authority-data: {{ $k8sCa }}
-name: cluster
-contexts:
-- context:
-cluster: cluster
-namespace: {{ include "tenant.name" . }}
-user: keycloak
-name: {{ include "tenant.name" . }}
-current-context: {{ include "tenant.name" . }}
-users:
-- name: keycloak
-user:
-exec:
-apiVersion: client.authentication.k8s.io/v1beta1
-args:
-- oidc-login
-- get-token
-- --oidc-issuer-url=https://keycloak.{{ $host }}/realms/cozy
-- --oidc-client-id=kubernetes
-- --oidc-client-secret={{ $k8sClient }}
-- --skip-open-browser
-command: kubectl
-{{- end }}
@@ -26,24 +26,12 @@ spec:
 metricsStorages:
 - name: shortterm
 retentionPeriod: "3d"
-deduplicationInterval: "15s"
-storage: 10Gi
-vminsert:
-resources: {}
-vmselect:
-resources: {}
-vmstorage:
-resources: {}
-- name: longterm
-retentionPeriod: "14d"
 deduplicationInterval: "5m"
 storage: 10Gi
-vminsert:
+- name: longterm
-resources: {}
+retentionPeriod: "14d"
-vmselect:
+deduplicationInterval: "15s"
-resources: {}
+storage: 10Gi
-vmstorage:
-resources: {}
 oncall:
 enabled: false
 {{- end }}
@@ -159,30 +159,6 @@ spec:
 ---
 apiVersion: cilium.io/v2
 kind: CiliumNetworkPolicy
-metadata:
-name: allow-to-keycloak
-namespace: {{ include "tenant.name" . }}
-spec:
-endpointSelector: {}
-egress:
-- toEndpoints:
-- matchLabels:
-"k8s:io.kubernetes.pod.namespace": cozy-keycloak
----
-apiVersion: cilium.io/v2
-kind: CiliumNetworkPolicy
-metadata:
-name: allow-to-cdi-upload-proxy
-namespace: {{ include "tenant.name" . }}
-spec:
-endpointSelector: {}
-egress:
-- toEndpoints:
-- matchLabels:
-"k8s:io.kubernetes.pod.namespace": cozy-kubevirt-cdi
----
-apiVersion: cilium.io/v2
-kind: CiliumNetworkPolicy
 metadata:
 name: allow-to-ingress
 namespace: {{ include "tenant.name" . }}
@@ -14,8 +14,6 @@ metadata:
 kubernetes.io/service-account.name: {{ include "tenant.name" . }}
 type: kubernetes.io/service-account-token
 ---
-# == default role ==
----
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
@@ -31,10 +29,9 @@ rules:
 - apiGroups: ["rbac.authorization.k8s.io"]
 resources: ["roles"]
 verbs: ["get"]
-- apiGroups: ["apps.cozystack.io"]
+- apiGroups: ["helm.toolkit.fluxcd.io"]
-resources: ['*']
+resources: ["helmreleases"]
-verbs: ['*']
+verbs: ["*"]
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
@@ -65,307 +62,6 @@ roleRef:
 name: {{ include "tenant.name" . }}
 apiGroup: rbac.authorization.k8s.io
 ---
-# == view role ==
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-view
-namespace: {{ include "tenant.name" . }}
-rules:
-- apiGroups:
-- rbac.authorization.k8s.io
-resources:
-- roles
-verbs:
-- get
-- apiGroups:
-- apps.cozystack.io
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- apiGroups:
-- ""
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- apiGroups:
-- networking.k8s.io
-resources:
-- ingresses
-verbs:
-- get
-- list
-- watch
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-view
-namespace: {{ include "tenant.name" . }}
-subjects:
-{{- if ne .Release.Namespace "tenant-root" }}
-- kind: Group
-name: tenant-root-view
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-- kind: Group
-name: {{ include "tenant.name" . }}-view
-apiGroup: rbac.authorization.k8s.io
-{{- if hasPrefix "tenant-" .Release.Namespace }}
-{{- $parts := splitList "-" .Release.Namespace }}
-{{- range $i, $v := $parts }}
-{{- if ne $i 0 }}
-- kind: Group
-name: {{ join "-" (slice $parts 0 (add $i 1)) }}-view
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-{{- end }}
-{{- end }}
-roleRef:
-kind: Role
-name: {{ include "tenant.name" . }}-view
-apiGroup: rbac.authorization.k8s.io
-
----
-# == use role ==
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-use
-namespace: {{ include "tenant.name" . }}
-rules:
-- apiGroups: [rbac.authorization.k8s.io]
-resources:
-- roles
-verbs:
-- get
-- apiGroups: ["apps.cozystack.io"]
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- apiGroups: [""]
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- apiGroups: ["networking.k8s.io"]
-resources:
-- ingresses
-verbs:
-- get
-- list
-- watch
-- apiGroups: ["subresources.kubevirt.io"]
-resources:
-- virtualmachineinstances/console
-- virtualmachineinstances/vnc
-verbs:
-- get
-- list
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-use
-namespace: {{ include "tenant.name" . }}
-subjects:
-{{- if ne .Release.Namespace "tenant-root" }}
-- kind: Group
-name: tenant-root-use
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-- kind: Group
-name: {{ include "tenant.name" . }}-use
-apiGroup: rbac.authorization.k8s.io
-{{- if hasPrefix "tenant-" .Release.Namespace }}
-{{- $parts := splitList "-" .Release.Namespace }}
-{{- range $i, $v := $parts }}
-{{- if ne $i 0 }}
-- kind: Group
-name: {{ join "-" (slice $parts 0 (add $i 1)) }}-use
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-{{- end }}
-{{- end }}
-roleRef:
-kind: Role
-name: {{ include "tenant.name" . }}-use
-apiGroup: rbac.authorization.k8s.io
----
-# == admin role ==
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-admin
-namespace: {{ include "tenant.name" . }}
-rules:
-- apiGroups: [rbac.authorization.k8s.io]
-resources:
-- roles
-verbs:
-- get
-- apiGroups: [""]
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- delete
-- apiGroups: ["kubevirt.io"]
-resources:
-- virtualmachines
-verbs:
-- get
-- list
-- apiGroups: ["subresources.kubevirt.io"]
-resources:
-- virtualmachineinstances/console
-- virtualmachineinstances/vnc
-verbs:
-- get
-- list
-- apiGroups: ["apps.cozystack.io"]
-resources:
-- buckets
-- clickhouses
-- ferretdb
-- foos
-- httpcaches
-- kafkas
-- kuberneteses
-- mysqls
-- natses
-- postgreses
-- rabbitmqs
-- redises
-- seaweedfses
-- tcpbalancers
-- virtualmachines
-- vmdisks
-- vminstances
-verbs:
-- get
-- list
-- watch
-- create
-- update
-- patch
-- delete
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-admin
-namespace: {{ include "tenant.name" . }}
-subjects:
-{{- if ne .Release.Namespace "tenant-root" }}
-- kind: Group
-name: tenant-root-admin
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-- kind: Group
-name: {{ include "tenant.name" . }}-admin
-apiGroup: rbac.authorization.k8s.io
-{{- if hasPrefix "tenant-" .Release.Namespace }}
-{{- $parts := splitList "-" .Release.Namespace }}
-{{- range $i, $v := $parts }}
-{{- if ne $i 0 }}
-- kind: Group
-name: {{ join "-" (slice $parts 0 (add $i 1)) }}-admin
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-{{- end }}
-{{- end }}
-roleRef:
-kind: Role
-name: {{ include "tenant.name" . }}-admin
-apiGroup: rbac.authorization.k8s.io
----
-# == super admin role ==
----
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-super-admin
-namespace: {{ include "tenant.name" . }}
-rules:
-- apiGroups: [rbac.authorization.k8s.io]
-resources:
-- roles
-verbs:
-- get
-- apiGroups: [""]
-resources:
-- "*"
-verbs:
-- get
-- list
-- watch
-- delete
-- apiGroups: ["kubevirt.io"]
-resources:
-- virtualmachines
-verbs:
-- '*'
-- apiGroups: ["subresources.kubevirt.io"]
-resources:
-- virtualmachineinstances/console
-- virtualmachineinstances/vnc
-verbs:
-- get
-- list
-- apiGroups: ["apps.cozystack.io"]
-resources:
-- '*'
-verbs:
-- '*'
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-name: {{ include "tenant.name" . }}-super-admin
-namespace: {{ include "tenant.name" . }}
-subjects:
-{{- if ne .Release.Namespace "tenant-root" }}
-- kind: Group
-name: tenant-root-super-admin
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-- kind: Group
-name: {{ include "tenant.name" . }}-super-admin
-apiGroup: rbac.authorization.k8s.io
-{{- if hasPrefix "tenant-" .Release.Namespace }}
-{{- $parts := splitList "-" .Release.Namespace }}
-{{- range $i, $v := $parts }}
-{{- if ne $i 0 }}
-- kind: Group
-name: {{ join "-" (slice $parts 0 (add $i 1)) }}-super-admin
-apiGroup: rbac.authorization.k8s.io
-{{- end }}
-{{- end }}
-{{- end }}
-roleRef:
-kind: Role
-name: {{ include "tenant.name" . }}-super-admin
-apiGroup: rbac.authorization.k8s.io
----
-# == dashboard role ==
----
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
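To make the removed `splitList`/`slice` loop above easier to follow: for a nested tenant it emits one Group subject per ancestor tenant, so parent-tenant admins keep access to child tenants. A sketch of the subjects the removed "-admin" RoleBinding renders for a release in namespace `tenant-foo-bar` (the tenant name is illustrative, not from this diff):

```yaml
# Sketch only: rendered subjects for .Release.Namespace = "tenant-foo-bar".
subjects:
- kind: Group
  name: tenant-root-admin          # added because the namespace is not tenant-root
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: tenant-foo-bar-admin       # the tenant's own group
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: tenant-foo-admin           # from the loop: join of ["tenant","foo"]
  apiGroup: rbac.authorization.k8s.io
- kind: Group
  name: tenant-foo-bar-admin       # the loop re-emits the full name; a duplicate subject is harmless
  apiGroup: rbac.authorization.k8s.io
```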
@@ -377,7 +73,7 @@ rules:
 verbs: ["get", "list"]
 - apiGroups: ["source.toolkit.fluxcd.io"]
 resources: ["helmcharts"]
-verbs: ["get", "list"]
+verbs: ["*"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
@@ -385,18 +81,6 @@ metadata:
 name: {{ include "tenant.name" . }}
 namespace: cozy-public
 subjects:
-- kind: Group
-name: {{ include "tenant.name" . }}-super-admin
-apiGroup: rbac.authorization.k8s.io
-- kind: Group
-name: {{ include "tenant.name" . }}-admin
-apiGroup: rbac.authorization.k8s.io
-- kind: Group
-name: {{ include "tenant.name" . }}-use
-apiGroup: rbac.authorization.k8s.io
-- kind: Group
-name: {{ include "tenant.name" . }}-view
-apiGroup: rbac.authorization.k8s.io
 - kind: ServiceAccount
 name: {{ include "tenant.name" . }}
 namespace: {{ include "tenant.name" . }}
Some files were not shown because too many files have changed in this diff.