Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-28 18:18:41 +00:00)

Compare commits: secureboot...v0.26.1 (119 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8c4605284c | |
| | f708dc2043 | |
| | 160e4e2a32 | |
| | 79eadda494 | |
| | 3da1a4ed92 | |
| | a5dc2d5382 | |
| | 705eb06078 | |
| | e735f96555 | |
| | f976ff8ed3 | |
| | 9ae6b2b0da | |
| | 86bb64000e | |
| | 19e0e4c2dc | |
| | 86724a6860 | |
| | a226fdd242 | |
| | e2369bae68 | |
| | 46f0bb2078 | |
| | 6ff8b527ea | |
| | 0f87c73051 | |
| | d0d62e8847 | |
| | 439381e474 | |
| | a6a95b0091 | |
| | 392cd862e9 | |
| | b32106484f | |
| | 77df31e105 | |
| | 24fa722276 | |
| | 0211c57bed | |
| | 135b0609b4 | |
| | 6c73e3f3ae | |
| | bc95159a80 | |
| | 0f68db6793 | |
| | 9a55747885 | |
| | bd90eb267f | |
| | a31c3a5796 | |
| | 7d5b22e662 | |
| | 42f1dabc31 | |
| | eefef8b09f | |
| | 93c4616115 | |
| | 1f6ea333b6 | |
| | 4cc48e6f34 | |
| | ecfb02a76f | |
| | cc0222aa11 | |
| | 65036e8145 | |
| | e2e32096a3 | |
| | 84a23947b0 | |
| | d234d58a16 | |
| | b75aaf177b | |
| | 87328a6ff3 | |
| | 3fa4dd3af9 | |
| | 6245976d3e | |
| | dacabe6317 | |
| | bf68404c53 | |
| | 5f40685161 | |
| | f768dc1632 | |
| | 1a88883a3b | |
| | a42f98e04c | |
| | 842d3e55bc | |
| | f02397aab5 | |
| | 5a47754a92 | |
| | d91bc52594 | |
| | f67816e2d3 | |
| | 861e6c464b | |
| | 835ee117f7 | |
| | e5e14722b8 | |
| | af48519d65 | |
| | d6e9765604 | |
| | 0ab39f207c | |
| | ef2e065c77 | |
| | 80b4c151bd | |
| | 719cedde02 | |
| | cc5eb4765c | |
| | d557050eca | |
| | 469d1e9801 | |
| | 05857b954d | |
| | 81819661dc | |
| | 06afcf27a3 | |
| | 9587caa4f7 | |
| | 2f0d0924a7 | |
| | 2a976afe99 | |
| | fb723bc650 | |
| | e23286a336 | |
| | 2f5336388c | |
| | af58018a1e | |
| | cfb171b000 | |
| | e037cb0e3e | |
| | 749110aaa2 | |
| | 59b4a0fb91 | |
| | 191c8b4061 | |
| | 4e68e65cd9 | |
| | 33d2b24ff2 | |
| | 9c8652cd5b | |
| | dd4fcd5539 | |
| | 9de782e719 | |
| | 9f9a774340 | |
| | 8193d788fc | |
| | 5f1c2a4f7e | |
| | 8cce943cb9 | |
| | 1256c81bd0 | |
| | 6310096e85 | |
| | b246f9dfe2 | |
| | 34d6ab032f | |
| | 3bb975965d | |
| | 4547efab09 | |
| | 65593f459f | |
| | b08a5d3e2f | |
| | c3d55e2295 | |
| | cb7b8158e0 | |
| | 0e7288707e | |
| | 38a993b356 | |
| | 107f390ae8 | |
| | 0a9b0761dc | |
| | d4634797f3 | |
| | 7a53378799 | |
| | 5cbe958645 | |
| | 2892c220ac | |
| | 227848a59d | |
| | b16d954dd3 | |
| | 7cfb90df10 | |
| | 26388c7757 | |
| | fde4bcfa3b | |
@@ -1,7 +1,12 @@
 # The Cozystack Maintainers
 
-| Maintainer | GitHub Username | Company |
-| ---------- | --------------- | ------- |
-| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix |
-| George Gaál | [@gecube](https://github.com/gecube) | Ænix |
-| Eduard Generalov | [@egeneralov](https://github.com/egeneralov) | Ænix |
+| Maintainer | GitHub Username | Company | Responsibility |
+| ---------- | --------------- | ------- | --------------------------------- |
+| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix | Core Maintainer |
+| George Gaál | [@gecube](https://github.com/gecube) | Ænix | DevOps Practices in Platform, Developers Advocate |
+| Kingdon Barrett | [@kingdonb](https://github.com/kingdonb) | Urmanac | FluxCD and flux-operator |
+| Timofei Larkin | [@lllamnyp](https://github.com/lllamnyp) | 3commas | Etcd-operator Lead |
+| Artem Bortnikov | [@aobort](https://github.com/aobort) | Timescale | Etcd-operator Lead |
+| Andrei Gumilev | [@chumkaska](https://github.com/chumkaska) | Ænix | Platform Documentation |
+| Timur Tukaev | [@tym83](https://github.com/tym83) | Ænix | Cozystack Website, Marketing, Community Management |
+| Kirill Klinchenkov | [@klinch0](https://github.com/klinch0) | Ænix | Core Maintainer |
Makefile (6 lines changed)

@@ -6,7 +6,9 @@ build:
    make -C packages/apps/mysql image
    make -C packages/apps/clickhouse image
    make -C packages/apps/kubernetes image
    make -C packages/extra/monitoring image
    make -C packages/system/cozystack-api image
    make -C packages/system/cozystack-controller image
    make -C packages/system/cilium image
    make -C packages/system/kubeovn image
    make -C packages/system/dashboard image
@@ -34,6 +36,10 @@ assets:
    make -C packages/core/installer/ assets

test:
    test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
    make -C packages/core/testing apply
    make -C packages/core/testing test
    make -C packages/core/testing test-applications

generate:
    hack/update-codegen.sh
@@ -1,4 +1,4 @@
-API rule violation: list_type_missing,github.com/aenix.io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
+API rule violation: list_type_missing,github.com/aenix-io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
api/v1alpha1/groupversion_info.go (new file, 36 lines)

@@ -0,0 +1,36 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=cozystack.io
package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
    // GroupVersion is group version used to register these objects.
    GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}

    // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
    SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

    // AddToScheme adds the types in this group-version to the given scheme.
    AddToScheme = SchemeBuilder.AddToScheme
)
api/v1alpha1/workload_types.go (new file, 70 lines)

@@ -0,0 +1,70 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadStatus defines the observed state of Workload
type WorkloadStatus struct {
    // Kind represents the type of workload (redis, postgres, etc.)
    // +required
    Kind string `json:"kind"`

    // Type represents the specific role of the workload (redis, sentinel, etc.)
    // If not specified, defaults to Kind
    // +optional
    Type string `json:"type,omitempty"`

    // Resources specifies the compute resources allocated to this workload
    // +required
    Resources map[string]resource.Quantity `json:"resources"`

    // Operational indicates if all pods of the workload are ready
    // +optional
    Operational bool `json:"operational"`
}

// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".status.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.type"
// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu"
// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=`.status.operational`

// Workload is the Schema for the workloads API
type Workload struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Status WorkloadStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadList contains a list of Workload
type WorkloadList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items []Workload `json:"items"`
}

func init() {
    SchemeBuilder.Register(&Workload{}, &WorkloadList{})
}
api/v1alpha1/workloadmonitor_types.go (new file, 91 lines)

@@ -0,0 +1,91 @@
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadMonitorSpec defines the desired state of WorkloadMonitor
type WorkloadMonitorSpec struct {
    // Selector is a label selector to find workloads to monitor
    // +required
    Selector map[string]string `json:"selector"`

    // Kind specifies the kind of the workload
    // +optional
    Kind string `json:"kind,omitempty"`

    // Type specifies the type of the workload
    // +optional
    Type string `json:"type,omitempty"`

    // Version specifies the version of the workload
    // +optional
    Version string `json:"version,omitempty"`

    // MinReplicas specifies the minimum number of replicas that should be available
    // +kubebuilder:validation:Minimum=0
    // +optional
    MinReplicas *int32 `json:"minReplicas,omitempty"`

    // Replicas is the desired number of replicas
    // If not specified, will use observedReplicas as the target
    // +kubebuilder:validation:Minimum=0
    // +optional
    Replicas *int32 `json:"replicas,omitempty"`
}

// WorkloadMonitorStatus defines the observed state of WorkloadMonitor
type WorkloadMonitorStatus struct {
    // Operational indicates if the workload meets all operational requirements
    // +optional
    Operational *bool `json:"operational,omitempty"`

    // AvailableReplicas is the number of ready replicas
    // +optional
    AvailableReplicas int32 `json:"availableReplicas"`

    // ObservedReplicas is the total number of pods observed
    // +optional
    ObservedReplicas int32 `json:"observedReplicas"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".spec.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
// +kubebuilder:printcolumn:name="MinReplicas",type="integer",JSONPath=".spec.minReplicas"
// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas"
// +kubebuilder:printcolumn:name="Observed",type="integer",JSONPath=".status.observedReplicas"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=".status.operational"

// WorkloadMonitor is the Schema for the workloadmonitors API
type WorkloadMonitor struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec   WorkloadMonitorSpec   `json:"spec,omitempty"`
    Status WorkloadMonitorStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadMonitorList contains a list of WorkloadMonitor
type WorkloadMonitorList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items []WorkloadMonitor `json:"items"`
}

func init() {
    SchemeBuilder.Register(&WorkloadMonitor{}, &WorkloadMonitorList{})
}

// GetSelector returns the label selector from metadata
func (w *WorkloadMonitor) GetSelector() map[string]string {
    return w.Spec.Selector
}

// Selector specifies the label selector for workloads
type Selector map[string]string
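For orientation, the spec above maps onto a manifest like the following. This example is not part of the diff; it is a minimal sketch assuming the CRD is installed under the `cozystack.io/v1alpha1` group declared in groupversion_info.go, and the names and label key used here are only illustrative.

```yaml
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: redis-monitor        # hypothetical name
  namespace: tenant-example  # hypothetical namespace
spec:
  selector:
    app: redis               # illustrative label; must match the monitored Pods
  kind: redis
  type: sentinel
  minReplicas: 2
  replicas: 3
```

The controller added later in this diff lists Pods matching `spec.selector`, creates one Workload per Pod, and reports `status.operational: false` when fewer than `minReplicas` Pods are ready.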
api/v1alpha1/zz_generated.deepcopy.go (new file, 238 lines)

@@ -0,0 +1,238 @@
//go:build !ignore_autogenerated

/*
Copyright 2025 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/api/resource"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
    {
        in := &in
        *out = make(Selector, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in Selector) DeepCopy() Selector {
    if in == nil {
        return nil
    }
    out := new(Selector)
    in.DeepCopyInto(out)
    return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Workload) DeepCopyInto(out *Workload) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
func (in *Workload) DeepCopy() *Workload {
    if in == nil {
        return nil
    }
    out := new(Workload)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Workload) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Workload, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
func (in *WorkloadList) DeepCopy() *WorkloadList {
    if in == nil {
        return nil
    }
    out := new(WorkloadList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitor) DeepCopyInto(out *WorkloadMonitor) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitor.
func (in *WorkloadMonitor) DeepCopy() *WorkloadMonitor {
    if in == nil {
        return nil
    }
    out := new(WorkloadMonitor)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitor) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorList) DeepCopyInto(out *WorkloadMonitorList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]WorkloadMonitor, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorList.
func (in *WorkloadMonitorList) DeepCopy() *WorkloadMonitorList {
    if in == nil {
        return nil
    }
    out := new(WorkloadMonitorList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitorList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorSpec) DeepCopyInto(out *WorkloadMonitorSpec) {
    *out = *in
    if in.Selector != nil {
        in, out := &in.Selector, &out.Selector
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.MinReplicas != nil {
        in, out := &in.MinReplicas, &out.MinReplicas
        *out = new(int32)
        **out = **in
    }
    if in.Replicas != nil {
        in, out := &in.Replicas, &out.Replicas
        *out = new(int32)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorSpec.
func (in *WorkloadMonitorSpec) DeepCopy() *WorkloadMonitorSpec {
    if in == nil {
        return nil
    }
    out := new(WorkloadMonitorSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorStatus) DeepCopyInto(out *WorkloadMonitorStatus) {
    *out = *in
    if in.Operational != nil {
        in, out := &in.Operational, &out.Operational
        *out = new(bool)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorStatus.
func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
    if in == nil {
        return nil
    }
    out := new(WorkloadMonitorStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
    *out = *in
    if in.Resources != nil {
        in, out := &in.Resources, &out.Resources
        *out = make(map[string]resource.Quantity, len(*in))
        for key, val := range *in {
            (*out)[key] = val.DeepCopy()
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadStatus.
func (in *WorkloadStatus) DeepCopy() *WorkloadStatus {
    if in == nil {
        return nil
    }
    out := new(WorkloadStatus)
    in.DeepCopyInto(out)
    return out
}
@@ -19,7 +19,7 @@ package main
 import (
     "os"
 
-    "github.com/aenix.io/cozystack/pkg/cmd/server"
+    "github.com/aenix-io/cozystack/pkg/cmd/server"
     genericapiserver "k8s.io/apiserver/pkg/server"
     "k8s.io/component-base/cli"
 )
cmd/cozystack-assets-server/main.go (new file, 29 lines)

@@ -0,0 +1,29 @@
package main

import (
    "flag"
    "log"
    "net/http"
    "path/filepath"
)

func main() {
    addr := flag.String("address", ":8123", "Address to listen on")
    dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
    flag.Parse()

    absDir, err := filepath.Abs(*dir)
    if err != nil {
        log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
    }

    fs := http.FileServer(http.Dir(absDir))
    http.Handle("/", fs)

    log.Printf("Server starting on %s, serving directory %s", *addr, absDir)

    err = http.ListenAndServe(*addr, nil)
    if err != nil {
        log.Fatalf("Server failed to start: %v", err)
    }
}
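The assets server uses only the standard library, so it can be exercised locally. The flag values below are the defaults from the code; the local directory and file name are hypothetical and only illustrate the intent of serving installer assets over HTTP.

```sh
# Build and run the assets server, serving ./assets on the default port
go build -o cozystack-assets-server ./cmd/cozystack-assets-server
./cozystack-assets-server --address ":8123" --dir ./assets

# Fetch a served file (hypothetical file name)
curl -O http://localhost:8123/nocloud-amd64.raw.xz
```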
cmd/cozystack-controller/main.go (new file, 210 lines)

@@ -0,0 +1,210 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "crypto/tls"
    "flag"
    "os"
    "time"

    // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
    // to ensure that exec-entrypoint and run can make use of them.
    _ "k8s.io/client-go/plugin/pkg/client/auth"

    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
    metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
    "sigs.k8s.io/controller-runtime/pkg/webhook"

    cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
    "github.com/aenix-io/cozystack/internal/controller"
    "github.com/aenix-io/cozystack/internal/telemetry"
    // +kubebuilder:scaffold:imports
)

var (
    scheme   = runtime.NewScheme()
    setupLog = ctrl.Log.WithName("setup")
)

func init() {
    utilruntime.Must(clientgoscheme.AddToScheme(scheme))

    utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
    // +kubebuilder:scaffold:scheme
}

func main() {
    var metricsAddr string
    var enableLeaderElection bool
    var probeAddr string
    var secureMetrics bool
    var enableHTTP2 bool
    var disableTelemetry bool
    var telemetryEndpoint string
    var telemetryInterval string
    var cozystackVersion string
    var tlsOpts []func(*tls.Config)
    flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
        "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
    flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
    flag.BoolVar(&enableLeaderElection, "leader-elect", false,
        "Enable leader election for controller manager. "+
            "Enabling this will ensure there is only one active controller manager.")
    flag.BoolVar(&secureMetrics, "metrics-secure", true,
        "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
    flag.BoolVar(&enableHTTP2, "enable-http2", false,
        "If set, HTTP/2 will be enabled for the metrics and webhook servers")
    flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
        "Disable telemetry collection")
    flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
        "Endpoint for sending telemetry data")
    flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
        "Interval between telemetry data collection (e.g. 15m, 1h)")
    flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
        "Version of Cozystack")
    opts := zap.Options{
        Development: false,
    }
    opts.BindFlags(flag.CommandLine)
    flag.Parse()

    // Parse telemetry interval
    interval, err := time.ParseDuration(telemetryInterval)
    if err != nil {
        setupLog.Error(err, "invalid telemetry interval")
        os.Exit(1)
    }

    // Configure telemetry
    telemetryConfig := telemetry.Config{
        Disabled: disableTelemetry,
        Endpoint: telemetryEndpoint,
        Interval: interval,
        CozystackVersion: cozystackVersion,
    }

    ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

    // if the enable-http2 flag is false (the default), http/2 should be disabled
    // due to its vulnerabilities. More specifically, disabling http/2 will
    // prevent from being vulnerable to the HTTP/2 Stream Cancellation and
    // Rapid Reset CVEs. For more information see:
    // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
    // - https://github.com/advisories/GHSA-4374-p667-p6c8
    disableHTTP2 := func(c *tls.Config) {
        setupLog.Info("disabling http/2")
        c.NextProtos = []string{"http/1.1"}
    }

    if !enableHTTP2 {
        tlsOpts = append(tlsOpts, disableHTTP2)
    }

    webhookServer := webhook.NewServer(webhook.Options{
        TLSOpts: tlsOpts,
    })

    // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
    // More info:
    // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
    // - https://book.kubebuilder.io/reference/metrics.html
    metricsServerOptions := metricsserver.Options{
        BindAddress: metricsAddr,
        SecureServing: secureMetrics,
        TLSOpts: tlsOpts,
    }

    if secureMetrics {
        // FilterProvider is used to protect the metrics endpoint with authn/authz.
        // These configurations ensure that only authorized users and service accounts
        // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
        // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
        metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization

        // TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
        // generate self-signed certificates for the metrics server. While convenient for development and testing,
        // this setup is not recommended for production.
    }

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
        Scheme: scheme,
        Metrics: metricsServerOptions,
        WebhookServer: webhookServer,
        HealthProbeBindAddress: probeAddr,
        LeaderElection: enableLeaderElection,
        LeaderElectionID: "19a0338c.cozystack.io",
        // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
        // when the Manager ends. This requires the binary to immediately end when the
        // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
        // speeds up voluntary leader transitions as the new leader don't have to wait
        // LeaseDuration time first.
        //
        // In the default scaffold provided, the program ends immediately after
        // the manager stops, so would be fine to enable this option. However,
        // if you are doing or is intended to do any operation such as perform cleanups
        // after the manager stops then its usage might be unsafe.
        // LeaderElectionReleaseOnCancel: true,
    })
    if err != nil {
        setupLog.Error(err, "unable to start manager")
        os.Exit(1)
    }

    if err = (&controller.WorkloadMonitorReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }).SetupWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
        os.Exit(1)
    }
    // +kubebuilder:scaffold:builder

    if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
        setupLog.Error(err, "unable to set up health check")
        os.Exit(1)
    }
    if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
        setupLog.Error(err, "unable to set up ready check")
        os.Exit(1)
    }

    // Initialize telemetry collector
    collector, err := telemetry.NewCollector(mgr.GetClient(), &telemetryConfig, mgr.GetConfig())
    if err != nil {
        setupLog.V(1).Error(err, "unable to create telemetry collector, telemetry will be disabled")
    }

    if collector != nil {
        if err := mgr.Add(collector); err != nil {
            setupLog.Error(err, "unable to set up telemetry collector")
            setupLog.V(1).Error(err, "unable to set up telemetry collector, continuing without telemetry")
        }
    }

    setupLog.Info("starting manager")
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        setupLog.Error(err, "problem running manager")
        os.Exit(1)
    }
}
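The flags defined above can be combined as in the following invocation. This is an illustrative sketch, not taken from the diff: it assumes a locally built binary and a valid kubeconfig context; the endpoint value shown is the compiled-in default, and the interval and version values are examples.

```sh
# Run the controller with leader election and hourly telemetry reporting
./cozystack-controller \
  --leader-elect \
  --metrics-bind-address ":8443" \
  --health-probe-bind-address ":8081" \
  --telemetry-endpoint "https://telemetry.cozystack.io" \
  --telemetry-interval "1h" \
  --cozystack-version "v0.26.1"

# Or opt out of telemetry entirely
./cozystack-controller --disable-telemetry
```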
dashboards/control-plane/kube-etcd.json (new file, 3611 lines): diff suppressed because it is too large
dashboards/flux/flux-control-plane.json (new file, 1725 lines): diff suppressed because it is too large
dashboards/flux/flux-stats.json (new file, 1391 lines): diff suppressed because it is too large
dashboards/goldpinger/goldpinger.json (new file, 1206 lines): diff suppressed because it is too large
dashboards/kafka/strimzi-kafka.json (new file, 2940 lines): diff suppressed because it is too large
dashboards/kubevirt/kubevirt-control-plane.json (new file, 5196 lines): diff suppressed because it is too large
go.mod (24 lines changed)

@@ -1,22 +1,27 @@
// This is a generated file. Do not edit directly.

module github.com/aenix.io/cozystack
module github.com/aenix-io/cozystack

go 1.23.0

require (
    github.com/emicklei/go-restful/v3 v3.11.0
    github.com/fluxcd/helm-controller/api v1.1.0
    github.com/google/gofuzz v1.2.0
    github.com/onsi/ginkgo/v2 v2.19.0
    github.com/onsi/gomega v1.33.1
    github.com/spf13/cobra v1.8.1
    github.com/stretchr/testify v1.9.0
    gopkg.in/yaml.v2 v2.4.0
    k8s.io/api v0.31.2
    k8s.io/apiextensions-apiserver v0.31.2
    k8s.io/apimachinery v0.31.2
    k8s.io/apiserver v0.31.2
    k8s.io/client-go v0.31.2
    k8s.io/code-generator v0.31.2
    k8s.io/component-base v0.31.2
    k8s.io/klog/v2 v2.130.1
    k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2
    k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
    sigs.k8s.io/controller-runtime v0.19.0
    sigs.k8s.io/structured-merge-diff/v4 v4.4.1
)

@@ -31,23 +36,27 @@ require (
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/emicklei/go-restful/v3 v3.11.0 // indirect
    github.com/evanphx/json-patch/v5 v5.9.0 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/fluxcd/helm-controller/api v1.1.0 // indirect
    github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
    github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/fxamacker/cbor/v2 v2.7.0 // indirect
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-logr/zapr v1.3.0 // indirect
    github.com/go-openapi/jsonpointer v0.21.0 // indirect
    github.com/go-openapi/jsonreference v0.20.2 // indirect
    github.com/go-openapi/swag v0.23.0 // indirect
    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/cel-go v0.21.0 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/google/go-cmp v0.6.0 // indirect
    github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
@@ -84,7 +93,6 @@ require (
    go.uber.org/zap v1.27.0 // indirect
    golang.org/x/crypto v0.28.0 // indirect
    golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
    golang.org/x/mod v0.21.0 // indirect
    golang.org/x/net v0.30.0 // indirect
    golang.org/x/oauth2 v0.23.0 // indirect
    golang.org/x/sync v0.8.0 // indirect
@@ -93,6 +101,7 @@ require (
    golang.org/x/text v0.19.0 // indirect
    golang.org/x/time v0.7.0 // indirect
    golang.org/x/tools v0.26.0 // indirect
    gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
    google.golang.org/grpc v1.65.0 // indirect
@@ -100,14 +109,9 @@ require (
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/api v0.31.2 // indirect
    k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
    k8s.io/klog/v2 v2.130.1 // indirect
    k8s.io/kms v0.31.2 // indirect
    sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
    sigs.k8s.io/controller-runtime v0.19.0 // indirect
    sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
    sigs.k8s.io/yaml v1.4.0 // indirect
)
go.sum (12 lines changed)

@@ -26,6 +26,10 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/helm-controller/api v1.1.0 h1:NS5Wm3U6Kv4w7Cw2sDOV++vf2ecGfFV00x1+2Y3QcOY=
@@ -214,8 +218,6 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -252,6 +254,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
@@ -287,12 +291,8 @@ k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/code-generator v0.31.2 h1:xLWxG0HEpMSHfcM//3u3Ro2Hmc6AyyLINQS//Z2GEOI=
k8s.io/code-generator v0.31.2/go.mod h1:eEQHXgBU/m7LDaToDoiz3t97dUUVyOblQdwOr8rivqc=
k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI=
@@ -1,5 +1,5 @@
 /*
-Copyright 2024 The Cozystack Authors.
+Copyright 2025 The Cozystack Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ fix_d8() {
}

swap_pvc_overview() {
    jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
    jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
}

deprectaed_remove_faq() {
@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json
@@ -78,6 +78,10 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
EOT

@@ -109,4 +113,3 @@ done <<\EOT
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
EOT
@@ -113,8 +113,6 @@ machine:
- usermode_helper=disabled
- name: zfs
- name: spl
install:
image: ghcr.io/aenix-io/cozystack/talos:v1.8.4
files:
- content: |
[plugins]
@@ -231,6 +229,7 @@ sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
@@ -299,6 +298,9 @@ spec:
avoidBuggyIPs: false
EOT

# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m

kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
"host": "example.org",
"ingress": true,
@@ -22,6 +22,7 @@ SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"

source "${CODEGEN_PKG}/kube_codegen.sh"

@@ -46,3 +47,6 @@ kube::codegen::gen_openapi \
    ${update_report:+"${update_report}"} \
    --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
    "${SCRIPT_ROOT}/pkg/apis"

$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=packages/system/cozystack-controller/templates/crds
internal/controller/suite_test.go (new file, 96 lines)

@@ -0,0 +1,96 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"
    "fmt"
    "path/filepath"
    "runtime"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc

func TestControllers(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
        ErrorIfCRDPathMissing: true,

        // The BinaryAssetsDirectory is only required if you want to run the tests directly
        // without call the makefile target test. If not informed it will look for the
        // default path defined in controller-runtime which is /usr/local/kubebuilder/.
        // Note that you must have the required binaries setup under the bin directory to perform
        // the tests directly. When we run make test it will be setup and used automatically.
        BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
            fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
    }

    var err error
    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    err = cozystackiov1alpha1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())

})

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    cancel()
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})
internal/controller/workloadmonitor_controller.go (new file, 273 lines)

@@ -0,0 +1,273 @@
package controller

import (
    "context"
    "encoding/json"
    "sort"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadMonitorReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch

// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
    for _, c := range pod.Status.Conditions {
        if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
            return true
        }
    }
    return false
}

// updateOwnerReferences adds the given monitor as a new owner reference to the object if not already present.
// It then sorts the owner references to enforce a consistent order.
func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
    // Retrieve current owner references
    owners := obj.GetOwnerReferences()

    // Check if current monitor is already in owner references
    var alreadyOwned bool
    for _, ownerRef := range owners {
        if ownerRef.UID == monitor.GetUID() {
            alreadyOwned = true
            break
        }
    }

    runtimeObj, ok := monitor.(runtime.Object)
    if !ok {
        return
    }
    gvk := runtimeObj.GetObjectKind().GroupVersionKind()

    // If not already present, add new owner reference without controller flag
    if !alreadyOwned {
        newOwnerRef := metav1.OwnerReference{
            APIVersion: gvk.GroupVersion().String(),
            Kind: gvk.Kind,
            Name: monitor.GetName(),
            UID: monitor.GetUID(),
            // Set Controller to false to avoid conflict as multiple controllers are not allowed
            Controller: pointer.BoolPtr(false),
            BlockOwnerDeletion: pointer.BoolPtr(true),
        }
        owners = append(owners, newOwnerRef)
    }

    // Sort owner references to enforce a consistent order by UID
    sort.SliceStable(owners, func(i, j int) bool {
        return owners[i].UID < owners[j].UID
    })

    // Update the owner references of the object
    obj.SetOwnerReferences(owners)
}

// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
    ctx context.Context,
    monitor *cozyv1alpha1.WorkloadMonitor,
    pod corev1.Pod,
) error {
    logger := log.FromContext(ctx)

    // Combine both init containers and normal containers to sum resources properly
    combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)

    // totalResources will store the sum of all container resource limits
    totalResources := make(map[string]resource.Quantity)

    // Iterate over all containers to aggregate their Limits
    for _, container := range combinedContainers {
        for name, qty := range container.Resources.Limits {
            if existing, exists := totalResources[name.String()]; exists {
                existing.Add(qty)
                totalResources[name.String()] = existing
            } else {
                totalResources[name.String()] = qty.DeepCopy()
            }
        }
    }

    // If annotation "workload.cozystack.io/resources" is present, parse and merge
    if resourcesStr, ok := pod.Annotations["workload.cozystack.io/resources"]; ok {
        annRes := map[string]string{}
        if err := json.Unmarshal([]byte(resourcesStr), &annRes); err != nil {
            logger.Error(err, "Failed to parse resources annotation", "pod", pod.Name)
        } else {
            for k, v := range annRes {
                parsed, err := resource.ParseQuantity(v)
                if err != nil {
                    logger.Error(err, "Failed to parse resource quantity from annotation", "key", k, "value", v)
                    continue
                }
                totalResources[k] = parsed
            }
        }
    }
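    // Example (illustrative, not part of this diff): a Pod annotation that the block above
    // would merge into totalResources. Keys and values follow standard Kubernetes
    // resource.Quantity notation; the quantities shown here are hypothetical.
    //
    //     workload.cozystack.io/resources: '{"cpu":"500m","memory":"1Gi"}'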
|
||||
workload := &cozyv1alpha1.Workload{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
|
||||
// Update owner references with the new monitor
|
||||
updateOwnerReferences(workload.GetObjectMeta(), monitor)
|
||||
|
||||
// Copy labels from the Pod if needed
|
||||
workload.Labels = pod.Labels
|
||||
|
||||
// Fill Workload status fields:
|
||||
workload.Status.Kind = monitor.Spec.Kind
|
||||
workload.Status.Type = monitor.Spec.Type
|
||||
workload.Status.Resources = totalResources
|
||||
workload.Status.Operational = r.isPodReady(&pod)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
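The workload.cozystack.io/resources annotation consumed above is a flat JSON object mapping resource names to quantities; any key it declares overrides the summed container limits for that key. A hedged sketch of the expected payload (the key names are illustrative):

    // Building the annotation value the controller can parse:
    raw, _ := json.Marshal(map[string]string{"cpu": "2", "memory": "4Gi"})
    // raw == `{"cpu":"2","memory":"4Gi"}`; set this string as the Pod annotation
    //   workload.cozystack.io/resources: '{"cpu":"2","memory":"4Gi"}'
    // and the resulting Workload reports cpu=2, memory=4Gi regardless of the container limits.
    qty, _ := resource.ParseQuantity("4Gi")
    fmt.Println(qty.Value()) // 4294967296
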
// Reconcile is the main reconcile loop.
// 1. It reconciles WorkloadMonitor objects themselves (create/update/delete).
// 2. It also reconciles Pod events mapped to WorkloadMonitor via label selector.
func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)

    // Fetch the WorkloadMonitor object if it exists
    monitor := &cozyv1alpha1.WorkloadMonitor{}
    err := r.Get(ctx, req.NamespacedName, monitor)
    if err != nil {
        // If the resource is not found, it may be a Pod event (mapFunc).
        if apierrors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "Unable to fetch WorkloadMonitor")
        return ctrl.Result{}, err
    }

    // List Pods that match the WorkloadMonitor's selector
    podList := &corev1.PodList{}
    if err := r.List(
        ctx,
        podList,
        client.InNamespace(monitor.Namespace),
        client.MatchingLabels(monitor.Spec.Selector),
    ); err != nil {
        logger.Error(err, "Unable to list Pods for WorkloadMonitor", "monitor", monitor.Name)
        return ctrl.Result{}, err
    }

    var observedReplicas, availableReplicas int32

    // For each matching Pod, reconcile the corresponding Workload
    for _, pod := range podList.Items {
        observedReplicas++
        if err := r.reconcilePodForMonitor(ctx, monitor, pod); err != nil {
            logger.Error(err, "Failed to reconcile Workload for Pod", "pod", pod.Name)
            continue
        }
        if r.isPodReady(&pod) {
            availableReplicas++
        }
    }

    // Update WorkloadMonitor status based on observed pods
    monitor.Status.ObservedReplicas = observedReplicas
    monitor.Status.AvailableReplicas = availableReplicas

    // Default to operational = true, but check MinReplicas if set
    monitor.Status.Operational = pointer.Bool(true)
    if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
        monitor.Status.Operational = pointer.Bool(false)
    }

    // Update the WorkloadMonitor status in the cluster
    if err := r.Status().Update(ctx, monitor); err != nil {
        logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
        return ctrl.Result{}, err
    }

    // Return without requeue if we want purely event-driven reconciliations
    return ctrl.Result{}, nil
}

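The status handling above reduces to one rule: a monitor is operational unless minReplicas is set and fewer pods are ready than it requires. Restated as a standalone helper purely for illustration (this function does not exist in the controller):

    // operational mirrors the rule used in Reconcile above.
    func operational(minReplicas *int32, availableReplicas int32) bool {
        return minReplicas == nil || availableReplicas >= *minReplicas
    }
    // operational(&two, 1) == false (below the configured minimum, two == int32(2))
    // operational(&two, 2) == true
    // operational(nil, 0)  == true  (no minimum configured, default is operational)
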
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        // Watch WorkloadMonitor objects
        For(&cozyv1alpha1.WorkloadMonitor{}).
        // Also watch Pod objects and map them back to WorkloadMonitor if labels match
        Watches(
            &corev1.Pod{},
            handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
                pod, ok := obj.(*corev1.Pod)
                if !ok {
                    return nil
                }

                var monitorList cozyv1alpha1.WorkloadMonitorList
                // List all WorkloadMonitors in the same namespace
                if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
                    return nil
                }

                // Match each monitor's selector with the Pod's labels
                var requests []reconcile.Request
                for _, m := range monitorList.Items {
                    matches := true
                    for k, v := range m.Spec.Selector {
                        if podVal, exists := pod.Labels[k]; !exists || podVal != v {
                            matches = false
                            break
                        }
                    }
                    if matches {
                        requests = append(requests, reconcile.Request{
                            NamespacedName: types.NamespacedName{
                                Namespace: m.Namespace,
                                Name:      m.Name,
                            },
                        })
                    }
                }
                return requests
            }),
        ).
        // Watch for changes to Workload objects we create (owned by WorkloadMonitor)
        Owns(&cozyv1alpha1.Workload{}).
        Complete(r)
}

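The hand-written selector loop in the map function is equivalent to treating spec.selector as a label set. An alternative formulation, shown only as a hedged sketch (it assumes the standard k8s.io/apimachinery/pkg/labels package, which this file does not currently import):

    sel := labels.SelectorFromSet(labels.Set(m.Spec.Selector))
    if sel.Matches(labels.Set(pod.Labels)) {
        requests = append(requests, reconcile.Request{
            NamespacedName: types.NamespacedName{Namespace: m.Namespace, Name: m.Name},
        })
    }

In both formulations an empty selector matches every Pod in the namespace.
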
internal/telemetry/collector.go (new file, 292 lines)
@@ -0,0 +1,292 @@
|
||||
package telemetry

import (
    "bytes"
    "context"
    "fmt"
    "net/http"
    "strings"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// Collector handles telemetry data collection and sending
type Collector struct {
    client          client.Client
    discoveryClient discovery.DiscoveryInterface
    config          *Config
    ticker          *time.Ticker
    stopCh          chan struct{}
}

// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
    discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
    if err != nil {
        return nil, fmt.Errorf("failed to create discovery client: %w", err)
    }
    return &Collector{
        client:          client,
        discoveryClient: discoveryClient,
        config:          config,
    }, nil
}

// Start implements manager.Runnable
func (c *Collector) Start(ctx context.Context) error {
    if c.config.Disabled {
        return nil
    }

    c.ticker = time.NewTicker(c.config.Interval)
    c.stopCh = make(chan struct{})

    // Initial collection
    c.collect(ctx)

    for {
        select {
        case <-ctx.Done():
            c.ticker.Stop()
            close(c.stopCh)
            return nil
        case <-c.ticker.C:
            c.collect(ctx)
        }
    }
}

// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
    // Only run telemetry collector on the leader
    return true
}

// Stop halts telemetry collection
func (c *Collector) Stop() {
    close(c.stopCh)
}

// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
    gb := size.Value() / (1024 * 1024 * 1024)
    switch {
    case gb <= 1:
        return "1Gi"
    case gb <= 5:
        return "5Gi"
    case gb <= 10:
        return "10Gi"
    case gb <= 25:
        return "25Gi"
    case gb <= 50:
        return "50Gi"
    case gb <= 100:
        return "100Gi"
    case gb <= 250:
        return "250Gi"
    case gb <= 500:
        return "500Gi"
    case gb <= 1024:
        return "1Ti"
    case gb <= 2048:
        return "2Ti"
    case gb <= 5120:
        return "5Ti"
    default:
        return "10Ti"
    }
}

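A quick sanity check of the bucketing (hedged sketch; note that the integer division means anything below a full GiB also lands in the 1Gi bucket):

    fmt.Println(getSizeGroup(resource.MustParse("30Gi")))  // "50Gi" (30 GiB falls into the next bucket up)
    fmt.Println(getSizeGroup(resource.MustParse("512Mi"))) // "1Gi"  (0 full GiB after integer division)
    fmt.Println(getSizeGroup(resource.MustParse("3Ti")))   // "5Ti"  (3072 GiB <= 5120)
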
// collect gathers and sends telemetry data
|
||||
func (c *Collector) collect(ctx context.Context) {
|
||||
logger := log.FromContext(ctx).V(1)
|
||||
|
||||
// Get cluster ID from kube-system namespace
|
||||
var kubeSystemNS corev1.Namespace
|
||||
if err := c.client.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
clusterID := string(kubeSystemNS.UID)
|
||||
|
||||
var cozystackCM corev1.ConfigMap
|
||||
if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
oidcEnabled := cozystackCM.Data["oidc-enabled"]
|
||||
bundle := cozystackCM.Data["bundle-name"]
|
||||
bundleEnable := cozystackCM.Data["bundle-enable"]
|
||||
bundleDisable := cozystackCM.Data["bundle-disable"]
|
||||
|
||||
// Get Kubernetes version from nodes
|
||||
var nodeList corev1.NodeList
|
||||
if err := c.client.List(ctx, &nodeList); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Create metrics buffer
|
||||
var metrics strings.Builder
|
||||
|
||||
// Add Cozystack info metric
|
||||
if len(nodeList.Items) > 0 {
|
||||
k8sVersion, _ := c.discoveryClient.ServerVersion()
|
||||
metrics.WriteString(fmt.Sprintf(
|
||||
"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
|
||||
c.config.CozystackVersion,
|
||||
k8sVersion,
|
||||
oidcEnabled,
|
||||
bundle,
|
||||
bundleEnable,
|
||||
bundleDisable,
|
||||
))
|
||||
}
|
||||
|
||||
// Collect node metrics
|
||||
nodeOSCount := make(map[string]int)
|
||||
for _, node := range nodeList.Items {
|
||||
key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
|
||||
nodeOSCount[key] = nodeOSCount[key] + 1
|
||||
}
|
||||
|
||||
for osKey, count := range nodeOSCount {
|
||||
metrics.WriteString(fmt.Sprintf(
|
||||
"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
|
||||
osKey,
|
||||
nodeList.Items[0].Status.NodeInfo.KernelVersion,
|
||||
count,
|
||||
))
|
||||
}
|
||||
|
||||
// Collect LoadBalancer services metrics
|
||||
var serviceList corev1.ServiceList
|
||||
if err := c.client.List(ctx, &serviceList); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
|
||||
} else {
|
||||
lbCount := 0
|
||||
for _, svc := range serviceList.Items {
|
||||
if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
|
||||
lbCount++
|
||||
}
|
||||
}
|
||||
metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
|
||||
}
|
||||
|
||||
// Count tenant namespaces
|
||||
var nsList corev1.NamespaceList
|
||||
if err := c.client.List(ctx, &nsList); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
|
||||
} else {
|
||||
tenantCount := 0
|
||||
for _, ns := range nsList.Items {
|
||||
if strings.HasPrefix(ns.Name, "tenant-") {
|
||||
tenantCount++
|
||||
}
|
||||
}
|
||||
metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
|
||||
}
|
||||
|
||||
// Collect PV metrics grouped by driver and size
|
||||
var pvList corev1.PersistentVolumeList
|
||||
if err := c.client.List(ctx, &pvList); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
|
||||
} else {
|
||||
// Map to store counts by size and driver
|
||||
pvMetrics := make(map[string]map[string]int)
|
||||
|
||||
for _, pv := range pvList.Items {
|
||||
if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
|
||||
sizeGroup := getSizeGroup(capacity)
|
||||
|
||||
// Get the CSI driver name
|
||||
driver := "unknown"
|
||||
if pv.Spec.CSI != nil {
|
||||
driver = pv.Spec.CSI.Driver
|
||||
} else if pv.Spec.HostPath != nil {
|
||||
driver = "hostpath"
|
||||
} else if pv.Spec.NFS != nil {
|
||||
driver = "nfs"
|
||||
}
|
||||
|
||||
// Initialize nested map if needed
|
||||
if _, exists := pvMetrics[sizeGroup]; !exists {
|
||||
pvMetrics[sizeGroup] = make(map[string]int)
|
||||
}
|
||||
|
||||
// Increment count for this size/driver combination
|
||||
pvMetrics[sizeGroup][driver]++
|
||||
}
|
||||
}
|
||||
|
||||
// Write metrics
|
||||
for size, drivers := range pvMetrics {
|
||||
for driver, count := range drivers {
|
||||
metrics.WriteString(fmt.Sprintf(
|
||||
"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
|
||||
driver,
|
||||
size,
|
||||
count,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect workload metrics
|
||||
var monitorList cozyv1alpha1.WorkloadMonitorList
|
||||
if err := c.client.List(ctx, &monitorList); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
for _, monitor := range monitorList.Items {
|
||||
metrics.WriteString(fmt.Sprintf(
|
||||
"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
|
||||
monitor.UID,
|
||||
monitor.Spec.Kind,
|
||||
monitor.Spec.Type,
|
||||
monitor.Spec.Version,
|
||||
monitor.Status.ObservedReplicas,
|
||||
))
|
||||
}
|
||||
|
||||
// Send metrics
|
||||
if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
|
||||
logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// sendMetrics sends collected metrics to the configured endpoint
func (c *Collector) sendMetrics(clusterID, metrics string) error {
    req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
    if err != nil {
        return fmt.Errorf("failed to create request: %w", err)
    }

    req.Header.Set("Content-Type", "text/plain")
    req.Header.Set("X-Cluster-ID", clusterID)

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return fmt.Errorf("failed to send request: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
    }

    return nil
}

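Since the payload is plain Prometheus text exposition keyed by the X-Cluster-ID header, the wire format is easy to inspect locally. A hedged test sketch using only the standard library (net/http/httptest and io are assumed imports; the metric line is just an example):

    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        body, _ := io.ReadAll(r.Body)
        fmt.Printf("cluster=%s\n%s", r.Header.Get("X-Cluster-ID"), body)
        w.WriteHeader(http.StatusOK)
    }))
    defer srv.Close()

    c := &Collector{config: &Config{Endpoint: srv.URL}}
    _ = c.sendMetrics("test-cluster-uid", "cozy_tenants_count 3\n")
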
internal/telemetry/config.go (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
package telemetry

import (
    "time"
)

// Config holds telemetry configuration
type Config struct {
    // Disabled turns off telemetry collection when set to true
    Disabled bool
    // Endpoint to send telemetry data to
    Endpoint string
    // Interval between telemetry data collection
    Interval time.Duration
    // CozystackVersion represents the current version of Cozystack
    CozystackVersion string
}

// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
    return &Config{
        Disabled:         false,
        Endpoint:         "https://telemetry.cozystack.io",
        Interval:         15 * time.Minute,
        CozystackVersion: "unknown",
    }
}

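With this configuration the collector is registered with the controller-runtime manager as a runnable; it implements Start and NeedLeaderElection, so it runs only on the elected leader. A hedged wiring sketch for the operator's main package (mgr, setupLog, and the version string are assumptions, not code from this change):

    cfg := telemetry.DefaultConfig()
    cfg.CozystackVersion = "v0.26.1" // illustrative; the real value should come from the build

    collector, err := telemetry.NewCollector(mgr.GetClient(), cfg, mgr.GetConfig())
    if err != nil {
        setupLog.Error(err, "unable to create telemetry collector")
        os.Exit(1)
    }
    if err := mgr.Add(collector); err != nil {
        setupLog.Error(err, "unable to register telemetry collector")
        os.Exit(1)
    }
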
@@ -68,7 +68,7 @@ spec:
|
||||
serviceAccountName: cozystack
|
||||
containers:
|
||||
- name: cozystack
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.21.0"
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
|
||||
env:
|
||||
- name: KUBERNETES_SERVICE_HOST
|
||||
value: localhost
|
||||
@@ -86,13 +86,12 @@ spec:
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: darkhttpd
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.21.0"
|
||||
- name: assets
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
|
||||
command:
|
||||
- /usr/bin/darkhttpd
|
||||
- /cozystack/assets
|
||||
- --port
|
||||
- "8123"
|
||||
- /usr/bin/cozystack-assets-server
|
||||
- "-dir=/cozystack/assets"
|
||||
- "-address=:8123"
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8123
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:dda84420cb8648721299221268a00d72a05c7af5b7fb452619bac727068b9e61
|
||||
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/postgres-backup:0.7.1@sha256:4d2271b345240c6c5b37599996745646012004b0f57e31c4c9deb1aba7408a51
|
||||
ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:d1f7692b6761f46f24687d885ec335330280346ae4a9ff28b3179681b36106b7
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:3e8ae1bd576858a88c995aefb1431a1b89f55b7a1ef60575fecae4bbf5aa0d4e
|
||||
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:854b3908114de1876038eb9902577595cce93553ce89bf75ac956d22f1e8b8cc
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.3.1
|
||||
version: 0.3.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -57,6 +57,12 @@ spec:
|
||||
class: {{ . }}
|
||||
{{- end }}
|
||||
deleteClaim: true
|
||||
metricsConfig:
|
||||
type: jmxPrometheusExporter
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-metrics
|
||||
key: kafka-metrics-config.yml
|
||||
zookeeper:
|
||||
replicas: {{ .Values.zookeeper.replicas }}
|
||||
storage:
|
||||
@@ -68,6 +74,12 @@ spec:
|
||||
class: {{ . }}
|
||||
{{- end }}
|
||||
deleteClaim: false
|
||||
metricsConfig:
|
||||
type: jmxPrometheusExporter
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-metrics
|
||||
key: kafka-metrics-config.yml
|
||||
entityOperator:
|
||||
topicOperator: {}
|
||||
userOperator: {}
|
||||
|
||||
packages/apps/kafka/templates/metrics-configmap.yaml (new file, 198 lines)
@@ -0,0 +1,198 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-metrics
|
||||
data:
|
||||
kafka-metrics-config.yml: |
|
||||
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
|
||||
lowercaseOutputName: true
|
||||
rules:
|
||||
# Special cases and very specific rules
|
||||
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
|
||||
name: kafka_server_$1_$2
|
||||
type: GAUGE
|
||||
labels:
|
||||
clientId: "$3"
|
||||
topic: "$4"
|
||||
partition: "$5"
|
||||
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
|
||||
name: kafka_server_$1_$2
|
||||
type: GAUGE
|
||||
labels:
|
||||
clientId: "$3"
|
||||
broker: "$4:$5"
|
||||
- pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
|
||||
name: kafka_server_$1_connections_tls_info
|
||||
type: GAUGE
|
||||
labels:
|
||||
cipher: "$2"
|
||||
protocol: "$3"
|
||||
listener: "$4"
|
||||
networkProcessor: "$5"
|
||||
- pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
|
||||
name: kafka_server_$1_connections_software
|
||||
type: GAUGE
|
||||
labels:
|
||||
clientSoftwareName: "$2"
|
||||
clientSoftwareVersion: "$3"
|
||||
listener: "$4"
|
||||
networkProcessor: "$5"
|
||||
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
|
||||
name: kafka_server_$1_$4
|
||||
type: COUNTER
|
||||
labels:
|
||||
listener: "$2"
|
||||
networkProcessor: "$3"
|
||||
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
|
||||
name: kafka_server_$1_$4
|
||||
type: GAUGE
|
||||
labels:
|
||||
listener: "$2"
|
||||
networkProcessor: "$3"
|
||||
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
|
||||
name: kafka_server_$1_$4
|
||||
type: COUNTER
|
||||
labels:
|
||||
listener: "$2"
|
||||
networkProcessor: "$3"
|
||||
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
|
||||
name: kafka_server_$1_$4
|
||||
type: GAUGE
|
||||
labels:
|
||||
listener: "$2"
|
||||
networkProcessor: "$3"
|
||||
# Some percent metrics use MeanRate attribute
|
||||
# Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
|
||||
name: kafka_$1_$2_$3_percent
|
||||
type: GAUGE
|
||||
# Generic gauges for percents
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
|
||||
name: kafka_$1_$2_$3_percent
|
||||
type: GAUGE
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
|
||||
name: kafka_$1_$2_$3_percent
|
||||
type: GAUGE
|
||||
labels:
|
||||
"$4": "$5"
|
||||
# Generic per-second counters with 0-2 key/value pairs
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
|
||||
name: kafka_$1_$2_$3_total
|
||||
type: COUNTER
|
||||
labels:
|
||||
"$4": "$5"
|
||||
"$6": "$7"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
|
||||
name: kafka_$1_$2_$3_total
|
||||
type: COUNTER
|
||||
labels:
|
||||
"$4": "$5"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
|
||||
name: kafka_$1_$2_$3_total
|
||||
type: COUNTER
|
||||
# Generic gauges with 0-2 key/value pairs
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
labels:
|
||||
"$4": "$5"
|
||||
"$6": "$7"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
labels:
|
||||
"$4": "$5"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
# Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
|
||||
# Note that these are missing the '_sum' metric!
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
|
||||
name: kafka_$1_$2_$3_count
|
||||
type: COUNTER
|
||||
labels:
|
||||
"$4": "$5"
|
||||
"$6": "$7"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
labels:
|
||||
"$4": "$5"
|
||||
"$6": "$7"
|
||||
quantile: "0.$8"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
|
||||
name: kafka_$1_$2_$3_count
|
||||
type: COUNTER
|
||||
labels:
|
||||
"$4": "$5"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
labels:
|
||||
"$4": "$5"
|
||||
quantile: "0.$6"
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
|
||||
name: kafka_$1_$2_$3_count
|
||||
type: COUNTER
|
||||
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
|
||||
name: kafka_$1_$2_$3
|
||||
type: GAUGE
|
||||
labels:
|
||||
quantile: "0.$4"
|
||||
# KRaft overall related metrics
|
||||
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
|
||||
- pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
|
||||
name: kafka_server_raftmetrics_$1
|
||||
type: COUNTER
|
||||
- pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
|
||||
name: kafka_server_raftmetrics_$1
|
||||
value: 1
|
||||
type: UNTYPED
|
||||
labels:
|
||||
$1: "$2"
|
||||
- pattern: "kafka.server<type=raft-metrics><>(.+):"
|
||||
name: kafka_server_raftmetrics_$1
|
||||
type: GAUGE
|
||||
# KRaft "low level" channels related metrics
|
||||
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
|
||||
- pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
|
||||
name: kafka_server_raftchannelmetrics_$1
|
||||
type: COUNTER
|
||||
- pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
|
||||
name: kafka_server_raftchannelmetrics_$1
|
||||
type: GAUGE
|
||||
# Broker metrics related to fetching metadata topic records in KRaft mode
|
||||
- pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
|
||||
name: kafka_server_brokermetadatametrics_$1
|
||||
type: GAUGE
|
||||
zookeeper-metrics-config.yml: |
|
||||
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
|
||||
lowercaseOutputName: true
|
||||
rules:
|
||||
# replicated Zookeeper
|
||||
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
|
||||
name: "zookeeper_$2"
|
||||
type: GAUGE
|
||||
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
|
||||
name: "zookeeper_$3"
|
||||
type: GAUGE
|
||||
labels:
|
||||
replicaId: "$2"
|
||||
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
|
||||
name: "zookeeper_$4"
|
||||
type: COUNTER
|
||||
labels:
|
||||
replicaId: "$2"
|
||||
memberType: "$3"
|
||||
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
|
||||
name: "zookeeper_$4"
|
||||
type: GAUGE
|
||||
labels:
|
||||
replicaId: "$2"
|
||||
memberType: "$3"
|
||||
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
|
||||
name: "zookeeper_$4_$5"
|
||||
type: GAUGE
|
||||
labels:
|
||||
replicaId: "$2"
|
||||
memberType: "$3"
|
||||
packages/apps/kafka/templates/podscrape.yaml (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
apiVersion: operator.victoriametrics.com/v1beta1
|
||||
kind: VMPodScrape
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
spec:
|
||||
podMetricsEndpoints:
|
||||
- port: tcp-prometheus
|
||||
scheme: http
|
||||
relabelConfigs:
|
||||
- separator: ;
|
||||
regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
|
||||
replacement: $1
|
||||
action: labelmap
|
||||
- sourceLabels: [__meta_kubernetes_namespace]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
targetLabel: namespace
|
||||
replacement: $1
|
||||
action: replace
|
||||
- sourceLabels: [__meta_kubernetes_pod_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
targetLabel: pod
|
||||
replacement: $1
|
||||
action: replace
|
||||
- sourceLabels: [__meta_kubernetes_pod_node_name]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
targetLabel: node
|
||||
replacement: $1
|
||||
action: replace
|
||||
- sourceLabels: [__meta_kubernetes_pod_host_ip]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
targetLabel: node_ip
|
||||
replacement: $1
|
||||
action: replace
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.14.1
|
||||
version: 0.15.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.14.1@sha256:0ea139c71e08db5adb275d81a7efa9a0d8b8db61a1fc1a67167a33a347c07fd8
|
||||
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.1@sha256:73701e37727eedaafdf9efe4baefcf0835f064ee8731219f0c0186c0d0781a5c
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
# Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
|
||||
ARG builder_image=docker.io/library/golang:1.22.5
|
||||
ARG builder_image=docker.io/library/golang:1.23.4
|
||||
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
|
||||
FROM ${builder_image} AS builder
|
||||
RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
|
||||
&& cd /src/autoscaler/cluster-autoscaler \
|
||||
&& git checkout cluster-autoscaler-1.31.0
|
||||
&& git checkout cluster-autoscaler-1.32.0
|
||||
|
||||
WORKDIR /src/autoscaler/cluster-autoscaler
|
||||
COPY fix-downscale.diff /fix-downscale.diff
|
||||
RUN git apply /fix-downscale.diff
|
||||
RUN make build
|
||||
|
||||
FROM $BASEIMAGE
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
|
||||
index 4eec0e4bf..f28fd9241 100644
|
||||
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
|
||||
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
|
||||
@@ -106,8 +106,6 @@ func (r unstructuredScalableResource) Replicas() (int, error) {
|
||||
|
||||
func (r unstructuredScalableResource) SetSize(nreplicas int) error {
|
||||
switch {
|
||||
- case nreplicas > r.maxSize:
|
||||
- return fmt.Errorf("size increase too large - desired:%d max:%d", nreplicas, r.maxSize)
|
||||
case nreplicas < r.minSize:
|
||||
return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize)
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.14.1@sha256:f595d50689405a504249c2af4b84562e8a0d16bdf9287d4eedf7c87959c4fba1
|
||||
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.1@sha256:02037bb7a75b35ca1e34924f13e7fa7b25bac2017ddbd7e9ed004c0ff368cce3
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.14.1@sha256:644379ba92c72dbbf07257d70f88ef3e5c1f1fb88f161c03758c13588d33ac2d
|
||||
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.1@sha256:a86d8a4722b81e89820ead959874524c4cc86654c22ad73c421bbf717d62c3f3
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:a64fefbd94535be2f8ac92943f0cad076a7b4c61c289a6ac0086a40859ed9d0e
|
||||
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:6f19f3f8a68372c5b212e98a79ff132cc20641bc46fc4b8d359158945dc04043
|
||||
|
||||
@@ -30,6 +30,12 @@ spec:
|
||||
- /cluster-autoscaler
|
||||
args:
|
||||
- --cloud-provider=clusterapi
|
||||
- --enforce-node-group-min-size=true
|
||||
- --ignore-daemonsets-utilization=true
|
||||
- --ignore-mirror-pods-utilization=true
|
||||
- --scale-down-unneeded-time=30s
|
||||
- --scan-interval=25s
|
||||
- --force-delete-unregistered-nodes=true
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
|
||||
- --clusterapi-cloud-config-authoritative
|
||||
- --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}
|
||||
|
||||
@@ -29,6 +29,7 @@ spec:
|
||||
{{- range .group.roles }}
|
||||
node-role.kubernetes.io/{{ . }}: ""
|
||||
{{- end }}
|
||||
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
|
||||
spec:
|
||||
domain:
|
||||
{{- if and .group.resources .group.resources.cpu }}
|
||||
@@ -117,7 +118,7 @@ spec:
|
||||
ingress:
|
||||
extraAnnotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
|
||||
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
|
||||
className: "{{ $ingress }}"
|
||||
deployment:
|
||||
podAdditionalMetadata:
|
||||
@@ -126,6 +127,21 @@ spec:
|
||||
replicas: 2
|
||||
version: 1.30.1
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 2
|
||||
minReplicas: 1
|
||||
kind: kubernetes
|
||||
type: control-plane
|
||||
selector:
|
||||
kamaji.clastix.io/component: deployment
|
||||
kamaji.clastix.io/name: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
---
|
||||
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
|
||||
kind: KubevirtCluster
|
||||
metadata:
|
||||
@@ -172,6 +188,7 @@ spec:
|
||||
---
|
||||
{{- $context := deepCopy $ }}
|
||||
{{- $_ := set $context "group" $group }}
|
||||
{{- $_ := set $context "groupName" $groupName }}
|
||||
{{- $kubevirtmachinetemplate := include "kubevirtmachinetemplate" $context }}
|
||||
{{- $kubevirtmachinetemplateHash := $kubevirtmachinetemplate | sha256sum | trunc 6 }}
|
||||
{{- $kubevirtmachinetemplateName := printf "%s-%s-%s" $.Release.Name $groupName $kubevirtmachinetemplateHash }}
|
||||
@@ -255,6 +272,21 @@ spec:
|
||||
- type: Ready
|
||||
status: "False"
|
||||
timeout: 300s
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-{{ $groupName }}
|
||||
namespace: {{ $.Release.Namespace }}
|
||||
spec:
|
||||
minReplicas: {{ $group.minReplicas }}
|
||||
kind: kubernetes
|
||||
type: worker
|
||||
selector:
|
||||
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
|
||||
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ $groupName }}
|
||||
cluster.x-k8s.io/role: worker
|
||||
version: {{ $.Chart.Version }}
|
||||
{{- end }}
|
||||
---
|
||||
{{- /*
|
||||
|
||||
@@ -24,3 +24,13 @@ rules:
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
{{- range $groupName, $group := .Values.nodeGroups }}
|
||||
- {{ $.Release.Name }}-{{ $groupName }}
|
||||
{{- end }}
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.2@sha256:948d41556939d90bdc37b4406b18935d46490dcb3f38a27aa117a4c3973e5604
|
||||
ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.2@sha256:9f0b2bc5135e10b29edb2824309059f5b4c4e8b744804b2cf55381171f335675
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.7.1
|
||||
version: 0.8.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/aenix-io/cozystack/postgres-backup:0.7.1@sha256:4d2271b345240c6c5b37599996745646012004b0f57e31c4c9deb1aba7408a51
|
||||
ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:d1f7692b6761f46f24687d885ec335330280346ae4a9ff28b3179681b36106b7
|
||||
|
||||
@@ -19,3 +19,10 @@ rules:
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-credentials
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
@@ -29,3 +29,17 @@ spec:
|
||||
inheritedMetadata:
|
||||
labels:
|
||||
policy.cozystack.io/allow-to-apiserver: "true"
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: postgres
|
||||
type: postgres
|
||||
selector:
|
||||
cnpg.io/cluster: {{ .Release.Name }}
|
||||
cnpg.io/podRole: instance
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.4.0
|
||||
version: 0.5.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -20,3 +20,11 @@ rules:
|
||||
resourceNames:
|
||||
- "{{ .Release.Name }}-auth"
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-redis
|
||||
- {{ .Release.Name }}-sentinel
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
@@ -73,3 +73,34 @@ spec:
|
||||
auth:
|
||||
secretPath: {{ .Release.Name }}-auth
|
||||
{{- end }}
|
||||
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-redis
|
||||
namespace: {{ $.Release.Namespace }}
|
||||
spec:
|
||||
minReplicas: 1
|
||||
replicas: {{ .Values.replicas }}
|
||||
kind: redis
|
||||
type: redis
|
||||
selector:
|
||||
app.kubernetes.io/component: redis
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-sentinel
|
||||
namespace: {{ $.Release.Namespace }}
|
||||
spec:
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
kind: redis
|
||||
type: sentinel
|
||||
selector:
|
||||
app.kubernetes.io/component: sentinel
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -4,4 +4,4 @@ description: Separated tenant namespace
|
||||
icon: /logos/tenant.svg
|
||||
|
||||
type: application
|
||||
version: 1.6.5
|
||||
version: 1.8.0
|
||||
|
||||
@@ -50,11 +50,12 @@ tenant-u1
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------ | --------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `host` | The hostname used to access tenant services (defaults to using the tenant name as a subdomain for its parent tenant host). | `""` |
|
||||
| `etcd` | Deploy own Etcd cluster | `false` |
|
||||
| `monitoring` | Deploy own Monitoring Stack | `false` |
|
||||
| `ingress` | Deploy own Ingress Controller | `false` |
|
||||
| `seaweedfs` | Deploy own SeaweedFS | `false` |
|
||||
| `isolated` | Enforce tenant namespace with network policies | `false` |
|
||||
| Name | Description | Value |
|
||||
| ---------------- | --------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `host` | The hostname used to access tenant services (defaults to using the tenant name as a subdomain for its parent tenant host). | `""` |
|
||||
| `etcd` | Deploy own Etcd cluster | `false` |
|
||||
| `monitoring` | Deploy own Monitoring Stack | `false` |
|
||||
| `ingress` | Deploy own Ingress Controller | `false` |
|
||||
| `seaweedfs` | Deploy own SeaweedFS | `false` |
|
||||
| `isolated` | Enforce tenant namespace with network policies | `false` |
|
||||
| `resourceQuotas` | Define resource quotas for the tenant | `{}` |
|
||||
|
||||
packages/apps/tenant/templates/info.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
||||
{{- $oidcEnabled := index $cozyConfig.data "oidc-enabled" }}
|
||||
{{- if $oidcEnabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: info
|
||||
namespace: {{ include "tenant.name" . }}
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
labels:
|
||||
cozystack.io/ui: "true"
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
chart:
|
||||
spec:
|
||||
chart: info
|
||||
reconcileStrategy: Revision
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: cozystack-extra
|
||||
namespace: cozy-public
|
||||
version: "*"
|
||||
interval: 1m0s
|
||||
timeout: 5m0s
|
||||
{{- end }}
|
||||
@@ -26,12 +26,24 @@ spec:
|
||||
metricsStorages:
|
||||
- name: shortterm
|
||||
retentionPeriod: "3d"
|
||||
deduplicationInterval: "5m"
|
||||
storage: 10Gi
|
||||
- name: longterm
|
||||
retentionPeriod: "14d"
|
||||
deduplicationInterval: "15s"
|
||||
storage: 10Gi
|
||||
vminsert:
|
||||
resources: {}
|
||||
vmselect:
|
||||
resources: {}
|
||||
vmstorage:
|
||||
resources: {}
|
||||
- name: longterm
|
||||
retentionPeriod: "14d"
|
||||
deduplicationInterval: "5m"
|
||||
storage: 10Gi
|
||||
vminsert:
|
||||
resources: {}
|
||||
vmselect:
|
||||
resources: {}
|
||||
vmstorage:
|
||||
resources: {}
|
||||
oncall:
|
||||
enabled: false
|
||||
{{- end }}
|
||||
|
||||
packages/apps/tenant/templates/quota.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
{{- if .Values.resourceQuotas }}
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
metadata:
|
||||
name: tenant-quota
|
||||
namespace: {{ include "tenant.name" . }}
|
||||
spec:
|
||||
hard:
|
||||
{{- toYaml .Values.resourceQuotas | nindent 4 }}
|
||||
{{- end }}
|
||||
@@ -34,7 +34,11 @@ rules:
|
||||
- apiGroups: ["apps.cozystack.io"]
|
||||
resources: ['*']
|
||||
verbs: ['*']
|
||||
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
@@ -103,6 +107,11 @@ rules:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@@ -175,6 +184,11 @@ rules:
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@@ -258,6 +272,7 @@ rules:
|
||||
- virtualmachines
|
||||
- vmdisks
|
||||
- vminstances
|
||||
- infos
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
@@ -266,6 +281,11 @@ rules:
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@@ -334,6 +354,11 @@ rules:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
@@ -31,6 +31,11 @@
|
||||
"type": "boolean",
|
||||
"description": "Enforce tenant namespace with network policies",
|
||||
"default": false
|
||||
},
|
||||
"resourceQuotas": {
|
||||
"type": "object",
|
||||
"description": "Define resource quotas for the tenant",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,9 +6,18 @@
|
||||
## @param ingress Deploy own Ingress Controller
|
||||
## @param seaweedfs Deploy own SeaweedFS
|
||||
## @param isolated Enforce tenant namespace with network policies
|
||||
## @param resourceQuotas Define resource quotas for the tenant
|
||||
host: ""
|
||||
etcd: false
|
||||
monitoring: false
|
||||
ingress: false
|
||||
seaweedfs: false
|
||||
isolated: false
|
||||
resourceQuotas: {}
|
||||
# resourceQuotas:
|
||||
# requests.cpu: "1"
|
||||
# requests.memory: "1Gi"
|
||||
# limits.cpu: "2"
|
||||
# limits.memory: "2Gi"
|
||||
# requests.nvidia.com/gpu: 4
|
||||
# requests.storage: 100Gi
|
||||
|
||||
@@ -23,7 +23,8 @@ kafka 0.2.1 3ac17018
|
||||
kafka 0.2.2 d0758692
|
||||
kafka 0.2.3 5ca8823
|
||||
kafka 0.3.0 c07c4bbd
|
||||
kafka 0.3.1 HEAD
|
||||
kafka 0.3.1 b7375f73
|
||||
kafka 0.3.2 HEAD
|
||||
kubernetes 0.1.0 f642698
|
||||
kubernetes 0.2.0 7cd7de73
|
||||
kubernetes 0.3.0 7caccec1
|
||||
@@ -42,7 +43,9 @@ kubernetes 0.12.0 74649f8
|
||||
kubernetes 0.12.1 28fca4e
|
||||
kubernetes 0.13.0 ced8e5b9
|
||||
kubernetes 0.14.0 bfbde07c
|
||||
kubernetes 0.14.1 HEAD
|
||||
kubernetes 0.14.1 fde4bcfa
|
||||
kubernetes 0.15.0 cb7b8158
|
||||
kubernetes 0.15.1 HEAD
|
||||
mysql 0.1.0 f642698
|
||||
mysql 0.2.0 8b975ff0
|
||||
mysql 0.3.0 5ca8823
|
||||
@@ -65,7 +68,8 @@ postgres 0.5.0 c07c4bbd
|
||||
postgres 0.6.0 2a4768a
|
||||
postgres 0.6.2 54fd61c
|
||||
postgres 0.7.0 dc9d8bb
|
||||
postgres 0.7.1 HEAD
|
||||
postgres 0.7.1 175a65f
|
||||
postgres 0.8.0 HEAD
|
||||
rabbitmq 0.1.0 f642698
|
||||
rabbitmq 0.2.0 5ca8823
|
||||
rabbitmq 0.3.0 9e33dc0
|
||||
@@ -77,7 +81,8 @@ redis 0.1.1 f642698
|
||||
redis 0.2.0 5ca8823
|
||||
redis 0.3.0 c07c4bbd
|
||||
redis 0.3.1 b7375f73
|
||||
redis 0.4.0 HEAD
|
||||
redis 0.4.0 abc8f082
|
||||
redis 0.5.0 HEAD
|
||||
tcp-balancer 0.1.0 f642698
|
||||
tcp-balancer 0.2.0 HEAD
|
||||
tenant 0.1.3 3d1b86c
|
||||
@@ -95,16 +100,32 @@ tenant 1.6.1 edbbb9be
|
||||
tenant 1.6.2 ccedc5fe
|
||||
tenant 1.6.3 2057bb96
|
||||
tenant 1.6.4 3c9e50a4
|
||||
tenant 1.6.5 HEAD
|
||||
tenant 1.6.5 f1e11451
|
||||
tenant 1.6.6 d4634797
|
||||
tenant 1.6.7 06afcf27
|
||||
tenant 1.6.8 4cc48e6f
|
||||
tenant 1.7.0 6c73e3f3
|
||||
tenant 1.8.0 HEAD
|
||||
virtual-machine 0.1.4 f2015d6
|
||||
virtual-machine 0.1.5 7cd7de7
|
||||
virtual-machine 0.2.0 5ca8823
|
||||
virtual-machine 0.3.0 b908400
|
||||
virtual-machine 0.4.0 4746d51
|
||||
virtual-machine 0.5.0 HEAD
|
||||
virtual-machine 0.5.0 cad9cde
|
||||
virtual-machine 0.6.0 0e728870
|
||||
virtual-machine 0.7.0 af58018a
|
||||
virtual-machine 0.7.1 05857b95
|
||||
virtual-machine 0.8.0 3fa4dd3
|
||||
virtual-machine 0.8.1 3fa4dd3a
|
||||
virtual-machine 0.8.2 HEAD
|
||||
vm-disk 0.1.0 HEAD
|
||||
vm-instance 0.1.0 ced8e5b9
|
||||
vm-instance 0.2.0 HEAD
|
||||
vm-instance 0.2.0 4f767ee3
|
||||
vm-instance 0.3.0 0e728870
|
||||
vm-instance 0.4.0 af58018a
|
||||
vm-instance 0.4.1 05857b95
|
||||
vm-instance 0.5.0 3fa4dd3
|
||||
vm-instance 0.5.1 HEAD
|
||||
vpn 0.1.0 f642698
|
||||
vpn 0.2.0 7151424
|
||||
vpn 0.3.0 a2bcf100
|
||||
|
||||
@@ -17,10 +17,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.5.0
|
||||
version: 0.8.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "1.16.1"
|
||||
appVersion: "0.8.2"
|
||||
|
||||
@@ -8,3 +8,4 @@ generate:
|
||||
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
|
||||
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
|
||||
yq -i -o json '.properties.systemDisk.properties.image.enum = ["ubuntu", "cirros", "alpine", "fedora", "talos"]' values.schema.json
|
||||
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json
|
||||
|
||||
@@ -39,6 +39,7 @@ virtctl ssh <user>@<vm>
|
||||
| Name | Description | Value |
|
||||
| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
rules:
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: 1
|
||||
minReplicas: 1
|
||||
kind: virtual-machine
|
||||
type: virtual-machine
|
||||
selector:
|
||||
vm.kubevirt.io/name: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -6,16 +6,24 @@ metadata:
|
||||
name: {{ include "virtual-machine.fullname" . }}
|
||||
labels:
|
||||
{{- include "virtual-machine.labels" . | nindent 4 }}
|
||||
{{- if eq .Values.externalMethod "WholeIP" }}
|
||||
annotations:
|
||||
networking.cozystack.io/wholeIP: "true"
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
|
||||
externalTrafficPolicy: Local
|
||||
allocateLoadBalancerNodePorts: false
|
||||
selector:
|
||||
{{- include "virtual-machine.labels" . | nindent 4 }}
|
||||
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
|
||||
ports:
|
||||
{{- if eq .Values.externalMethod "WholeIP" }}
|
||||
- port: 65535
|
||||
{{- else }}
|
||||
{{- range .Values.externalPorts }}
|
||||
- name: port-{{ . }}
|
||||
port: {{ . }}
|
||||
targetPort: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
packages/apps/virtual-machine/templates/vm-update-hook.yaml (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
{{- $vmName := include "virtual-machine.fullname" . -}}
|
||||
{{- $namespace := .Release.Namespace -}}
|
||||
|
||||
{{- $existingVM := lookup "kubevirt.io/v1" "VirtualMachine" $namespace $vmName -}}
|
||||
{{- $existingPVC := lookup "v1" "PersistentVolumeClaim" $namespace $vmName -}}
|
||||
|
||||
{{- $instanceType := .Values.instanceType | default "" -}}
|
||||
{{- $instanceProfile := .Values.instanceProfile | default "" -}}
|
||||
{{- $desiredStorage := .Values.systemDisk.storage | default "" -}}
|
||||
|
||||
{{- $needUpdateType := false -}}
|
||||
{{- $needUpdateProfile := false -}}
|
||||
{{- $needResizePVC := false -}}
|
||||
|
||||
{{- if and $existingVM $instanceType -}}
|
||||
{{- if not (eq $existingVM.spec.instancetype.name $instanceType) -}}
|
||||
{{- $needUpdateType = true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- if and $existingVM $instanceProfile -}}
|
||||
{{- if not (eq $existingVM.spec.preference.name $instanceProfile) -}}
|
||||
{{- $needUpdateProfile = true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- if and $existingPVC $desiredStorage -}}
|
||||
{{- $currentStorage := $existingPVC.spec.resources.requests.storage | toString -}}
|
||||
{{- if not (eq $currentStorage $desiredStorage) -}}
|
||||
{{- $needResizePVC = true -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- if or $needUpdateType $needUpdateProfile $needResizePVC }}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: "{{ $.Release.Name }}-update-hook"
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade
|
||||
helm.sh/hook-weight: "0"
|
||||
helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: "{{ $.Release.Name }}-update-hook"
|
||||
policy.cozystack.io/allow-to-apiserver: "true"
|
||||
spec:
|
||||
serviceAccountName: {{ $.Release.Name }}-update-hook
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: update-resources
|
||||
image: bitnami/kubectl:latest
|
||||
command: ["sh", "-exc"]
|
||||
args:
|
||||
- |
|
||||
{{- if $needUpdateType }}
|
||||
echo "Patching VirtualMachine for instancetype update..."
|
||||
kubectl patch virtualmachine {{ $vmName }} -n {{ $namespace }} \
|
||||
--type merge \
|
||||
-p '{"spec":{"instancetype":{"name": "{{ $instanceType }}", "revisionName": null}}}'
|
||||
{{- end }}
|
||||
|
||||
{{- if $needUpdateProfile }}
|
||||
echo "Patching VirtualMachine for preference update..."
|
||||
kubectl patch virtualmachine {{ $vmName }} -n {{ $namespace }} \
|
||||
--type merge \
|
||||
-p '{"spec":{"preference":{"name": "{{ $instanceProfile }}", "revisionName": null}}}'
|
||||
{{- end }}
|
||||
|
||||
{{- if $needResizePVC }}
|
||||
echo "Patching PVC for storage resize..."
|
||||
kubectl patch pvc {{ $vmName }} -n {{ $namespace }} \
|
||||
--type merge \
|
||||
-p '{"spec":{"resources":{"requests":{"storage":"{{ $desiredStorage }}"}}}}'
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-update-hook
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade
|
||||
helm.sh/hook-weight: "-5"
|
||||
helm.sh/hook-delete-policy: before-hook-creation
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-update-hook
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade
|
||||
helm.sh/hook-weight: "-5"
|
||||
helm.sh/hook-delete-policy: before-hook-creation
|
||||
rules:
|
||||
- apiGroups: ["kubevirt.io"]
|
||||
resources: ["virtualmachines"]
|
||||
verbs: ["patch", "get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["patch", "get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-update-hook
|
||||
annotations:
|
||||
helm.sh/hook: pre-install,pre-upgrade
|
||||
helm.sh/hook-weight: "-5"
|
||||
helm.sh/hook-delete-policy: before-hook-creation
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ $.Release.Name }}-update-hook
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ $.Release.Name }}-update-hook
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
@@ -7,6 +7,15 @@
|
||||
"description": "Enable external access from outside the cluster",
|
||||
"default": false
|
||||
},
|
||||
"externalMethod": {
|
||||
"type": "string",
|
||||
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
|
||||
"default": "WholeIP",
|
||||
"enum": [
|
||||
"WholeIP",
|
||||
"PortList"
|
||||
]
|
||||
},
|
||||
"externalPorts": {
|
||||
"type": "array",
|
||||
"description": "Specify ports to forward from outside the cluster",
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
## @section Common parameters
|
||||
|
||||
## @param external Enable external access from outside the cluster
|
||||
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
|
||||
## @param externalPorts [array] Specify ports to forward from outside the cluster
|
||||
external: false
|
||||
externalMethod: WholeIP
|
||||
externalPorts:
|
||||
- 22
|
||||
|
||||
|
||||
@@ -17,10 +17,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.2.0
|
||||
version: 0.5.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "0.2.0"
|
||||
appVersion: "0.5.1"
|
||||
|
||||
@@ -8,3 +8,4 @@ generate:
PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json
@@ -36,18 +36,19 @@ virtctl ssh <user>@<vm>

### Common parameters

| Name | Description | Value |
| ------------------ | ---------------------------------------------------------------------------------- | ---------------- |
| `external` | Enable external access from outside the cluster | `false` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
| `disks` | List of disks to attach | `[]` |
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
| Name | Description | Value |
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ---------------- |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
| `disks` | List of disks to attach | `[]` |
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
` |

## U Series

@@ -0,0 +1,26 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Release.Name }}-dashboard-resources
rules:
- apiGroups:
- cozystack.io
resources:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
namespace: {{ $.Release.Namespace }}
spec:
replicas: 1
minReplicas: 1
kind: virtual-machine
type: virtual-machine
selector:
vm.kubevirt.io/name: {{ $.Release.Name }}
version: {{ $.Chart.Version }}
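
For illustration, a hedged sketch of how this new WorkloadMonitor could render; the release name `myvm` and the tenant namespace are made-up placeholders, while the field values follow the template above:

apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: myvm                  # placeholder release name
  namespace: tenant-example   # placeholder namespace
spec:
  replicas: 1
  minReplicas: 1
  kind: virtual-machine
  type: virtual-machine
  selector:
    vm.kubevirt.io/name: myvm
  version: 0.5.1              # chart version introduced in this change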
@@ -6,16 +6,24 @@ metadata:
name: {{ include "virtual-machine.fullname" . }}
labels:
{{- include "virtual-machine.labels" . | nindent 4 }}
{{- if eq .Values.externalMethod "WholeIP" }}
annotations:
networking.cozystack.io/wholeIP: "true"
{{- end }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
selector:
{{- include "virtual-machine.labels" . | nindent 4 }}
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
ports:
{{- if eq .Values.externalMethod "WholeIP" }}
- port: 65535
{{- else }}
{{- range .Values.externalPorts }}
- name: port-{{ . }}
port: {{ . }}
targetPort: {{ . }}
{{- end }}
{{- end }}
{{- end }}
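
A hedged sketch of the Service this template could render in the default `WholeIP` mode with `external: true`; the name and selector label are illustrative placeholders, not values taken from the chart:

apiVersion: v1
kind: Service
metadata:
  name: myvm                                # placeholder fullname
  annotations:
    networking.cozystack.io/wholeIP: "true"
spec:
  type: LoadBalancer                        # ClusterIP when external is false
  externalTrafficPolicy: Local
  allocateLoadBalancerNodePorts: false
  selector:
    vm.kubevirt.io/name: myvm               # assumed selector label
  ports:
    - port: 65535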

99  packages/apps/vm-instance/templates/vm-update-hook.yaml  Normal file
@@ -0,0 +1,99 @@
{{- $vmName := include "virtual-machine.fullname" . -}}
{{- $namespace := .Release.Namespace -}}

{{- $existingVM := lookup "kubevirt.io/v1" "VirtualMachine" $namespace $vmName -}}

{{- $instanceType := .Values.instanceType | default "" -}}
{{- $instanceProfile := .Values.instanceProfile | default "" -}}

{{- $needUpdateType := false -}}
{{- $needUpdateProfile := false -}}

{{- if and $existingVM $instanceType -}}
{{- if not (eq $existingVM.spec.instancetype.name $instanceType) -}}
{{- $needUpdateType = true -}}
{{- end -}}
{{- end -}}

{{- if and $existingVM $instanceProfile -}}
{{- if not (eq $existingVM.spec.preference.name $instanceProfile) -}}
{{- $needUpdateProfile = true -}}
{{- end -}}
{{- end -}}

{{- if or $needUpdateType $needUpdateProfile }}
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ $.Release.Name }}-update-hook"
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: "0"
helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation
spec:
template:
metadata:
labels:
app: "{{ $.Release.Name }}-update-hook"
policy.cozystack.io/allow-to-apiserver: "true"
spec:
serviceAccountName: {{ $.Release.Name }}-update-hook
restartPolicy: Never
containers:
- name: update-resources
image: bitnami/kubectl:latest
command: ["sh", "-exc"]
args:
- |
{{- if $needUpdateType }}
echo "Patching VirtualMachine for instancetype update..."
kubectl patch virtualmachine {{ $vmName }} -n {{ $namespace }} \
--type merge \
-p '{"spec":{"instancetype":{"name": "{{ $instanceType }}", "revisionName": null}}}'
{{- end }}

{{- if $needUpdateProfile }}
echo "Patching VirtualMachine for preference update..."
kubectl patch virtualmachine {{ $vmName }} -n {{ $namespace }} \
--type merge \
-p '{"spec":{"preference":{"name": "{{ $instanceProfile }}", "revisionName": null}}}'
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $.Release.Name }}-update-hook
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $.Release.Name }}-update-hook
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation
rules:
- apiGroups: ["kubevirt.io"]
resources: ["virtualmachines"]
verbs: ["patch", "get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $.Release.Name }}-update-hook
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation
subjects:
- kind: ServiceAccount
name: {{ $.Release.Name }}-update-hook
roleRef:
kind: Role
name: {{ $.Release.Name }}-update-hook
apiGroup: rbac.authorization.k8s.io
{{- end }}
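
When this pre-upgrade Job fires, the effect of its kubectl patch commands on the VirtualMachine is roughly the following merge-patch fragment (a sketch; `u1.large` and `ubuntu` stand in for whatever `instanceType`/`instanceProfile` the user set), clearing the pinned revision so KubeVirt re-resolves the new instance type and preference:

spec:
  instancetype:
    name: u1.large        # example new instance type
    revisionName: null    # force re-resolution
  preference:
    name: ubuntu          # example new preference
    revisionName: null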
@@ -12,20 +12,17 @@ metadata:
labels:
{{- include "virtual-machine.labels" . | nindent 4 }}
spec:
running: {{ .Values.running | default "true" }}
running: {{ .Values.running }}
{{- with .Values.instanceType }}
instancetype:
kind: VirtualMachineClusterInstancetype
name: {{ . }}
revisionName: null
{{- end }}
{{- end }}
{{- with .Values.instanceProfile }}
preference:
kind: VirtualMachineClusterPreference
name: {{ . }}
revisionName: null
{{- end }}

{{- end }}
template:
metadata:
annotations:
@@ -47,7 +44,7 @@ spec:
disks:
{{- range $i, $disk := .Values.disks }}
- name: disk-{{ .name }}
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace .name }}
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" .name) }}
{{- if $disk }}
{{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
cdrom: {}
@@ -7,6 +7,15 @@
"description": "Enable external access from outside the cluster",
"default": false
},
"externalMethod": {
"type": "string",
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
"default": "WholeIP",
"enum": [
"WholeIP",
"PortList"
]
},
"externalPorts": {
"type": "array",
"description": "Specify ports to forward from outside the cluster",
@@ -1,8 +1,10 @@
## @section Common parameters

## @param external Enable external access from outside the cluster
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
## @param externalPorts [array] Specify ports to forward from outside the cluster
external: false
externalMethod: WholeIP
externalPorts:
- 22

3  packages/core/builder/Chart.yaml  Executable file
@@ -0,0 +1,3 @@
apiVersion: v2
name: builder
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process

35  packages/core/builder/Makefile  Executable file
@@ -0,0 +1,35 @@
NAMESPACE=cozy-builder
NAME := builder

TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' ../installer/images/talos/profiles/installer.yaml)

include ../../../scripts/common-envs.mk

help: ## Show this help.
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

show:
helm template -n $(NAMESPACE) $(NAME) .

apply: ## Create builder sandbox in existing Kubernetes cluster.
helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
docker buildx ls | grep -q '^buildkit-builder*' || docker buildx create \
--bootstrap \
--name=buildkit-$(NAME) \
--driver=kubernetes \
--driver-opt=namespace=$(NAMESPACE),replicas=1 \
--platform=linux/amd64 \
--platform=linux/arm64 \
--use \
--config config.toml

diff:
helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -

delete: ## Remove builder sandbox from existing Kubernetes cluster.
kubectl delete deploy -n $(NAMESPACE) $(NAME)-talos-imager
docker buildx rm buildkit-$(NAME)

wait-for-builder:
kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) $(NAME)-talos-imager
kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=$(NAME)-talos-imager

11  packages/core/builder/config.toml  Normal file
@@ -0,0 +1,11 @@
[worker.oci]
gc = true
gckeepstorage = 50000

[[worker.oci.gcpolicy]]
keepBytes = 10737418240
keepDuration = 604800
filters = [ "type==source.local", "type==exec.cachemount", "type==source.git.checkout"]
[[worker.oci.gcpolicy]]
all = true
keepBytes = 53687091200

43  packages/core/builder/templates/sandbox.yaml  Executable file
@@ -0,0 +1,43 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Namespace }}
labels:
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-talos-imager
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-talos-imager
strategy:
type: Recreate
template:
metadata:
labels:
app: {{ .Release.Name }}-talos-imager
spec:
automountServiceAccountToken: false
terminationGracePeriodSeconds: 1
containers:
- name: imager
image: "{{ .Values.talos.imager.image }}"
securityContext:
privileged: true
command:
- sleep
- infinity
volumeMounts:
- mountPath: /dev
name: dev
volumes:
- hostPath:
path: /dev
type: Directory
name: dev

3  packages/core/builder/values.yaml  Executable file
@@ -0,0 +1,3 @@
talos:
imager:
image: ghcr.io/siderolabs/imager:v1.9.3
@@ -19,16 +19,18 @@ diff:

update:
hack/gen-profiles.sh
IMAGE=$$(yq '.input.baseInstaller.imageRef | sub("/installer:", "/imager:")' images/talos/profiles/installer.yaml) \
yq -i '.talos.imager.image = strenv(IMAGE)' ../builder/values.yaml

image: pre-checks image-cozystack image-talos image-matchbox

image-cozystack:
image-cozystack: run-builder
make -C ../../.. repos
docker buildx build -f images/cozystack/Dockerfile ../../.. \
--provenance false \
--tag $(REGISTRY)/cozystack:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack:latest \
--platform linux/amd64,linux/arm64 \
--platform linux/amd64 \
--cache-to type=inline \
--metadata-file images/cozystack.json \
--push=$(PUSH) \
@@ -37,13 +39,11 @@ image-cozystack:
yq -i '.cozystack.image = strenv(IMAGE)' values.yaml
rm -f images/cozystack.json

image-talos:
image-talos: run-builder
test -f ../../../_out/assets/installer-amd64.tar || make talos-installer
docker load -i ../../../_out/assets/installer-amd64.tar
docker tag ghcr.io/siderolabs/installer:$(TALOS_VERSION) $(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))
docker push $(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))
skopeo copy docker-archive:../../../_out/assets/installer-amd64.tar docker://$(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))

image-matchbox:
image-matchbox: run-builder
test -f ../../../_out/assets/kernel-amd64 || make talos-kernel
test -f ../../../_out/assets/initramfs-metal-amd64.xz || make talos-initramfs
docker buildx build -f images/matchbox/Dockerfile ../../.. \
@@ -55,6 +55,8 @@ image-matchbox:
--metadata-file images/matchbox.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/matchbox:$(call settag,$(TAG))@$$(yq e '."containerimage.digest"' images/matchbox.json -o json -r)" \
> ../../extra/bootbox/images/matchbox.tag
rm -f images/matchbox.json

assets: talos-iso talos-nocloud talos-metal
@@ -62,5 +64,8 @@ assets: talos-iso talos-nocloud talos-metal
talos-initramfs talos-kernel talos-installer talos-iso talos-nocloud talos-metal:
mkdir -p ../../../_out/assets
cat images/talos/profiles/$(subst talos-,,$@).yaml | \
docker run --rm -i -v /dev:/dev --privileged "ghcr.io/siderolabs/imager:$(TALOS_VERSION)" --tar-to-stdout - | \
kubectl exec -i -n cozy-builder deploy/builder-talos-imager -- imager --tar-to-stdout - | \
tar -C ../../../_out/assets -xzf-

run-builder:
make -C ../builder/ apply wait-for-builder
@@ -1,4 +1,4 @@
FROM golang:alpine3.19 as k8s-await-election-builder
FROM golang:alpine3.21 as k8s-await-election-builder

ARG K8S_AWAIT_ELECTION_GITREPO=https://github.com/LINBIT/k8s-await-election
ARG K8S_AWAIT_ELECTION_VERSION=0.4.1
@@ -13,7 +13,7 @@ RUN git clone ${K8S_AWAIT_ELECTION_GITREPO} /usr/local/go/k8s-await-election/ \
&& make \
&& mv ./out/k8s-await-election-${TARGETARCH} /k8s-await-election

FROM alpine:3.19 AS builder
FROM golang:alpine3.21 as builder

RUN apk add --no-cache make git
RUN apk add helm --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
@@ -21,12 +21,14 @@ RUN apk add helm --repository=https://dl-cdn.alpinelinux.org/alpine/edge/communi
COPY . /src/
WORKDIR /src

RUN go build -o /cozystack-assets-server -ldflags '-extldflags "-static" -w -s' ./cmd/cozystack-assets-server

# Check that versions_map is not changed
RUN make repos

FROM alpine:3.19
FROM alpine:3.21

RUN apk add --no-cache make darkhttpd
RUN apk add --no-cache make
RUN apk add helm kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community

COPY scripts /cozystack/scripts
@@ -34,6 +36,7 @@ COPY --from=builder /src/packages/core /cozystack/packages/core
COPY --from=builder /src/packages/system /cozystack/packages/system
COPY --from=builder /src/_out/repos /cozystack/assets/repos
COPY --from=builder /src/_out/logos /cozystack/assets/logos
COPY --from=builder /cozystack-assets-server /usr/bin/cozystack-assets-server
COPY --from=k8s-await-election-builder /k8s-await-election /usr/bin/k8s-await-election
COPY dashboards /cozystack/assets/dashboards
@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: initramfs
imageOptions: {}
@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: installer
imageOptions: {}
@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: iso
imageOptions: {}
@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: kernel
imageOptions: {}
@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: image
imageOptions: { diskSize: 1306525696, diskFormat: raw }
@@ -3,24 +3,24 @@
arch: amd64
platform: nocloud
secureboot: false
version: v1.8.4
version: v1.9.3
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.8.4
imageRef: ghcr.io/siderolabs/installer:v1.9.3
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20241210
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20241210
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20241210
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20241210
- imageRef: ghcr.io/siderolabs/drbd:9.2.11-v1.8.4
- imageRef: ghcr.io/siderolabs/zfs:2.2.6-v1.8.4
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
output:
kind: image
imageOptions: { diskSize: 1306525696, diskFormat: raw }
@@ -67,13 +67,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: darkhttpd
- name: assets
image: "{{ .Values.cozystack.image }}"
command:
- /usr/bin/darkhttpd
- /cozystack/assets
- --port
- "8123"
- /usr/bin/cozystack-assets-server
- "-dir=/cozystack/assets"
- "-address=:8123"
ports:
- name: http
containerPort: 8123
@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/aenix-io/cozystack/cozystack:v0.21.0@sha256:90487dafccb12705b5e9760595b43c0352f3a94551c55c5fa7778bf9173d1737
image: ghcr.io/aenix-io/cozystack/cozystack:v0.26.1@sha256:67c6eb4da3baf2208df9b2ed24cbf758a2180bb3a071ce53141c21b8d17263cf
@@ -31,12 +31,30 @@ releases:
autoDirectNodeRoutes: true
routingMode: native

- name: cozy-proxy
releaseName: cozystack
chart: cozy-cozy-proxy
namespace: cozy-system
optional: true
dependsOn: [cilium]

- name: cert-manager-crds
releaseName: cert-manager-crds
chart: cozy-cert-manager-crds
namespace: cozy-cert-manager
dependsOn: [cilium]

- name: cozystack-controller
releaseName: cozystack-controller
chart: cozy-cozystack-controller
namespace: cozy-system
dependsOn: [cilium]
{{- if eq (index $cozyConfig.data "telemetry-enabled") "false" }}
values:
cozystackController:
disableTelemetry: true
{{- end }}

- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
@@ -64,6 +82,10 @@ releases:
privileged: true
optional: true
dependsOn: [cilium,victoria-metrics-operator]
values:
scrapeRules:
etcd:
enabled: true

- name: metallb
releaseName: metallb
@@ -98,7 +120,7 @@ releases:
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
optional: true
dependsOn: [cilium,cert-manager]
dependsOn: [cilium,cert-manager,victoria-metrics-operator]

- name: kafka-operator
releaseName: kafka-operator
@@ -152,7 +174,7 @@ releases:
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,cert-manager]
dependsOn: [piraeus-operator,cilium,cert-manager,snapshot-controller]

- name: telepresence
releaseName: traffic-manager
@@ -188,3 +210,11 @@ releases:
namespace: cozy-keycloak
optional: true
dependsOn: [keycloak]

- name: bootbox
releaseName: bootbox
chart: cozy-bootbox
namespace: cozy-bootbox
privileged: true
optional: true
dependsOn: [cilium]
@@ -20,6 +20,17 @@ releases:
namespace: cozy-cert-manager
dependsOn: []

- name: cozystack-controller
releaseName: cozystack-controller
chart: cozy-cozystack-controller
namespace: cozy-system
dependsOn: [cilium]
{{- if eq (index $cozyConfig.data "telemetry-enabled") "false" }}
values:
cozystackController:
disableTelemetry: true
{{- end }}

- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
@@ -47,6 +58,10 @@ releases:
privileged: true
optional: true
dependsOn: [victoria-metrics-operator]
values:
scrapeRules:
etcd:
enabled: true

- name: etcd-operator
releaseName: etcd-operator
@@ -74,7 +89,7 @@ releases:
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
optional: true
dependsOn: [cert-manager]
dependsOn: [victoria-metrics-operator]

- name: kafka-operator
releaseName: kafka-operator
@@ -50,6 +50,12 @@ releases:
SVC_CIDR: "{{ index $cozyConfig.data "ipv4-svc-cidr" }}"
JOIN_CIDR: "{{ index $cozyConfig.data "ipv4-join-cidr" }}"

- name: cozy-proxy
releaseName: cozystack
chart: cozy-cozy-proxy
namespace: cozy-system
dependsOn: [cilium,kubeovn]

- name: cert-manager-crds
releaseName: cert-manager-crds
chart: cozy-cert-manager-crds
@@ -62,6 +68,17 @@ releases:
namespace: cozy-system
dependsOn: [cilium,kubeovn]

- name: cozystack-controller
releaseName: cozystack-controller
chart: cozy-cozystack-controller
namespace: cozy-system
dependsOn: [cilium,kubeovn]
{{- if eq (index $cozyConfig.data "telemetry-enabled") "false" }}
values:
cozystackController:
disableTelemetry: true
{{- end }}

- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
@@ -86,12 +103,16 @@ releases:
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
values:
scrapeRules:
etcd:
enabled: true

- name: kubevirt-operator
releaseName: kubevirt-operator
chart: cozy-kubevirt-operator
namespace: cozy-kubevirt
dependsOn: [cilium,kubeovn]
dependsOn: [cilium,kubeovn,victoria-metrics-operator]

- name: kubevirt
releaseName: kubevirt
@@ -184,7 +205,7 @@ releases:
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,kubeovn,cert-manager]
dependsOn: [piraeus-operator,cilium,kubeovn,cert-manager,snapshot-controller]

- name: snapshot-controller
releaseName: snapshot-controller
@@ -211,24 +232,62 @@ releases:
namespace: cozy-dashboard
dependsOn: [cilium,kubeovn,keycloak-configure]
values:
kubeapps:
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1" "HelmRepository" "cozy-public" "").items }}
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
dashboard:
{{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
{{- $branding := dig "data" "branding" "" $cozystackBranding }}
{{- if $branding }}
customLocale:
"Kubeapps": {{ $branding }}
{{- end }}
customStyle: |
{{- $logoImage := dig "data" "logo" "" $cozystackBranding }}
{{- if $logoImage }}
.kubeapps-logo {
background-image: {{ $logoImage }}
}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

{{- $dashboardKCconfig := lookup "v1" "ConfigMap" "cozy-dashboard" "kubeapps-auth-config" }}
{{- $dashboardKCValues := dig "data" "values.yaml" "" $dashboardKCconfig }}
{{- if $dashboardKCValues }}
{{- $dashboardKCValues | nindent 4 }}
{{- end }}
#serviceaccount-selector {
display: none;
}
.login-moreinfo {
display: none;
}
a[href="#/docs"] {
display: none;
}
.login-group .clr-form-control .clr-control-label {
display: none;
}
.appview-separator div.appview-first-row div.center {
display: none;
}
.appview-separator div.appview-first-row section[aria-labelledby="app-secrets"] {
display: none;
}
.appview-first-row section[aria-labelledby="access-urls-title"] {
width: 100%;
}
{{- $dashboardKCconfig := lookup "v1" "ConfigMap" "cozy-dashboard" "kubeapps-auth-config" }}
{{- $dashboardKCValues := dig "data" "values.yaml" "" $dashboardKCconfig }}
{{- if $dashboardKCValues }}
valuesFrom:
- kind: ConfigMap
name: kubeapps-auth-config
valuesKey: values.yaml
{{- end }}

{{- if eq $oidcEnabled "true" }}
dependsOn: [keycloak-configure]
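
As a hedged illustration of the branding hook used by the `lookup` calls above, a cluster operator could create a ConfigMap like the following; the name, namespace, and data keys come from the template, while the values are made-up examples:

apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack-branding
  namespace: cozy-system
data:
  branding: "My Platform"                      # replaces the "Kubeapps" locale string
  logo: url("data:image/svg+xml;base64,...")   # example CSS background-image value (base64 elided)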
@@ -270,6 +329,14 @@ releases:
optional: true
dependsOn: [cilium,kubeovn]

- name: bootbox
releaseName: bootbox
chart: cozy-bootbox
namespace: cozy-bootbox
privileged: true
optional: true
dependsOn: [cilium,kubeovn]

{{- if $oidcEnabled }}
- name: keycloak
releaseName: keycloak
@@ -288,4 +355,14 @@ releases:
chart: cozy-keycloak-configure
namespace: cozy-keycloak
dependsOn: [keycloak-operator]
values:
cozystack:
configHash: {{ $cozyConfig | toJson | sha256sum }}
{{- end }}

- name: goldpinger
releaseName: goldpinger
chart: cozy-goldpinger
namespace: cozy-goldpinger
privileged: true
dependsOn: [monitoring-agents]
@@ -35,6 +35,17 @@ releases:
namespace: cozy-system
dependsOn: []

- name: cozystack-controller
releaseName: cozystack-controller
chart: cozy-cozystack-controller
namespace: cozy-system
dependsOn: []
{{- if eq (index $cozyConfig.data "telemetry-enabled") "false" }}
values:
cozystackController:
disableTelemetry: true
{{- end }}

- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
@@ -59,6 +70,10 @@ releases:
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
values:
scrapeRules:
etcd:
enabled: true

- name: etcd-operator
releaseName: etcd-operator
@@ -82,7 +97,7 @@ releases:
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cert-manager]
dependsOn: [cert-manager,victoria-metrics-operator]

- name: kafka-operator
releaseName: kafka-operator
@@ -140,9 +155,9 @@ releases:
chart: cozy-dashboard
namespace: cozy-dashboard
values:
kubeapps:
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1" "HelmRepository" "cozy-public" "").items }}
kubeapps:
redis:
master:
podAnnotations:
@@ -153,12 +168,49 @@ releases:
{{- end }}
{{- end }}
{{- end }}

{{- $dashboardKCconfig := lookup "v1" "ConfigMap" "cozy-dashboard" "kubeapps-auth-config" }}
{{- $dashboardKCValues := dig "data" "values.yaml" "" $dashboardKCconfig }}
{{- if $dashboardKCValues }}
{{- $dashboardKCValues | nindent 4 }}
{{- end }}
dashboard:
{{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
{{- $branding := dig "data" "branding" "" $cozystackBranding }}
{{- if $branding }}
customLocale:
"Kubeapps": {{ $branding }}
{{- end }}
customStyle: |
{{- $logoImage := dig "data" "logo" "" $cozystackBranding }}
{{- if $logoImage }}
.kubeapps-logo {
background-image: {{ $logoImage }}
}
{{- end }}
#serviceaccount-selector {
display: none;
}
.login-moreinfo {
display: none;
}
a[href="#/docs"] {
display: none;
}
.login-group .clr-form-control .clr-control-label {
display: none;
}
.appview-separator div.appview-first-row div.center {
display: none;
}
.appview-separator div.appview-first-row section[aria-labelledby="app-secrets"] {
display: none;
}
.appview-first-row section[aria-labelledby="access-urls-title"] {
width: 100%;
}
{{- $dashboardKCconfig := lookup "v1" "ConfigMap" "cozy-dashboard" "kubeapps-auth-config" }}
{{- $dashboardKCValues := dig "data" "values.yaml" "" $dashboardKCconfig }}
{{- if $dashboardKCValues }}
valuesFrom:
- kind: ConfigMap
name: kubeapps-auth-config
valuesKey: values.yaml
{{- end }}

{{- if eq $oidcEnabled "true" }}
dependsOn: [keycloak-configure]
@@ -184,4 +236,14 @@ releases:
chart: cozy-keycloak-configure
namespace: cozy-keycloak
dependsOn: [keycloak-operator]
values:
cozystack:
configHash: {{ $cozyConfig | toJson | sha256sum }}
{{- end }}

- name: goldpinger
releaseName: goldpinger
chart: cozy-goldpinger
namespace: cozy-goldpinger
privileged: true
dependsOn: [monitoring-agents]
@@ -36,7 +36,10 @@ image-e2e-sandbox:
copy-hack-dir:
tar -C ../../../ -cf- hack | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- tar -xf-

test: wait-for-sandbox copy-hack-dir ## Run the end-to-end tests in existing sandbox.
copy-image:
cat ../../../_out/assets/nocloud-amd64.raw.xz | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -xec 'xz --decompress > /nocloud-amd64.raw'

test: wait-for-sandbox copy-hack-dir copy-image ## Run the end-to-end tests in existing sandbox.
helm template -n cozy-system installer ../installer | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'cat > /cozystack-installer.yaml'
kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'export COZYSTACK_INSTALLER_YAML=$$(cat /cozystack-installer.yaml) && /hack/e2e.sh'

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.21.0@sha256:38229517c86e179984a6d39f5510b859d13d965e35b216bc01ce456f9ab5f8b5
image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.26.1@sha256:e034c6d4232ffe6f87c24ae44100a63b1869210e484c929efac33ffcf60b18b1
Some files were not shown because too many files have changed in this diff.