Compare commits


1 Commit

Author SHA1 Message Date
Andrei Kvapil  d4615701b6  cilium: disable antispoofing  2025-03-06 21:28:44 +01:00
45 changed files with 161 additions and 367 deletions

View File

@@ -68,7 +68,7 @@ spec:
serviceAccountName: cozystack
containers:
- name: cozystack
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.27.0"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
env:
- name: KUBERNETES_SERVICE_HOST
value: localhost
@@ -87,7 +87,7 @@ spec:
fieldRef:
fieldPath: metadata.name
- name: assets
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.27.0"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
command:
- /usr/bin/cozystack-assets-server
- "-dir=/cozystack/assets"

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.2@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/postgres-backup:0.9.0@sha256:6cc07280c0e2432ed37b2646faf82efe9702c6d93504844744aa505b890cac6f
ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:d1f7692b6761f46f24687d885ec335330280346ae4a9ff28b3179681b36106b7

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:72ced2b1d8da2c784d6231af6cb0752170f6ea845c73effb11adb006b7a7fbb2
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:854b3908114de1876038eb9902577595cce93553ce89bf75ac956d22f1e8b8cc

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.15.2
version: 0.15.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.2@sha256:077023fc24d466ac18f8d43fec41b9a14c0b3d32c0013e836e7448e7a1e7d661
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.1@sha256:73701e37727eedaafdf9efe4baefcf0835f064ee8731219f0c0186c0d0781a5c

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.2@sha256:5ef7198eaaa4e422caa5f3d8f906c908046f1fbaf2d7a1e72b5a98627db3bda8
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.1@sha256:02037bb7a75b35ca1e34924f13e7fa7b25bac2017ddbd7e9ed004c0ff368cce3

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.2@sha256:f862c233399b213e376628ffbb55304f08d171e991371d5bde067b47890cc959
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.1@sha256:a86d8a4722b81e89820ead959874524c4cc86654c22ad73c421bbf717d62c3f3

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:7ce5467b8f34ef7897141b0ca96c455459c2729cae5824a2c20f32b01a841f90
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:6f19f3f8a68372c5b212e98a79ff132cc20641bc46fc4b8d359158945dc04043

View File

@@ -250,7 +250,7 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
namespace: default
version: v1.30.1
---
apiVersion: cluster.x-k8s.io/v1beta1

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.3@sha256:89641695e0c1f4ad7b82697c27a2245bb4a1bc403845235ed0df98e04aa9a71f
ghcr.io/aenix-io/cozystack/mariadb-backup:0.5.2@sha256:9f0b2bc5135e10b29edb2824309059f5b4c4e8b744804b2cf55381171f335675

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/postgres-backup:0.9.0@sha256:6cc07280c0e2432ed37b2646faf82efe9702c6d93504844744aa505b890cac6f
ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:d1f7692b6761f46f24687d885ec335330280346ae4a9ff28b3179681b36106b7

View File

@@ -48,8 +48,7 @@ kubernetes 0.13.0 ced8e5b9
kubernetes 0.14.0 bfbde07c
kubernetes 0.14.1 fde4bcfa
kubernetes 0.15.0 cb7b8158
kubernetes 0.15.1 77df31e1
kubernetes 0.15.2 HEAD
kubernetes 0.15.1 HEAD
mysql 0.1.0 f642698
mysql 0.2.0 8b975ff0
mysql 0.3.0 5ca8823

View File

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/aenix-io/cozystack/cozystack:v0.27.0@sha256:aac04571e99e13653f08e6ccc2b2214032455af547f9a887d01f1483e30d2915
image: ghcr.io/aenix-io/cozystack/cozystack:v0.26.1@sha256:67c6eb4da3baf2208df9b2ed24cbf758a2180bb3a071ce53141c21b8d17263cf

View File

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.27.0@sha256:1380b550c37c7316d924c9827122eb6fbb8e7da9aad8014f90b010b40f6c744d
image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.26.1@sha256:e034c6d4232ffe6f87c24ae44100a63b1869210e484c929efac33ffcf60b18b1

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/matchbox:v0.27.0@sha256:ef53e59943706fd9bce33b021b11ef469b44f97a184661f7ac24eb5f1b57fe9e
ghcr.io/aenix-io/cozystack/matchbox:v0.26.1@sha256:f5d1e0f439f49e980888ed53a4bcc65fa97b1c4bc0df86abaa17de1a5a1f71a3

View File

@@ -3,4 +3,4 @@ name: etcd
description: Storage for Kubernetes clusters
icon: /logos/etcd.svg
type: application
version: 2.6.1
version: 2.6.0

View File

@@ -1,39 +0,0 @@
{{- $shouldUpdateCerts := true }}
{{- $configMap := lookup "v1" "ConfigMap" .Release.Namespace "etcd-deployed-version" }}
{{- if $configMap }}
{{- $deployedVersion := index $configMap "data" "version" }}
{{- if $deployedVersion | semverCompare ">= 2.6.1" }}
{{- $shouldUpdateCerts = false }}
{{- end }}
{{- end }}
{{- if $shouldUpdateCerts }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: etcd-hook
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
spec:
template:
metadata:
labels:
policy.cozystack.io/allow-to-apiserver: "true"
spec:
serviceAccountName: etcd-hook
containers:
- name: kubectl
image: bitnami/kubectl:latest
command:
- sh
args:
- -exc
- |-
kubectl --namespace={{ .Release.Namespace }} delete secrets etcd-ca-tls etcd-peer-ca-tls
sleep 10
kubectl --namespace={{ .Release.Namespace }} delete secrets etcd-client-tls etcd-peer-tls etcd-server-tls
kubectl --namespace={{ .Release.Namespace }} delete pods --selector=app.kubernetes.io/instance=etcd,app.kubernetes.io/managed-by=etcd-operator,app.kubernetes.io/name=etcd,cozystack.io/service=etcd
restartPolicy: Never
{{- end }}

View File

@@ -1,26 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
name: etcd-hook
rules:
- apiGroups:
- ""
resources:
- secrets
- pods
verbs:
- get
- list
- watch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch

View File

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: etcd-hook
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: etcd-hook
subjects:
- kind: ServiceAccount
name: etcd-hook
namespace: {{ .Release.Namespace | quote }}

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: etcd-hook
annotations:
helm.sh/hook: post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded

View File

@@ -1,6 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: etcd-deployed-version
data:
version: {{ .Chart.Version }}

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/grafana:1.8.1@sha256:0377abd3cb2c6e27b12ac297f1859aa4d550f1aa14989f824f2315d0dfd1a5b2
ghcr.io/aenix-io/cozystack/grafana:1.8.0@sha256:0377abd3cb2c6e27b12ac297f1859aa4d550f1aa14989f824f2315d0dfd1a5b2

View File

@@ -8,10 +8,6 @@ spec:
replicationFactor: 2
retentionPeriod: {{ .retentionPeriod | quote }}
vminsert:
extraArgs:
# kubevirt and other systems produce a lot of labels
# it's usually more than default 30
maxLabelsPerTimeseries: "60"
replicaCount: 2
resources:
limits:

View File

@@ -7,8 +7,7 @@ etcd 2.2.0 5ca8823
etcd 2.3.0 b908400d
etcd 2.4.0 cb7b8158
etcd 2.5.0 861e6c46
etcd 2.6.0 a7425b0
etcd 2.6.1 HEAD
etcd 2.6.0 HEAD
info 1.0.0 HEAD
ingress 1.0.0 f642698
ingress 1.1.0 838bee5d

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/s3manager:v0.5.0@sha256:3bf81b4cc5fdd5b99da40a663e15c649b2d992cd933bd56f8bb1bc9dd41a7b11
ghcr.io/aenix-io/cozystack/s3manager:v0.5.0@sha256:efd4a57f1b4b74871181d676dddfcac95c3a3a1e7cc244e21647c6114a0e6438

View File

@@ -1,2 +1,13 @@
ARG VERSION=v1.16.7
FROM quay.io/cilium/cilium-builder:714cfc3420a53a154dba0df63a43bc1378bebffd@sha256:13345d46c1a5b24e3b64c46ff4b334c5bbbbf784b769f1adbb8fad094f177f03 as builder
RUN curl -L https://github.com/cilium/cilium/archive/refs/tags/v1.16.7.tar.gz | tar --strip-components=1 -xzvf -
COPY patches /patches
RUN git apply /patches/*.diff
RUN make -C bpf
#RUN make -C daemon
FROM quay.io/cilium/cilium:${VERSION}
#COPY --from=builder /go/src/github.com/cilium/cilium/daemon/cilium-agent /usr/bin/cilium-agent
COPY --from=builder /go/src/github.com/cilium/cilium/bpf /var/lib/cilium/bpf

View File

@@ -0,0 +1,24 @@
diff --git a/bpf/bpf_lxc.c b/bpf/bpf_lxc.c
index 36ecfde895..39872d35c5 100644
--- a/bpf/bpf_lxc.c
+++ b/bpf/bpf_lxc.c
@@ -796,9 +796,6 @@ static __always_inline int __tail_handle_ipv6(struct __ctx_buff *ctx,
if (unlikely(is_icmp6_ndp(ctx, ip6, ETH_HLEN)))
return icmp6_ndp_handle(ctx, ETH_HLEN, METRIC_EGRESS, ext_err);
- if (unlikely(!is_valid_lxc_src_ip(ip6)))
- return DROP_INVALID_SIP;
-
#ifdef ENABLE_PER_PACKET_LB
/* will tailcall internally or return error */
return __per_packet_lb_svc_xlate_6(ctx, ip6, ext_err);
@@ -1361,9 +1358,6 @@ static __always_inline int __tail_handle_ipv4(struct __ctx_buff *ctx,
return DROP_FRAG_NOSUPPORT;
#endif
- if (unlikely(!is_valid_lxc_src_ipv4(ip4)))
- return DROP_INVALID_SIP;
-
#ifdef ENABLE_MULTICAST
if (mcast_ipv4_is_igmp(ip4)) {
/* note:

View File

@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/aenix-io/cozystack/cozystack-api:v0.27.0@sha256:054adb2c2c3b380304e77a3f91428fc1d563d7ed2c1aab5d8ee0c5857b1dde99
image: ghcr.io/aenix-io/cozystack/cozystack-api:v0.26.1@sha256:d4f2ad6e8e7b7578337c2c78649e95fcf658f2d8a242bcf6629be21c431f66e7

View File

@@ -1,5 +1,5 @@
cozystackController:
image: ghcr.io/aenix-io/cozystack/cozystack-controller:v0.27.0@sha256:c97b2517aafdc1e906012c9604c792cb744ff1d3017d7c0c3836808dc308b835
image: ghcr.io/aenix-io/cozystack/cozystack-controller:v0.26.1@sha256:186df3406dd2a75f59872ff7d11fe92b6e4ce5787f76da3bc7ad670358ea40fb
debug: false
disableTelemetry: false
cozystackVersion: "v0.27.0"
cozystackVersion: "v0.26.1"

View File

@@ -76,7 +76,7 @@ data:
"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.27.0",
"appVersion": "v0.26.1",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},

View File

@@ -18,14 +18,14 @@ kubeapps:
image:
registry: ghcr.io/aenix-io/cozystack
repository: dashboard
tag: v0.27.0
digest: "sha256:a363361571a7740c8544ecc22745e426ad051068a6bbe62d7e7d5e91df4d988e"
tag: v0.26.1
digest: "sha256:c1baa0d3f19201069da28a443a50f0dff1df53b2cbd2e8cfcb9201d25cd6bfc0"
kubeappsapis:
image:
registry: ghcr.io/aenix-io/cozystack
repository: kubeapps-apis
tag: v0.27.0
digest: "sha256:dcffdd5a02433a4caec7b5e9753847cbeb05f2004146c38ec7cee44d02179423"
tag: v0.26.1
digest: "sha256:55694bd7d7fd7948e7cac7b511635da01515dfb34f224ee9e7de7acf54cf6e81"
pluginConfig:
flux:
packages:

View File

@@ -1,13 +0,0 @@
diff --git a/internal/resources/kubeadm_config.go b/internal/resources/kubeadm_config.go
index ae4cfc0..ec7a7da 100644
--- a/internal/resources/kubeadm_config.go
+++ b/internal/resources/kubeadm_config.go
@@ -96,7 +96,7 @@ func (r *KubeadmConfigResource) mutate(ctx context.Context, tenantControlPlane *
TenantControlPlanePort: port,
TenantControlPlaneName: tenantControlPlane.GetName(),
TenantControlPlaneNamespace: tenantControlPlane.GetNamespace(),
- TenantControlPlaneEndpoint: r.getControlPlaneEndpoint(tenantControlPlane.Spec.ControlPlane.Ingress, address, port),
+ TenantControlPlaneEndpoint: r.getControlPlaneEndpoint(tenantControlPlane.Spec.ControlPlane.Ingress, address, 443),
TenantControlPlaneCertSANs: tenantControlPlane.Spec.NetworkProfile.CertSANs,
TenantControlPlaneClusterDomain: tenantControlPlane.Spec.NetworkProfile.ClusterDomain,
TenantControlPlanePodCIDR: tenantControlPlane.Spec.NetworkProfile.PodCIDR,

View File

@@ -3,7 +3,7 @@ kamaji:
deploy: false
image:
pullPolicy: IfNotPresent
tag: v0.27.0@sha256:686348fc4a496ec76aac7d6af9e59e67d5d29af95dd73427054c0019ffc045e6
tag: v0.26.1@sha256:a0504cdab3d36d144999d9b4a8729c53c016095d6958d3cae1acf8699f2fb0b9
repository: ghcr.io/aenix-io/cozystack/kamaji
resources:
limits:

View File

@@ -22,4 +22,4 @@ global:
images:
kubeovn:
repository: kubeovn
tag: v1.13.2@sha256:5ce804458e9b14856300a5bbfa3ecac6cd47203759bbb8a4e62ddb5f0684ed7b
tag: v1.13.2@sha256:d3fa76c0cc48207aef15ff27f6332a3f8570e3db77fb97720af8505b812cdf61

View File

@@ -1,25 +0,0 @@
#!/bin/bash
set -e
terminate() {
echo "Caught signal, terminating"
exit 0
}
trap terminate SIGINT SIGQUIT SIGTERM
echo "Running Linstor controller plunger:"
cat "${0}"
while true; do
# timeout at the start of the loop to give some time for the linstor-controller to start
sleep 30 &
pid=$!
wait $pid
# workaround for https://github.com/LINBIT/linstor-server/issues/437
# try to delete snapshots that are stuck in the DELETE state
linstor -m s l \
| jq -r '.[][] | select(.flags | contains(["DELETE"])) | "linstor snapshot delete \(.resource_name) \(.name)"' \
| sh -x
done

View File

@@ -1,41 +0,0 @@
#!/bin/bash
set -e
terminate() {
echo "Caught signal, terminating"
exit 0
}
trap terminate SIGINT SIGQUIT SIGTERM
echo "Running Linstor per-satellite plunger:"
cat "${0}"
while true; do
# timeout at the start of the loop to give a chance for the fresh linstor-satellite instance to cleanup itself
sleep 30 &
pid=$!
wait $pid
# Detect orphaned loop devices and detach them
# the `/` path could not be a backing file for a loop device, so it's a good indicator of a stuck loop device
# TODO describe the issue in more detail
losetup --json \
| jq -r '.[][]
| select(."back-file" == "/ (deleted)")
| "echo Detaching stuck loop device \(.name);
set -x;
losetup --detach \(.name)"' \
| sh
# Detect secondary volumes that lost connection and can be simply reconnected
disconnected_secondaries=$(drbdadm status | awk '/pvc-.*role:Secondary.*force-io-failures:yes/ {print $1}')
for secondary in $disconnected_secondaries; do (
echo "Trying to reconnect secondary volume ${secondary}"
set -x
drbdadm down "${secondary}"
drbdadm up "${secondary}"
); done
done

View File

@@ -1,24 +0,0 @@
{{- define "cozy.linstor.version" -}}
{{- $piraeusConfigMap := lookup "v1" "ConfigMap" "cozy-linstor" "piraeus-operator-image-config"}}
{{- if not $piraeusConfigMap }}
{{- fail "Piraeus controller is not yet installed, ConfigMap cozy-linstor/piraeus-operator-image-config is missing" }}
{{- end }}
{{- $piraeusImagesConfig := $piraeusConfigMap | dig "data" "0_piraeus_datastore_images.yaml" nil | required "No image config" | fromYaml }}
base: {{ $piraeusImagesConfig.base | required "No image base in piraeus config" }}
controller:
image: {{ $piraeusImagesConfig | dig "components" "linstor-controller" "image" nil | required "No controller image" }}
tag: {{ $piraeusImagesConfig | dig "components" "linstor-controller" "tag" nil | required "No controller tag" }}
satellite:
image: {{ $piraeusImagesConfig | dig "components" "linstor-satellite" "image" nil | required "No satellite image" }}
tag: {{ $piraeusImagesConfig | dig "components" "linstor-satellite" "tag" nil | required "No satellite tag" }}
{{- end -}}
{{- define "cozy.linstor.version.controller" -}}
{{- $version := (include "cozy.linstor.version" .) | fromYaml }}
{{- printf "%s/%s:%s" $version.base $version.controller.image $version.controller.tag }}
{{- end -}}
{{- define "cozy.linstor.version.satellite" -}}
{{- $version := (include "cozy.linstor.version" .) | fromYaml }}
{{- printf "%s/%s:%s" $version.base $version.satellite.image $version.satellite.tag }}
{{- end -}}

View File

@@ -13,33 +13,3 @@ spec:
certManager:
name: linstor-api-ca
kind: Issuer
controller:
enabled: true
podTemplate:
spec:
containers:
- name: plunger
image: {{ include "cozy.linstor.version.controller" . }}
command:
- "/scripts/plunger-controller.sh"
securityContext:
capabilities:
drop:
- ALL
# make some room for live debugging
readOnlyRootFilesystem: false
volumeMounts:
- mountPath: /etc/linstor/client
name: client-tls
readOnly: true
- mountPath: /etc/linstor
name: etc-linstor
readOnly: true
- mountPath: /scripts
name: script-volume
readOnly: true
volumes:
- name: script-volume
configMap:
name: linstor-plunger
defaultMode: 0755

View File

@@ -1,13 +0,0 @@
{{- $files := .Files.Glob "hack/plunger/*.sh" -}}
{{/* TODO Add checksum of scripts to the pod selectors */}}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: linstor-plunger
namespace: cozy-linstor
data:
{{- range $path, $file := $files }}
{{ $path | base }}: |
{{- $file | toString | nindent 4 }}
{{- end -}}

View File

@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: linstor-plunger
namespace: cozy-linstor
data:
plunger.sh: |
#!/bin/bash
set -e
while true; do
# workaround for https://github.com/LINBIT/linstor-server/issues/437
linstor -m s l | jq -r '.[][] | select(.flags | contains(["DELETE"])) | "linstor snapshot delete \(.resource_name) \(.name)"' | sh -x
sleep 1m
done

View File

@@ -0,0 +1,52 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: linstor-plunger
namespace: cozy-linstor
spec:
replicas: 1
selector:
matchLabels:
app: linstor-plunger
template:
metadata:
labels:
app: linstor-plunger
annotations:
checksum/config: {{ include (print $.Template.BasePath "/plunger/configmap.yaml") . | sha256sum }}
spec:
containers:
- name: plunger
image: quay.io/piraeusdatastore/piraeus-server:v1.29.2
command: ["/bin/bash", "/scripts/plunger.sh"]
volumeMounts:
- mountPath: /etc/linstor/client
name: client-tls
readOnly: true
- mountPath: /etc/linstor
name: etc-linstor
readOnly: true
- mountPath: /scripts
name: script-volume
readOnly: true
enableServiceLinks: false
serviceAccountName: linstor-controller
tolerations:
- effect: NoSchedule
key: drbd.linbit.com/lost-quorum
- effect: NoSchedule
key: drbd.linbit.com/force-io-error
volumes:
- name: client-tls
projected:
sources:
- secret:
name: linstor-client-tls
- name: etc-linstor
configMap:
name: linstor-controller-config
- name: script-volume
configMap:
name: linstor-plunger
defaultMode: 0755

View File

@@ -1,18 +0,0 @@
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
name: cozystack
spec:
internalTLS:
certManager:
name: linstor-internal-ca
kind: Issuer
podTemplate:
spec:
# host-network is recommended by Piraeus while it is not default in the upstream
hostNetwork: true
containers:
- name: linstor-satellite
securityContext:
# real-world installations need some debugging from time to time
readOnlyRootFilesystem: false

View File

@@ -1,52 +0,0 @@
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
name: cozystack-plunger
spec:
internalTLS:
certManager:
name: linstor-internal-ca
kind: Issuer
podTemplate:
spec:
containers:
- name: plunger
image: {{ include "cozy.linstor.version.satellite" . }}
command:
- "/scripts/plunger-satellite.sh"
securityContext:
capabilities:
drop:
- ALL
# make some room for live debugging
readOnlyRootFilesystem: false
volumeMounts:
- mountPath: /run
name: host-run
- mountPath: /dev
name: dev
- mountPath: /var/lib/drbd
name: var-lib-drbd
- mountPath: /var/lib/linstor.d
name: var-lib-linstor-d
- mountPath: /etc/lvm
name: container-etc-lvm
- mountPath: /etc/lvm/archive
name: etc-lvm-archive
- mountPath: /etc/lvm/backup
name: etc-lvm-backup
- mountPath: /run/lock/lvm
name: run-lock-lvm
- mountPath: /run/lvm
name: run-lvm
- mountPath: /run/udev
name: run-udev
readOnly: true
- mountPath: /scripts
name: script-volume
readOnly: true
volumes:
- name: script-volume
configMap:
name: linstor-plunger
defaultMode: 0755

View File

@@ -1,33 +1,40 @@
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
name: cozystack-talos
name: linstor-satellites
spec:
internalTLS:
certManager:
name: linstor-internal-ca
kind: Issuer
#storagePools:
#- name: "data"
# lvmPool:
# volumeGroup: "data"
patches:
- target:
group: apps
version: v1
kind: DaemonSet
name: linstor-satellite
patch: |
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: linstor-satellite
spec:
template:
spec:
initContainers:
- target:
kind: Pod
name: satellite
patch: |
apiVersion: v1
kind: Pod
metadata:
name: satellite
spec:
hostNetwork: true
initContainers:
- name: drbd-shutdown-guard
$patch: delete
- name: drbd-module-loader
$patch: delete
containers:
- name: linstor-satellite
volumeMounts:
- mountPath: /run
name: host-run
volumes:
containers:
- name: linstor-satellite
volumeMounts:
- mountPath: /run
name: host-run
securityContext:
readOnlyRootFilesystem: false
volumes:
- name: run-systemd-system
$patch: delete
- name: run-drbd-shutdown-guard