add piraeus and linstor

This commit is contained in:
Andrei Kvapil
2023-12-10 20:47:29 +01:00
parent 80a61c813a
commit 1a4f198c4f
33 changed files with 2852 additions and 0 deletions

View File

@@ -0,0 +1 @@
examples

View File

@@ -0,0 +1,2 @@
name: app
version: 0.0.0

1
system/linstor/Makefile Normal file
View File

@@ -0,0 +1 @@
include ../../hack/app-helm.mk

9
system/linstor/README.md Normal file
View File

@@ -0,0 +1,9 @@
# LINSTOR
DRBD and LVM storage provisioner
- Docs: https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/
- Docs: https://habr.com/ru/companies/flant/articles/680286/
- GitHub: https://github.com/LINBIT/linstor-server
- Docs: https://piraeus.io/site/docs/intro/
- GitHub: https://github.com/piraeusdatastore/piraeus-operator

View File

@@ -0,0 +1,28 @@
---
# Example: claim 8Gi from the default (LINSTOR) StorageClass and mount it
# into an nginx pod.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 8Gi
  #storageClassName: linstor-lvm
---
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: myfrontend
      image: nginx
      volumeMounts:
        - mountPath: "/var/www/html"
          name: mypd
  volumes:
    - name: mypd
      persistentVolumeClaim:
        claimName: myclaim

View File

@@ -0,0 +1,9 @@
# Install the DRBD kernel module on a Proxmox node and label it for LINSTOR.
wget -O /tmp/package-signing-pubkey.asc https://packages.linbit.com/package-signing-pubkey.asc
gpg --yes -o /etc/apt/trusted.gpg.d/linbit-keyring.gpg --dearmor /tmp/package-signing-pubkey.asc
# Proxmox major version, e.g. "7" from "pve-manager/7.4-3/..."
PVERS=$(pveversion | awk -F'[/.]' '{print $2}')
# Add LINBIT as a dedicated sources.list.d entry; writing to /etc/apt/sources.list
# would wipe out every other repository configured on the node.
echo "deb [signed-by=/etc/apt/trusted.gpg.d/linbit-keyring.gpg] http://packages.linbit.com/public/ proxmox-$PVERS drbd-9" > /etc/apt/sources.list.d/linbit.list
apt update && apt -y install drbd-dkms
# Disable the in-kernel usermode helper and load DRBD now and on every boot.
echo "options drbd usermode_helper=disabled" > /etc/modprobe.d/drbd.conf
echo drbd > /etc/modules-load.d/drbd.conf
modprobe drbd
kubectl label node "${HOSTNAME}" node-role.kubernetes.io/linstor= --overwrite

View File

@@ -0,0 +1,15 @@
# LINSTOR controller cluster, restricted to nodes labelled for LINSTOR,
# with cert-manager-issued TLS for both the internal and the API endpoints.
apiVersion: piraeus.io/v1
kind: LinstorCluster
metadata:
  name: linstorcluster
spec:
  nodeSelector:
    node-role.kubernetes.io/linstor: ""
  internalTLS:
    certManager:
      name: linstor-internal-ca
      kind: Issuer
  apiTLS:
    certManager:
      name: linstor-api-ca
      kind: Issuer

View File

@@ -0,0 +1,27 @@
---
# Self-signed CA for the LINSTOR API, bootstrapped by the ca-bootstrapper
# Issuer, plus an Issuer that signs leaf certificates from it.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: linstor-api-ca
  # NOTE: Helm's built-in is .Release.Namespace (capital N);
  # .Release.namespace is undefined and renders as an empty string.
  namespace: {{ .Release.Namespace }}
spec:
  commonName: linstor-api-ca
  secretName: linstor-api-ca
  duration: 87600h # 10 years
  isCA: true
  usages:
    - signing
    - key encipherment
    - cert sign
  issuerRef:
    name: ca-bootstrapper
    kind: Issuer
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: linstor-api-ca
  namespace: {{ .Release.Namespace }}
spec:
  ca:
    secretName: linstor-api-ca

View File

@@ -0,0 +1,27 @@
---
# Self-signed CA for LINSTOR's internal (controller <-> satellite) traffic,
# plus an Issuer that signs leaf certificates from it.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: linstor-internal-ca
  # NOTE: Helm's built-in is .Release.Namespace (capital N);
  # .Release.namespace is undefined and renders as an empty string.
  namespace: {{ .Release.Namespace }}
spec:
  commonName: linstor-internal-ca
  secretName: linstor-internal-ca
  duration: 87600h # 10 years
  isCA: true
  usages:
    - signing
    - key encipherment
    - cert sign
  issuerRef:
    name: ca-bootstrapper
    kind: Issuer
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: linstor-internal-ca
  namespace: {{ .Release.Namespace }}
spec:
  ca:
    secretName: linstor-internal-ca

View File

@@ -0,0 +1,48 @@
# Satellite configuration: internal TLS via cert-manager, one LVM storage
# pool, and a strategic-merge patch that adapts the satellite pod to hosts
# where systemd and kernel sources are unavailable (DRBD is preloaded on
# the node, so the loader/shutdown-guard sidecars and their volumes are
# removed; LVM metadata dirs are redirected to a writable host path).
apiVersion: piraeus.io/v1
kind: LinstorSatelliteConfiguration
metadata:
  name: linstor-satellites
spec:
  internalTLS:
    certManager:
      name: linstor-internal-ca
      kind: Issuer
  storagePools:
    - name: "data"
      lvmPool:
        volumeGroup: "data"
  patches:
    - target:
        kind: Pod
        name: satellite
      patch: |
        apiVersion: v1
        kind: Pod
        metadata:
          name: satellite
        spec:
          hostNetwork: true
          initContainers:
            - name: drbd-shutdown-guard
              $patch: delete
            - name: drbd-module-loader
              $patch: delete
          volumes:
            - name: run-systemd-system
              $patch: delete
            - name: run-drbd-shutdown-guard
              $patch: delete
            - name: systemd-bus-socket
              $patch: delete
            - name: lib-modules
              $patch: delete
            - name: usr-src
              $patch: delete
            - name: etc-lvm-backup
              hostPath:
                path: /var/etc/lvm/backup
                type: DirectoryOrCreate
            - name: etc-lvm-archive
              hostPath:
                path: /var/etc/lvm/archive
                type: DirectoryOrCreate

View File

@@ -0,0 +1,30 @@
# Two StorageClasses backed by the "data" LVM pool:
#   linstor-lvm      — local-only volumes (default class)
#   linstor-drbd-lvm — 3-way DRBD-replicated volumes with quorum tuning
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: linstor-lvm
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: linstor-drbd-lvm
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true

View File

@@ -0,0 +1,9 @@
# Platform deployment descriptor for the linstor app.
_helm:
  name: linstor
  namespace: cozy-linstor
  createNamespace: true
  privilegedNamespace: true
  crds: CreateReplace
  dependsOn:
    - name: cert-manager
    - name: piraeus-operator

View File

@@ -0,0 +1 @@
examples

View File

@@ -0,0 +1,2 @@
name: app
version: 0.0.0

View File

@@ -0,0 +1,7 @@
include ../../hack/app-helm.mk
update:
rm -rf charts
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/piraeusdatastore/piraeus-operator | awk -F'[/^]' 'END{print $$3}') && \
curl -sSL https://github.com/piraeusdatastore/piraeus-operator/archive/refs/tags/$${tag}.tar.gz | \
tar xzvf - --strip 1 piraeus-operator-$${tag#*v}/charts

View File

@@ -0,0 +1,12 @@
# Piraeus Operator
The Piraeus Operator manages LINSTOR clusters in Kubernetes.
We use v2 with [patches](https://github.com/piraeusdatastore/piraeus-operator/blob/v2/docs/how-to/talos.md) for Talos
- Docs: https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/
- Docs: https://habr.com/ru/companies/flant/articles/680286/
- Docs: https://github.com/piraeusdatastore/piraeus-operator/tree/v2/docs
- GitHub: https://github.com/LINBIT/linstor-server
- Docs: https://piraeus.io/site/docs/intro/
- GitHub: https://github.com/piraeusdatastore/piraeus-operator

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,18 @@
apiVersion: v2
name: piraeus
description: |
  The Piraeus Operator manages software defined storage clusters using LINSTOR in Kubernetes.
type: application
version: 2.3.0
appVersion: "v2.3.0"
maintainers:
  - name: Piraeus Datastore
    url: https://piraeus.io
home: https://piraeus.io
icon: https://raw.githubusercontent.com/piraeusdatastore/piraeus/master/artwork/sandbox-artwork/icon/color.svg
keywords:
  - storage
sources:
  - https://github.com/piraeusdatastore/piraeus-operator
  - https://github.com/piraeusdatastore/linstor-csi
  - https://github.com/LINBIT/linstor-server

View File

@@ -0,0 +1,35 @@
# Piraeus Operator
Deploys the [Piraeus Operator](https://github.com/piraeusdatastore/piraeus-operator) which deploys and manages a simple
and resilient storage solution for Kubernetes.
The main deployment method for Piraeus Operator switched to [`kustomize`](../../docs/tutorial)
in release `v2.0.0`. This chart is intended for users who want to continue using Helm.
This chart **only** configures the Operator, but does not create the `LinstorCluster` resource creating the actual
storage system. Refer to the existing [tutorials](../../docs/tutorial)
and [how-to guides](../../docs/how-to).
## Deploying Piraeus Operator
To deploy Piraeus Operator with Helm, clone this repository and deploy the chart:
```
$ git clone --branch v2 https://github.com/piraeusdatastore/piraeus-operator
$ cd piraeus-operator
$ helm install piraeus-operator charts/piraeus-operator --create-namespace -n piraeus-datastore
```
Follow the instructions printed by Helm to create your storage cluster:
```
$ kubectl apply -f - <<EOF
apiVersion: piraeus.io/v1
kind: LinstorCluster
metadata:
name: linstorcluster
spec: {}
EOF
```
Check out our [documentation](../../docs) for more information.

View File

@@ -0,0 +1,40 @@
Piraeus Operator installed.
{{- $piraeusResources := .Capabilities.APIVersions.Has "piraeus.linbit.com/v1/LinstorController" }}
{{- $linbitResources := .Capabilities.APIVersions.Has "linstor.linbit.com/v1/LinstorController" }}
{{- if or $piraeusResources $linbitResources }}
{{- fail `
Refusing to upgrade from v1 deployment!
=======================================
Please read the upgrade instructions at:
https://github.com/piraeusdatastore/piraeus-operator/blob/v2/docs/how-to/upgrade/index.md
` }}
{{- end }}
{{- if not (.Capabilities.APIVersions.Has "piraeus.io/v1/LinstorCluster") }}
It looks like the necessary CRDs for Piraeus Operator are still missing.
To apply them via helm now use:
helm upgrade {{ .Release.Name }} ./charts/piraeus --reuse-values --set installCRDs=true
Alternatively, you can manage them manually:
kubectl apply --server-side -k "https://github.com/piraeusdatastore/piraeus-operator//config/crd?ref=v2"
{{- end }}
To get started with Piraeus, simply run:
$ kubectl apply -f - <<EOF
apiVersion: piraeus.io/v1
kind: LinstorCluster
metadata:
name: linstorcluster
spec: {}
EOF
For next steps, check out our documentation at https://github.com/piraeusdatastore/piraeus-operator/tree/v2/docs

View File

@@ -0,0 +1,74 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "piraeus-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "piraeus-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "piraeus-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "piraeus-operator.labels" -}}
helm.sh/chart: {{ include "piraeus-operator.chart" . }}
{{ include "piraeus-operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "piraeus-operator.selectorLabels" -}}
app.kubernetes.io/component: piraeus-operator
app.kubernetes.io/name: piraeus-datastore
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "piraeus-operator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "piraeus-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Certificate secret name: .Values.tls.certificateSecret if set, otherwise
"<fullname>-tls".
NOTE(review): "certifcate" is a typo in the template name, but this chart
references the helper under this exact name elsewhere — do not rename it
in isolation.
*/}}
{{- define "piraeus-operator.certifcateName" -}}
{{- if .Values.tls.certificateSecret }}
{{- .Values.tls.certificateSecret }}
{{- else }}
{{- include "piraeus-operator.fullname" . }}-tls
{{- end }}
{{- end }}

View File

@@ -0,0 +1,95 @@
# DO NOT EDIT; Automatically created by hack/copy-image-config-to-chart.sh
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "piraeus-operator.fullname" . }}-image-config
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
data:
0_piraeus_datastore_images.yaml: |
---
# This is the configuration for default images used by piraeus-operator
#
# "base" is the default repository prefix to use.
base: quay.io/piraeusdatastore
# "components" is a mapping of image placeholders to actual image names with tag.
# For example, the image name "linstor-controller" in the kustomize-resources will be replaced by:
# quay.io/piraeusdatastore/piraeus-server:v1.24.2
components:
linstor-controller:
tag: v1.25.1
image: piraeus-server
linstor-satellite:
tag: v1.25.1
image: piraeus-server
linstor-csi:
tag: v1.3.0
image: piraeus-csi
drbd-reactor:
tag: v1.4.0
image: drbd-reactor
ha-controller:
tag: v1.1.4
image: piraeus-ha-controller
drbd-shutdown-guard:
tag: v1.0.0
image: drbd-shutdown-guard
ktls-utils:
tag: v0.10
image: ktls-utils
drbd-module-loader:
tag: v9.2.6
# The special "match" attribute is used to select an image based on the node's reported OS.
# The operator will first check the k8s node's ".status.nodeInfo.osImage" field, and compare it against the list
# here. If one matches, that specific image name will be used instead of the fallback image.
image: drbd9-jammy # Fallback image: chose a fairly recent kernel, which can hopefully compile whatever config is actually in use
match:
- osImage: CentOS Linux 7
image: drbd9-centos7
- osImage: CentOS Linux 8
image: drbd9-centos8
- osImage: AlmaLinux 8
image: drbd9-almalinux8
- osImage: Red Hat Enterprise Linux CoreOS
image: drbd9-almalinux8
- osImage: AlmaLinux 9
image: drbd9-almalinux9
- osImage: Ubuntu 18\.04
image: drbd9-bionic
- osImage: Ubuntu 20\.04
image: drbd9-focal
- osImage: Ubuntu 22\.04
image: drbd9-jammy
- osImage: Debian GNU/Linux 11
image: drbd9-bullseye
- osImage: Debian GNU/Linux 10
image: drbd9-buster
0_sig_storage_images.yaml: |
---
base: registry.k8s.io/sig-storage
components:
csi-attacher:
tag: v4.4.2
image: csi-attacher
csi-livenessprobe:
tag: v2.11.0
image: livenessprobe
csi-provisioner:
tag: v3.6.2
image: csi-provisioner
csi-snapshotter:
tag: v6.3.2
image: csi-snapshotter
csi-resizer:
tag: v1.9.2
image: csi-resizer
csi-external-health-monitor-controller:
tag: v0.10.0
image: csi-external-health-monitor-controller
csi-node-driver-registrar:
tag: v2.9.1
image: csi-node-driver-registrar
{{- range $idx, $value := .Values.imageConfigOverride }}
{{ add $idx 1 }}_helm_override.yaml: |
{{- $value | toYaml | nindent 4 }}
{{- end }}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,101 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "piraeus-operator.fullname" . }}-controller-manager
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "piraeus-operator.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "piraeus-operator.selectorLabels" . | nindent 8 }}
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --health-probe-bind-address=:8081
{{- if .Values.kubeRbacProxy.enabled }}
- --metrics-bind-address=127.0.0.1:8080
{{- else }}
- --metrics-bind-address=0
{{- end }}
{{- range $opt, $val := .Values.operator.options }}
- --{{ $opt | kebabcase }}={{ $val }}
{{- end }}
command:
- /manager
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: IMAGE_CONFIG_MAP_NAME
value: {{ include "piraeus-operator.fullname" . }}-image-config
image: {{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag | default .Chart.AppVersion }}
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
{{- toYaml .Values.operator.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.operator.securityContext | nindent 12}}
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
{{- if .Values.kubeRbacProxy.enabled }}
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --tls-private-key-file=/etc/tls/tls.key
- --tls-cert-file=/etc/tls/tls.crt
{{- range $opt, $val := .Values.kubeRbacProxy.options }}
- --{{ $opt | kebabcase }}={{ $val }}
{{- end }}
image: {{ .Values.kubeRbacProxy.image.repository }}:{{ .Values.kubeRbacProxy.image.tag }}
imagePullPolicy: {{ .Values.kubeRbacProxy.image.pullPolicy }}
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
{{- toYaml .Values.kubeRbacProxy.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.kubeRbacProxy.securityContext | nindent 12}}
volumeMounts:
- mountPath: /etc/tls
name: cert
{{- end }}
securityContext:
runAsNonRoot: true
serviceAccountName: {{ include "piraeus-operator.serviceAccountName" . }}
terminationGracePeriodSeconds: 10
tolerations:
{{- toYaml .Values.tolerations | nindent 8 }}
volumes:
- name: cert
secret:
defaultMode: 420
secretName: {{ include "piraeus-operator.certifcateName" . }}

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "piraeus-operator.fullname" . }}-controller-manager-metrics-service
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
spec:
type: ClusterIP
selector:
{{- include "piraeus-operator.selectorLabels" . | nindent 4 }}
ports:
- name: metrics
port: 443
targetPort: 8443

View File

@@ -0,0 +1,461 @@
{{ if .Values.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "piraeus-operator.serviceAccountName" . }}
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "piraeus-operator.fullname" . }}-controller-manager
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- events
- persistentvolumes
- secrets
- serviceaccounts
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
- persistentvolumeclaims
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- delete
- list
- watch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- daemonsets
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- replicasets
verbs:
- get
- apiGroups:
- cert-manager.io
resources:
- certificates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- events.k8s.io
resources:
- events
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- internal.linstor.linbit.com
resources:
- '*'
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- piraeus.io
resources:
- linstorclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- piraeus.io
resources:
- linstorclusters/finalizers
verbs:
- update
- apiGroups:
- piraeus.io
resources:
- linstorclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- piraeus.io
resources:
- linstornodeconnections
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- piraeus.io
resources:
- linstornodeconnections/finalizers
verbs:
- update
- apiGroups:
- piraeus.io
resources:
- linstornodeconnections/status
verbs:
- get
- patch
- update
- apiGroups:
- piraeus.io
resources:
- linstorsatelliteconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- piraeus.io
resources:
- linstorsatelliteconfigurations/status
verbs:
- get
- patch
- update
- apiGroups:
- piraeus.io
resources:
- linstorsatellites
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- piraeus.io
resources:
- linstorsatellites/finalizers
verbs:
- update
- apiGroups:
- piraeus.io
resources:
- linstorsatellites/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- rolebindings
- roles
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- security.openshift.io
resourceNames:
- privileged
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
- volumesnapshots
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- patch
- update
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- patch
- watch
- apiGroups:
- storage.k8s.io
resources:
- csistoragecapacities
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- delete
- get
- list
- patch
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "piraeus-operator.fullname" . }}-manager-rolebinding
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: '{{ include "piraeus-operator.fullname" . }}-controller-manager'
subjects:
- kind: ServiceAccount
name: '{{ include "piraeus-operator.serviceAccountName" . }}'
namespace: '{{ .Release.Namespace }}'
{{ end }}
{{ if .Values.rbac.create }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "piraeus-operator.fullname" . }}-proxy-role
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "piraeus-operator.fullname" . }}-proxy-rolebinding
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: '{{ include "piraeus-operator.fullname" . }}-proxy-role'
subjects:
- kind: ServiceAccount
name: {{ include "piraeus-operator.serviceAccountName" . }}
namespace: '{{ .Release.Namespace }}'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "piraeus-operator.fullname" . }}-leader-election-role
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "piraeus-operator.fullname" . }}-leader-election-rolebinding
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: '{{ include "piraeus-operator.fullname" . }}-leader-election-role'
subjects:
- kind: ServiceAccount
name: {{ include "piraeus-operator.serviceAccountName" . }}
namespace: '{{ .Release.Namespace }}'
{{ end }}

View File

@@ -0,0 +1,154 @@
# Check if the TLS secret already exists and initialize variables for later use at the top level
{{- $secret := lookup "v1" "Secret" .Release.Namespace (include "piraeus-operator.certifcateName" .) }}
{{ $ca := "" }}
{{ $key := "" }}
{{ $crt := "" }}
{{- if .Values.tls.certManagerIssuerRef }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "piraeus-operator.fullname" . }}
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
spec:
secretName: {{ include "piraeus-operator.certifcateName" . }}
dnsNames:
- {{ include "piraeus-operator.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc
issuerRef:
{{- toYaml .Values.tls.certManagerIssuerRef | nindent 4 }}
privateKey:
rotationPolicy: Always
---
{{- else if .Values.tls.autogenerate }}
{{- if and $secret (not .Values.tls.renew) }}
{{- $ca = get $secret.data "ca.crt" }}
{{- $key = get $secret.data "tls.key" }}
{{- $crt = get $secret.data "tls.crt" }}
{{- else }}
{{- $serviceName := (printf "%s-webhook-service.%s.svc" (include "piraeus-operator.fullname" .) .Release.Namespace)}}
{{- $cert := genSelfSignedCert $serviceName nil (list $serviceName) 3650 }}
{{- $ca = b64enc $cert.Cert }}
{{- $key = b64enc $cert.Key }}
{{- $crt = b64enc $cert.Cert }}
{{- end }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "piraeus-operator.certifcateName" . }}
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
type: kubernetes.io/tls
data:
ca.crt: {{ $ca }}
tls.key: {{ $key }}
tls.crt: {{ $crt }}
{{- end }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: {{ include "piraeus-operator.fullname" . }}-validating-webhook-configuration
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
{{- if .Values.tls.certManagerIssuerRef }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "piraeus-operator.fullname" . }}
{{- end }}
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "piraeus-operator.fullname" . }}-webhook-service'
namespace: '{{ .Release.Namespace }}'
path: /validate-piraeus-io-v1-linstorcluster
{{- if not .Values.tls.certManagerIssuerRef }}
caBundle: {{ $ca }}
{{- end }}
failurePolicy: {{ .Values.webhook.failurePolicy }}
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
name: vlinstorcluster.kb.io
rules:
- apiGroups:
- piraeus.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- linstorclusters
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "piraeus-operator.fullname" . }}-webhook-service'
namespace: '{{ .Release.Namespace }}'
path: /validate-piraeus-io-v1-linstornodeconnection
{{- if not .Values.tls.certManagerIssuerRef }}
caBundle: {{ $ca }}
{{- end }}
failurePolicy: {{ .Values.webhook.failurePolicy }}
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
name: vlinstornodeconnection.kb.io
rules:
- apiGroups:
- piraeus.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- linstornodeconnections
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "piraeus-operator.fullname" . }}-webhook-service'
namespace: '{{ .Release.Namespace }}'
path: /validate-piraeus-io-v1-linstorsatellite
{{- if not .Values.tls.certManagerIssuerRef }}
caBundle: {{ $ca }}
{{- end }}
failurePolicy: {{ .Values.webhook.failurePolicy }}
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
name: vlinstorsatellite.kb.io
rules:
- apiGroups:
- piraeus.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- linstorsatellites
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "piraeus-operator.fullname" . }}-webhook-service'
namespace: '{{ .Release.Namespace }}'
path: /validate-piraeus-io-v1-linstorsatelliteconfiguration
{{- if not .Values.tls.certManagerIssuerRef }}
caBundle: {{ $ca }}
{{- end }}
failurePolicy: {{ .Values.webhook.failurePolicy }}
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
name: vlinstorsatelliteconfiguration.kb.io
rules:
- apiGroups:
- piraeus.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- linstorsatelliteconfigurations
sideEffects: None

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "piraeus-operator.fullname" . }}-webhook-service
labels:
{{- include "piraeus-operator.labels" . | nindent 4 }}
spec:
type: ClusterIP
selector:
{{- include "piraeus-operator.selectorLabels" . | nindent 4 }}
ports:
- name: https
port: 443
targetPort: 9443

View File

@@ -0,0 +1,108 @@
replicaCount: 1
installCRDs: false
operator:
image:
repository: quay.io/piraeusdatastore/piraeus-operator
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
options:
leaderElect: true
resources: { }
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
kubeRbacProxy:
enabled: false
image:
repository: gcr.io/kubebuilder/kube-rbac-proxy
pullPolicy: IfNotPresent
tag: v0.13.1
options:
logtostderr: "true"
v: 0
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
resources: { }
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
webhook:
timeoutSeconds: 2
failurePolicy: Fail
tls:
certificateSecret: ""
autogenerate: true
renew: false
certManagerIssuerRef: {}
imagePullSecrets: [ ]
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: { }
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
rbac:
# Specifies whether RBAC resources should be created
create: true
podAnnotations: { }
podSecurityContext: {}
# fsGroup: 2000
nodeSelector: { }
tolerations:
- key: drbd.linbit.com/lost-quorum
effect: NoSchedule
- key: drbd.linbit.com/force-io-error
effect: NoSchedule
affinity: { }
podDisruptionBudget:
enabled: true
minAvailable: 1
# maxUnavailable: 1
imageConfigOverride: []
# - base: example.com/piraeus
# components:
# linstor-csi:
# image: linstor-csi
# tag: my-custom-tag
# Results in the image example.com/piraeus/linstor-csi:my-custom-tag being used.
# See templates/config.yaml for available components.

View File

@@ -0,0 +1,28 @@
---
# Example: claim 8Gi from the default (LINSTOR) StorageClass and mount it
# into an nginx pod.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 8Gi
  #storageClassName: linstor-lvm
---
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: myfrontend
      image: nginx
      volumeMounts:
        - mountPath: "/var/www/html"
          name: mypd
  volumes:
    - name: mypd
      persistentVolumeClaim:
        claimName: myclaim

View File

@@ -0,0 +1,9 @@
# Install the DRBD kernel module on a Proxmox node and label it for LINSTOR.
wget -O /tmp/package-signing-pubkey.asc https://packages.linbit.com/package-signing-pubkey.asc
gpg --yes -o /etc/apt/trusted.gpg.d/linbit-keyring.gpg --dearmor /tmp/package-signing-pubkey.asc
# Proxmox major version, e.g. "7" from "pve-manager/7.4-3/..."
PVERS=$(pveversion | awk -F'[/.]' '{print $2}')
# Add LINBIT as a dedicated sources.list.d entry; writing to /etc/apt/sources.list
# would wipe out every other repository configured on the node.
echo "deb [signed-by=/etc/apt/trusted.gpg.d/linbit-keyring.gpg] http://packages.linbit.com/public/ proxmox-$PVERS drbd-9" > /etc/apt/sources.list.d/linbit.list
apt update && apt -y install drbd-dkms
# Disable the in-kernel usermode helper and load DRBD now and on every boot.
echo "options drbd usermode_helper=disabled" > /etc/modprobe.d/drbd.conf
echo drbd > /etc/modules-load.d/drbd.conf
modprobe drbd
kubectl label node "${HOSTNAME}" node-role.kubernetes.io/linstor= --overwrite

View File

@@ -0,0 +1,8 @@
---
# Root self-signed Issuer used to bootstrap the LINSTOR CA certificates.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: ca-bootstrapper
  # NOTE: Helm's built-in is .Release.Namespace (capital N);
  # .Release.namespace is undefined and renders as an empty string.
  namespace: {{ .Release.Namespace }}
spec:
  selfSigned: {}

View File

@@ -0,0 +1,16 @@
# Platform deployment descriptor plus chart value overrides.
_helm:
  name: piraeus-operator
  namespace: cozy-linstor # must be same namespace as linstor
  createNamespace: true
  privilegedNamespace: false
  crds: CreateReplace
  dependsOn:
    - name: cert-manager
piraeus:
  installCRDs: true
  tls:
    # "autogenerate" is a tls.* key in the chart's values.yaml; at the
    # piraeus top level it would be dead configuration. It is moot here
    # anyway: the templates prefer certManagerIssuerRef when it is set.
    autogenerate: false
    certManagerIssuerRef:
      name: ca-bootstrapper
      kind: Issuer