mirror of https://github.com/outbackdingo/Biohazard.git (synced 2026-01-27 10:18:27 +00:00)
fix(rook): rm osd cpu limits
Changed files include .gitignore (vendored): 2 lines changed. The remaining hunks below come from files whose paths are not captured in this view.
.gitignore:
@@ -7,7 +7,7 @@ talosconfig
 clusterconfig/
-**/clusterconfig
+**/clusterconfig/*
-**/cilium/app/bootstrap-install/charts/*
+**/cilium*/app/bootstrap-install/charts/*
 .pem
 .key
 .pub
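Note on the tightened patterns: `**/clusterconfig` ignored the directory itself, while `**/clusterconfig/*` ignores only its contents, and `**/cilium*/...` also matches sibling directories whose names merely start with "cilium". Which rule wins for a given path can be checked with git itself; the path below is a hypothetical example, not taken from the repo:

    git check-ignore -v kubernetes/apps/cilium/app/bootstrap-install/charts/cilium-1.15.0.tgz
    # prints the matching source, line number, and pattern for that path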
Taskfile with Kubernetes helper tasks (path not shown in this view):
@@ -33,12 +33,12 @@ tasks:
     vars:
       NAME: '{{ or .NAME (fail "Missing `NAME` environment variable!") }}'
       NS: '{{ .NS | default "default" }}'
-      # TYPE: '{{ .TYPE | default "job" }}'
+      TYPE: '{{ .TYPE | default "job" }}'
       # WAIT_ARGS: '{{.WAIT_ARGS | default "echo \"{{.NAME}} is still running, logs:\" && kubectl -n {{.NS}} logs {{.NAME}} --since 2s -f;"}}'
     cmds:
       - |-
-        until kubectl -n {{.NS}} wait {{.NAME}} --for condition=complete --timeout=2s; do
-          echo "{{.NAME}} is still running, logs:" && kubectl -n {{.NS}} logs {{.NAME}} --since 2s -f || true;
+        until kubectl -n {{.NS}} wait {{.TYPE}}/{{.NAME}} --for condition=complete --timeout=2s; do
+          echo "{{.NAME}} is still running, logs:" && kubectl -n {{.NS}} logs {{.TYPE}}/{{.NAME}} --since 2s -f || true;
         done

   get-public-ingresses:
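With TYPE wired into wait-finish, the wait loop targets a typed resource instead of relying on callers to prefix the name themselves. For the kbench invocation added further down (NS "default", NAME "kbench", TYPE "job"), the rendered loop comes out as:

    until kubectl -n default wait job/kbench --for condition=complete --timeout=2s; do
      echo "kbench is still running, logs:" && kubectl -n default logs job/kbench --since 2s -f || true;
    done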
@@ -99,5 +99,30 @@ tasks:
       - defer: cat ./client.yaml | envsubst | kubectl delete -f -
       - task: wait-finish
         vars:
-          NAME: 'jobs/{{.CLIENT_NAME}}'
+          NAME: '{{.CLIENT_NAME}}'
           NS: '{{.CLIENT_NS}}'
+
+  kbench:
+    vars: &kbench-vars
+      # SC: '{{ or .SC (fail "Missing `SC` environment variable!") }}'
+      SC: '{{.SC}}'
+      NODE: '{{.NODE}}'
+      NS: '{{ .NS | default "default" }}'
+    env: *kbench-vars
+    cmds:
+      - &kbench-delete |-
+        export KBENCH=$(curl -sL https://raw.githubusercontent.com/yasker/kbench/main/deploy/fio.yaml)
+        [[ ! -z "{{.SC}}" ]] && export KBENCH=$(printf '%s\n' "${KBENCH}" | yq '. | select(.kind == "PersistentVolumeClaim").spec.storageClassName = "{{.SC}}"')
+        [[ ! -z "{{.NODE}}" ]] && export KBENCH=$(printf '%s\n' "${KBENCH}" | yq '. | select(.kind == "Job").spec.template.spec.nodeSelector."kubernetes.io/hostname" = "{{.NODE}}"')
+        printf '%s\n' "${KBENCH}" | kubectl delete -n {{.NS}} -f - || true
+      - |-
+        export KBENCH=$(curl -sL https://raw.githubusercontent.com/yasker/kbench/main/deploy/fio.yaml)
+        [[ ! -z "{{.SC}}" ]] && export KBENCH=$(printf '%s\n' "${KBENCH}" | yq '. | select(.kind == "PersistentVolumeClaim").spec.storageClassName = "{{.SC}}"')
+        [[ ! -z "{{.NODE}}" ]] && export KBENCH=$(printf '%s\n' "${KBENCH}" | yq '. | select(.kind == "Job").spec.template.spec.nodeSelector."kubernetes.io/hostname" = "{{.NODE}}"')
+        printf '%s\n' "${KBENCH}" | kubectl apply -n {{.NS}} -f -
+      - defer: *kbench-delete
+      - task: wait-finish
+        vars:
+          NS: '{{ .NS | default "default" }}'
+          NAME: "kbench"
+          TYPE: "job"
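Assuming this Taskfile is included under a prefix such as `k8s` (the include name is not visible in this view), the benchmark could be run as sketched below; both values are placeholders. When SC or NODE is left empty, the corresponding yq edit is skipped and the upstream fio.yaml is applied as-is.

    task k8s:kbench SC=my-storage-class NODE=my-node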
Rook Taskfile (path not shown in this view):
@@ -22,6 +22,21 @@ includes:
     taskfile: ../talos

 tasks:
+  toolbox:
+    aliases: ["tb"]
+    desc: Launches a shell (or runs the specified command) in a Rook Ceph Toolbox pod deployed from upstream's manifest after patching its command, then cleans up.
+    vars:
+      CMD: '{{ .CMD | default "/bin/bash -c " }}'
+    cmds:
+      - kubectl delete -n rook-ceph jobs/rook-ceph-toolbox-job || true
+      - curl -sL https://raw.githubusercontent.com/rook/rook/v1.11.9/deploy/examples/toolbox-job.yaml | yq '.spec.template.spec.containers.0.command = ["/bin/bash", "-c"] | .spec.template.spec.containers.0.args = ["sleep 2147483647"]' | kubectl apply -f -
+      - defer: curl -sL https://raw.githubusercontent.com/rook/rook/v1.11.9/deploy/examples/toolbox-job.yaml | yq '.spec.template.spec.containers.0.command = ["/bin/bash", "-c"] | .spec.template.spec.containers.0.args = ["sleep 2147483647"]' | kubectl delete -f -
+      - task: k8s:wait-pod-running
+        vars:
+          NS: rook-ceph
+          NAME: -l job-name=rook-ceph-toolbox-job
+      - kubectl exec -n rook-ceph jobs/rook-ceph-toolbox-job -it -- /bin/bash
+
   osd-prepare-logs:
     aliases: [osdlogs]
     desc: Stream all logs for the `osd-prepare` Job.
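Assuming the Rook taskfile is included under a `rook` prefix (also not visible in this view), dropping into the toolbox and running the usual Ceph health checks would look like:

    task rook:toolbox        # or via the alias: task rook:tb
    # inside the toolbox pod:
    ceph status
    ceph osd df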
Talos cluster configuration (path not shown in this view):
@@ -117,7 +117,7 @@ nodes:

   - hostname: "humming.${DNS_CLUSTER}"
     ipAddress: "${IP_VLAN_HUMMING}1"
-    controlPlane: false
+    controlPlane: true
     installDiskSelector:
       size: "<= 600GB"
       type: nvme
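Once the regenerated machine config is applied and humming rejoins in its new role, the promotion can be spot-checked from kubectl via the node-role.kubernetes.io/control-plane label that control-plane nodes normally carry:

    kubectl get nodes -l node-role.kubernetes.io/control-plane -o wide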
@@ -282,7 +282,7 @@ controlPlane:
         - op: create
           path: /var/etc/frr/vtysh.conf
           permissions: 0o400
-          content: |
+          content: |-
            service integrated-vtysh-config
     pods:
       - apiVersion: v1
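The switch from `|` to `|-` only changes YAML block chomping: with `|` the written vtysh.conf ends with a trailing newline, with `|-` it does not. In shell terms, the two variants amount to:

    printf 'service integrated-vtysh-config\n' > vtysh.conf   # content: |
    printf 'service integrated-vtysh-config'   > vtysh.conf   # content: |-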
Rook Ceph cluster resources (path not shown in this view):
@@ -83,7 +83,7 @@ spec:
        cpu: "500m"
        memory: "3Gi"
      limits:
-       cpu: "3000m"
+       cpu: 0
        memory: "10Gi"
    mgr-sidecar:
      requests:
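This is the change the commit title refers to: per the title, setting cpu: 0 is intended to remove the OSD CPU limit of 3000m, while the requests and the 10Gi memory limit stay in place. After the operator reconciles, the effective limits can be inspected on the OSD deployments, which Rook labels app=rook-ceph-osd:

    kubectl -n rook-ceph get deploy -l app=rook-ceph-osd \
      -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.template.spec.containers[0].resources.limits}{"\n"}{end}'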