Compare commits

...

3 Commits

Author SHA1 Message Date
Jeff McCune
4ce39db745 (#30) Enforce restricted pod security profile on istio-ingress namespace
This patch enforces the restricted pod security profile on the istio
ingress namespace. The istio cni is enabled to move the traffic redirection
from the init container to a cni daemon set pod.

Refer to:

 - https://istio.io/latest/docs/setup/additional-setup/pod-security-admission/
 - https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
2024-03-02 11:16:55 -08:00
Jeff McCune
eba58d1639 (#30) Add ingress component and istio-ingressgateway Deployment
Migrated from holos-infra/components/core/istio/ingress
2024-03-02 10:22:21 -08:00
Jeff McCune
765832d90d (#30) Trim istiod 2024-03-01 16:27:49 -08:00
14 changed files with 493 additions and 15 deletions

View File

@@ -3066,7 +3066,7 @@ import (
// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
securityContext?: null | #SecurityContext @go(SecurityContext,*SecurityContext) @protobuf(15,bytes,opt)
securityContext?: #SecurityContext @go(SecurityContext,*SecurityContext) @protobuf(15,bytes,opt)
// Whether this container should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the container will always result in EOF.
@@ -3982,7 +3982,7 @@ import (
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
securityContext?: null | #PodSecurityContext @go(SecurityContext,*PodSecurityContext) @protobuf(14,bytes,opt)
securityContext?: #PodSecurityContext @go(SecurityContext,*PodSecurityContext) @protobuf(14,bytes,opt)
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
// If specified, these secrets will be passed to individual puller implementations for them to use.

View File

@@ -0,0 +1,6 @@
package v1
// #Deployment pins the apiVersion and kind fields for Kubernetes apps/v1
// Deployment manifests. Consumers unify the remaining fields (metadata,
// spec, ...) at the point of use.
#Deployment: {
apiVersion: "apps/v1"
kind: "Deployment"
}

View File

@@ -0,0 +1,10 @@
package holos
// Component key identifying this holos component instance.
#InputKeys: component: "cni"
// The istio cni daemon set is deployed into kube-system.
#TargetNamespace: "kube-system"
// Render the istio "cni" helm chart into the target namespace.
// NOTE(review): this changeset also defines #CNIValues as the cni chart's
// default values — confirm #IstioValues (not #CNIValues) is intended here.
#HelmChart & {
namespace: #TargetNamespace
chart: name: "cni"
values: #IstioValues
}

View File

@@ -0,0 +1,101 @@
package holos
import "encoding/json"
// Component key identifying this holos component instance.
#InputKeys: component: "ingress"
// All ingress gateway resources land in the istio-ingress namespace.
#TargetNamespace: "istio-ingress"
// The ingress gateway depends on istiod being reconciled first.
#DependsOn: _IstioD
// Render the istio "gateway" helm chart plus additional holos API objects
// (see _APIObjects below).
#HelmChart & {
chart: name: "gateway"
namespace: #TargetNamespace
values: #GatewayValues & {
// This component expects the load balancer to send the PROXY protocol header.
// Refer to: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/annotations/#proxy-protocol-v2
podAnnotations: "proxy.istio.io/config": json.Marshal(_ProxyProtocol)
// TODO This configuration is specific to the OIS Metal NLB, refactor it out to the metal collection.
service: {
type: "NodePort"
annotations: "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol": "*"
// Keep traffic on the local node so the client source address is preserved.
externalTrafficPolicy: "Local"
// NodePorts are pinned in the 30000 range: 30021 (status-port),
// 30080 (http2), 30443 (https). Note 30021 is NOT 15021 + 30000;
// only the trailing digits of the port carry over.
ports: [
{
name: "status-port"
port: 15021
protocol: "TCP"
targetPort: 15021
nodePort: 30021
},
{
name: "http2"
port: 80
protocol: "TCP"
targetPort: 80
nodePort: 30080
},
{
name: "https"
port: 443
protocol: "TCP"
targetPort: 443
nodePort: 30443
},
]
}
}
apiObjects: _APIObjects
}
// Marshaled into the proxy.istio.io/config pod annotation above so envoy
// expects the PROXY protocol header on inbound connections.
_ProxyProtocol: gatewayTopology: proxyProtocol: {}
// Additional holos specific API Objects
let LoopbackName = #GatewayValues.name + "-loopback"
let LoopbackDescription = "Allows in-cluster traffic to stay in cluster via traffic routing"
let LoopbackLabels = {
app: LoopbackName
istio: "ingressgateway"
}
// A second, in-cluster-only gateway Deployment ("loopback") alongside the
// helm-managed one. Its pod spec satisfies the restricted pod security
// profile now enforced on this namespace (runAsNonRoot, RuntimeDefault
// seccomp, no privilege escalation, all capabilities dropped).
_APIObjects: {
Deployment: {
loopback: #Deployment & {
_description: LoopbackDescription
metadata: {
name: LoopbackName
namespace: #TargetNamespace
}
spec: {
selector: matchLabels: LoopbackLabels
template: {
metadata: {
annotations: #CommonAnnotations & {
_Description: LoopbackDescription
// Use the istio gateway injection template so istiod injects the
// full gateway proxy rather than a sidecar.
"inject.istio.io/templates": "gateway"
}
labels: LoopbackLabels & {"sidecar.istio.io/inject": "true"}
}
spec: {
serviceAccountName: "istio-ingressgateway"
// Allow binding to all ports (such as 80 and 443)
securityContext: {
runAsNonRoot: true
seccompProfile: type: "RuntimeDefault"
sysctls: [{name: "net.ipv4.ip_unprivileged_port_start", value: "0"}]
}
containers: [{
name: "istio-proxy"
image: "auto" // Managed by istiod
securityContext: {
allowPrivilegeEscalation: false
capabilities: drop: ["ALL"]
// 1337 is the conventional istio-proxy uid/gid.
runAsUser: 1337
runAsGroup: 1337
}
}]
}
}
}
}
}
}

View File

@@ -1,3 +1,13 @@
package holos
#DependsOn: _IstioBase
#HelmChart: {
chart: {
version: "1.20.3"
repository: {
name: "istio"
url: "https://istio-release.storage.googleapis.com/charts"
}
}
}

View File

@@ -8,12 +8,7 @@ import "encoding/yaml"
#HelmChart & {
namespace: #TargetNamespace
chart: {
name: "istiod"
version: "1.20.3"
repository: {
name: "istio"
url: "https://istio-release.storage.googleapis.com/charts"
}
name: "istiod"
}
values: #IstioValues & {
pilot: {

View File

@@ -44,7 +44,7 @@ _MeshConfig: {
"cookie",
"x-forwarded-for",
]
port: 4180
port: 4180
service: "oauth2-proxy.istio-ingress.svc.cluster.local"
}
}, {

View File

@@ -0,0 +1,161 @@
package holos
// Default values.yaml imported from the cni chart
// #CNIValues mirrors the upstream istio-cni chart defaults as a CUE schema.
// The enabled and chained fields unify with #IstioValues.istio_cni so the
// cni chart and istiod stay in sync (upstream requires these to match).
#CNIValues: {
cni: {
hub: ""
tag: ""
variant: ""
image: "install-cni"
pullPolicy: ""
// Refer to https://istio.io/latest/docs/setup/additional-setup/cni/#installing-with-helm
enabled: #IstioValues.istio_cni.enabled
// Configuration log level of istio-cni binary
// by default istio-cni send all logs to UDS server
// if want to see them you need change global.logging.level with cni:debug
logLevel: "debug"
// Configuration file to insert istio-cni plugin configuration
// by default this will be the first file found in the cni-conf-dir
// Example
// cniConfFileName: 10-calico.conflist
// CNI bin and conf dir override settings
// defaults:
cniBinDir: "" // Auto-detected based on version; defaults to /opt/cni/bin.
cniConfDir: "/etc/cni/net.d"
cniConfFileName: ""
// This directory must exist on the node, if it does not, consult your container runtime
// documentation for the appropriate path.
cniNetnsDir: null // Defaults to '/var/run/netns', in minikube/docker/others can be '/var/run/docker/netns'.
excludeNamespaces: [
"istio-system",
"kube-system",
]
// Allows user to set custom affinity for the DaemonSet
affinity: {}
// Custom annotations on pod level, if you need them
podAnnotations: {}
// If this value is set a RoleBinding will be created
// in the same namespace as the istio-cni DaemonSet is created.
// This can be used to bind a preexisting ClusterRole to the istio/cni ServiceAccount
// e.g. if you use PodSecurityPolicies
psp_cluster_role: ""
// Deploy the config files as plugin chain (value "true") or as standalone files in the conf dir (value "false")?
// Some k8s flavors (e.g. OpenShift) do not support the chain approach, set to false if this is the case
chained: #IstioValues.istio_cni.chained
// Allow the istio-cni container to run in privileged mode, needed for some platforms (e.g. OpenShift) or features (repairPods)
privileged: false
// Custom configuration happens based on the CNI provider.
// Possible values: "default", "multus"
provider: "default"
// Configure ambient settings
ambient: {
// If enabled, ambient redirection will be enabled
enabled: false
// Set ambient redirection mode: "iptables" or "ebpf"
redirectMode: "iptables"
// Set ambient config dir path: defaults to /etc/ambient-config
configDir: ""
}
repair: {
enabled: true
hub: ""
tag: ""
// Repair controller has 3 modes. Pick which one meets your use cases. Note only one may be used.
// This defines the action the controller will take when a pod is detected as broken.
// labelPods will label all pods with <brokenPodLabelKey>=<brokenPodLabelValue>.
// This is only capable of identifying broken pods; the user is responsible for fixing them (generally, by deleting them).
labelPods: false
// deletePods will delete any broken pod. These will then be rescheduled, hopefully onto a node that is fully ready.
deletePods: true
// repairPods will dynamically repair any broken pod by setting up the pod networking configuration even after it has started.
// Note the pod will be crashlooping, so this may take a few minutes to become fully functional based on when the retry occurs.
// This requires no RBAC privilege, but does require `securityContext.privileged`.
repairPods: false
initContainerName: "istio-validation"
brokenPodLabelKey: "cni.istio.io/uninitialized"
brokenPodLabelValue: "true"
}
// Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
resources: requests: {
cpu: "100m"
memory: "100Mi"
}
resourceQuotas: {
enabled: false
pods: 5000
}
// The number of pods that can be unavailable during rolling update (see
// `updateStrategy.rollingUpdate.maxUnavailable` here:
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec).
// May be specified as a number of pods or as a percent of the total number
// of pods at the start of the update.
rollingMaxUnavailable: 1
}
// Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
// For Helm compatibility.
ownerName: ""
global: {
// Default hub for Istio images.
// Releases are published to docker hub under 'istio' project.
// Dev builds from prow are on gcr.io
hub: "docker.io/istio"
// Default tag for Istio images.
// NOTE(review): keep this in step with the chart version pinned in the
// shared #HelmChart definition (1.20.3) — confirm on upgrades.
tag: "1.20.3"
// Variant of the image to use.
// Currently supported are: [debug, distroless]
variant: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
// change cni scope level to control logging out of istio-cni-node DaemonSet
logging: {
level: "default:info,cni:info"
}
logAsJson: false
// ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace
// to use for pulling any images in pods that reference this ServiceAccount.
// For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)
// ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.
// Must be set for any cluster configured with private docker registry.
imagePullSecrets: []
// - private-registry-key
// Default resources allocated
defaultResources: {
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}
}

View File

@@ -0,0 +1,170 @@
package holos
// Gateway default values.yaml imported from the gateway chart.
// Fields written as disjunctions with a CUE default (e.g. `string |
// *"LoadBalancer"`) may be overridden by components that unify with this
// schema, such as the ingress component's NodePort service override.
#GatewayValues: {
// Name allows overriding the release name. Generally this should not be set
name: "istio-ingressgateway"
// revision declares which revision this gateway is a part of
revision: ""
// Controls the spec.replicas setting for the Gateway deployment if set.
// Otherwise defaults to Kubernetes Deployment default (1).
replicaCount: null
kind: "Deployment"
rbac: {
// If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
// when using http://gateway-api.org/.
enabled: true
}
serviceAccount: {
// If set, a service account will be created. Otherwise, the default is used
create: true
// Annotations to add to the service account
annotations: {}
// The name of the service account to use.
// If not set, the release name is used
name: ""
}
// `...` keeps this struct open so components may add annotations, e.g. the
// proxy.istio.io/config annotation set by the ingress component.
podAnnotations: {
"prometheus.io/port": "15020"
"prometheus.io/scrape": "true"
"prometheus.io/path": "/stats/prometheus"
"inject.istio.io/templates": "gateway"
"sidecar.istio.io/inject": "true"
...
}
// Define the security context for the pod.
// If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
// On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {
seccompProfile: type: "RuntimeDefault"
sysctls: [{name: "net.ipv4.ip_unprivileged_port_start", value: "0"}]
}
containerSecurityContext: null
service: {
// Type of service. Set to "None" to disable the service entirely
type: string | *"LoadBalancer"
// Default port list; components may replace it wholesale (the `[...] | *[...]`
// disjunction defaults to the upstream ports when not overridden).
ports: [...] | *[{
name: "status-port"
port: 15021
protocol: "TCP"
targetPort: 15021
}, {
name: "http2"
port: 80
protocol: "TCP"
targetPort: 80
}, {
name: "https"
port: 443
protocol: "TCP"
targetPort: 443
}]
annotations: {...}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: string | *""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
}
resources: {
requests: {
cpu: "100m"
memory: "128Mi"
}
limits: {
cpu: "2000m"
memory: "1024Mi"
}
}
autoscaling: {
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
autoscaleBehavior: {}
}
// Pod environment variables
env: {}
// Labels to apply to all resources
labels: {}
// Annotations to apply to all resources
annotations: {}
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
// If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
imagePullSecrets: []
// This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
//
// By default, the `podDisruptionBudget` is disabled (set to `{}`),
// which means that no PodDisruptionBudget resource will be created.
//
// To enable the PodDisruptionBudget, configure it by specifying the
// `minAvailable` or `maxUnavailable`. For example, to set the
// minimum number of available replicas to 1, you can update this value as follows:
//
// podDisruptionBudget:
//   minAvailable: 1
//
// Or, to allow a maximum of 1 unavailable replica, you can set:
//
// podDisruptionBudget:
//   maxUnavailable: 1
//
// You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
// For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
//
// podDisruptionBudget:
//   minAvailable: 1
//   unhealthyPodEvictionPolicy: AlwaysAllow
//
// To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
//
// podDisruptionBudget: {}
//
podDisruptionBudget: {}
terminationGracePeriodSeconds: 30
// A list of `Volumes` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
// A list of `VolumeMounts` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
// Configure this to a higher priority class in order to make sure your Istio gateway pods
// will not be killed because of low priority class.
// Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
// for more detail.
priorityClassName: ""
}

View File

@@ -11,4 +11,4 @@ package holos
_CertManager: CertManager: name: "\(#InstancePrefix)-certmanager"
_Namespaces: Namespaces: name: "\(#StageName)-secrets-namespaces"
_IstioBase: IstioBase: name: "\(#InstancePrefix)-istio-base"
_IstioPilot: IstioPilot: name: "\(#InstancePrefix)-istiod"
_IstioD: IstioD: name: "\(#InstancePrefix)-istiod"

View File

@@ -538,7 +538,10 @@ package holos
// keep in sync with settings used when installing the Istio CNI chart
istio_cni: {
enabled: false
// Refer to https://istio.io/latest/docs/setup/additional-setup/cni/#installing-with-helm
// values.istio_cni.enabled should be set to the same value as values.cni.enabled.
// values.istio_cni.chained should be set to the same value as values.cni.chained.
enabled: true
chained: true
}
}

View File

@@ -1,6 +1,14 @@
package holos
let Privileged = {labels: "pod-security.kubernetes.io/enforce": "privileged"}
// Refer to https://kubernetes.io/docs/concepts/security/pod-security-standards/
let Restricted = {
labels: "pod-security.kubernetes.io/enforce": "restricted"
labels: "pod-security.kubernetes.io/enforce-version": "latest"
}
let Privileged = {
labels: "pod-security.kubernetes.io/enforce": "privileged"
labels: "pod-security.kubernetes.io/enforce-version": "latest"
}
// #PlatformNamespaces is the union of all namespaces across all cluster types. Namespaces are created in all clusters regardless of whether they're
// used within the cluster or not. This is important for security and consistency with IAM, RBAC, and Secrets sync between clusters.
@@ -10,7 +18,7 @@ let Privileged = {labels: "pod-security.kubernetes.io/enforce": "privileged"}
{name: "flux-system"},
{name: "ceph-system"} & Privileged,
{name: "istio-system"} & Privileged,
{name: "istio-ingress"} & Privileged,
{name: "istio-ingress"} & Restricted,
{name: "cert-manager"},
{name: "argocd"},
]

View File

@@ -3,6 +3,7 @@ package holos
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ksv1 "kustomize.toolkit.fluxcd.io/kustomization/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
batchv1 "k8s.io/api/batch/v1"
@@ -55,8 +56,20 @@ _apiVersion: "holos.run/v1alpha1"
...
}
#CommonAnnotations: {
_Description: string | *""
"holos.run/description": _Description
...
}
#NamespaceObject: #ClusterObject & {
metadata: namespace: string
_description: string | *""
metadata: {
namespace: string
annotations: #CommonAnnotations & {
_Description: _description
}
}
}
// Kubernetes API Objects
@@ -76,6 +89,7 @@ _apiVersion: "holos.run/v1alpha1"
#Pod: #NamespaceObject & corev1.#Pod
#Job: #NamespaceObject & batchv1.#Job
#CronJob: #NamespaceObject & batchv1.#CronJob
#Deployment: #NamespaceObject & appsv1.#Deployment
// Flux Kustomization CRDs
#Kustomization: #NamespaceObject & ksv1.#Kustomization & {

View File

@@ -1 +1 @@
1
2