Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-09 16:16:23 +00:00
It turns out the pods don't exit when the master or worker crashes. Along the way, remove redundant metadata.
31 lines · 733 B · YAML
kind: ReplicationController
apiVersion: v1
metadata:
  name: spark-worker-controller
spec:
  replicas: 3
  selector:
    component: spark-worker
  template:
    metadata:
      labels:
        component: spark-worker
    spec:
      containers:
        - name: spark-worker
          image: gcr.io/google_containers/spark-worker:1.5.1_v1
          ports:
            - containerPort: 8888
          livenessProbe:
            exec:
              command:
                - /opt/spark/sbin/spark-daemon.sh
                - status
                - org.apache.spark.deploy.worker.Worker
                - '1'
            initialDelaySeconds: 30
            timeoutSeconds: 1
          resources:
            requests:
              cpu: 100m
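The exec liveness probe is what addresses the behavior noted in the commit message: the kubelet periodically runs the spark-daemon.sh status check inside each worker container and restarts the container when the check fails, so a crashed worker no longer lingers as a running-but-dead pod. The manifest is normally applied with kubectl (e.g. kubectl create -f spark-worker-controller.yaml). As an illustration only, not part of the repository, the sketch below shows how the same controller could be created with the official Kubernetes Python client; the local file name, the "default" namespace, and the kubeconfig-based cluster access are assumptions.

# Minimal sketch (assumptions: kubeconfig points at a running cluster,
# the manifest above is saved locally as spark-worker-controller.yaml,
# and resources go into the "default" namespace).
import yaml
from kubernetes import client, config

config.load_kube_config()

with open("spark-worker-controller.yaml") as f:
    manifest = yaml.safe_load(f)

core = client.CoreV1Api()
# ReplicationController is a core/v1 resource, so CoreV1Api handles it.
core.create_namespaced_replication_controller(namespace="default", body=manifest)

# Because of the exec liveness probe, the kubelet restarts a worker container
# whose spark-daemon.sh status check fails; restart counts show up in pod status.
pods = core.list_namespaced_pod(
    namespace="default", label_selector="component=spark-worker")
for pod in pods.items:
    for cs in (pod.status.container_statuses or []):
        print(pod.metadata.name, cs.name, cs.restart_count)

The CPU request of 100m keeps the three replicas schedulable on small clusters while still letting the scheduler account for them; the probe's 30-second initial delay gives the Spark worker time to register with the master before the first status check runs.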