deployment via docker

Jack Murdock
2019-06-03 15:07:46 -07:00
parent 1ac41bf288
commit 76569953a6
13 changed files with 947 additions and 0 deletions

67
deploy/README.md Normal file

@@ -0,0 +1,67 @@
# Deploying XMiDT
## Docker
To deploy with Docker, make sure [Docker is installed](https://docs.docker.com/install/).
### Deploy
_note:_ While tr1d1um is not part of XMiDT (it is part of WebPA), it is recommended that
you bring it up as well for ease of use. Future releases will deprecate tr1d1um.
1. Have the services you want to bring up built (Talaria, Scytale, etc.).
- Build the images locally
```bash
# for each service
cd <# service #>
git pull origin master
# notice the dot
docker build -t <# service #>:local .
```
2. Set the environment variables relevant to the services you are deploying. If
you aren't using locally built images, replace `local` with the correct tag.
_note:_ currently the images are not hosted anywhere.
```bash
export TALARIA_VERSION=local
export SCYTALE_VERSION=local
export CADUCEUS_VERSION=local
export PETASOS_VERSION=local
# This is WebPA not XMiDT
export TR1D1UM_VERSION=local
```
If you don't want to set environment variables, set them inline with each
`docker-compose` command, as shown in the sketch after this list.
3. To bring the containers up, run:
```bash
docker-compose up -d
```
If you only want to bring up, for example, scytale and talaria, run the command
below. _note:_ bringing up a subset of the services can cause problems.
```bash
docker-compose up -d scytale talaria
```
This can be done with any combination of the services.
4. To bring the containers down:
```bash
docker-compose down
```
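
The whole flow, condensed into one sketch (assuming the service repositories are
already cloned side by side, locally built `local` images, and the port mappings
from the `docker-compose.yml` in this directory):
```bash
# step 1: build each service image locally
for svc in talaria scytale caduceus petasos tr1d1um; do
  (cd "${svc}" && git pull origin master && docker build -t "${svc}:local" .)
done

# steps 2 and 3: set the versions inline and bring the stack up
TALARIA_VERSION=local SCYTALE_VERSION=local CADUCEUS_VERSION=local \
PETASOS_VERSION=local TR1D1UM_VERSION=local docker-compose up -d

# sanity check: list the containers and hit one exposed health endpoint
docker-compose ps
curl -i http://localhost:6201/health   # talaria-0's health port

# step 4: tear it down (the same variables are needed for interpolation)
TALARIA_VERSION=local SCYTALE_VERSION=local CADUCEUS_VERSION=local \
PETASOS_VERSION=local TR1D1UM_VERSION=local docker-compose down
```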
### INFO
The docker-compose file provides one full datacenter, plus one talaria in a "backup"
datacenter. Since this is plain Docker (no Swarm or Kubernetes), it is easiest to
deal with just one datacenter, and since all ports are exposed the names might seem a little odd.
#### Connection
##### Inside Docker
If the parodus instance is inside of Docker, life is easy: just connect to the cluster at `petasos:6400`.
##### Outside Docker
If the agent is outside of Docker and the ports are exposed correctly, life is
harder, since you will need to handle the redirect yourself.
You can initially connect to `localhost:6400`, but on the redirect change `talaria-1:6210` to `localhost:6210`,
or you can just connect directly to a talaria at `localhost:6200`.
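For example, a rough sketch of handling the redirect by hand with `curl`. The
`/api/v2/device` path and the `X-Webpa-Device-Name` header are assumptions based
on how XMiDT devices normally connect, and the device id is made up:
```bash
# ask petasos where this device should connect; it answers with a redirect
curl -is -H 'X-Webpa-Device-Name: mac:112233445566' \
  http://localhost:6400/api/v2/device | grep -i '^location'
# e.g. Location: http://talaria-1:6210/api/v2/device
# rewrite the docker hostname to the host-mapped port before connecting:
#   talaria-1:6210 -> localhost:6210
```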

91
deploy/docFiles/caduceus.yaml Normal file

@@ -0,0 +1,91 @@
---
primary:
  address: ":6000"
health:
  address: ":6001"
  options:
    - "PayloadsOverZero"
    - "PayloadsOverHundred"
    - "PayloadsOverThousand"
    - "PayloadsOverTenThousand"
pprof:
  address: ":6002"
metric:
  address: ":6003"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "caduceus"
log:
  file: "stdout"
  level: "DEBUG"
  json: false
env: test
fqdn: caduceus
scheme: http
numWorkerThreads: 10
jobQueueSize: 10
sender:
  numWorkersPerSender: 5000
  queueSizePerSender: 100000
  cutOffPeriod: 10s
  linger: 180s
  clientTimeout: 60s
  deliveryRetries: 1
  deliveryInterval: 10ms
  responseHeaderTimeout: 10s
profilerFrequency: 15
profilerDuration: 15
profilerQueueSize: 100
totalIncomingPayloadSizeBuckets:
  - 100
  - 1000
  - 10000
perSourceIncomingPayloadSizeBuckets:
  - 100
  - 1000
  - 10000
aws:
  accessKey: "supbro"
  secretKey: "nahbro"
  env: local-dev
  sns:
    awsEndpoint: http://goaws:4100
    region: "us-east-1"
    topicArn: arn:aws:sns:us-east-1:000000000000:xmidt-local-caduceus
    urlPath: "/api/v2/aws/sns"
waitForDns: 0
authHeader: ["YXV0aEhlYWRlcg=="]
start:
  duration: 1
  apiPath: http://caduceus:6000/hooks
  authHeader: YXV0aEhlYWRlcg==
service:
  defaultScheme: http
  consul:
    client:
      address: "consul0:8500"
      scheme: "http"
      waitTime: "30s"
    disableGenerateID: true
    vnodeCount: 211
    registrations:
      -
        id: "caduceus"
        name: "caduceus"
        tags:
          - "dev"
          - "docker"
          - "stage=dev"
          - "flavor=docker"
        address: "http://caduceus"
        scheme: "http"
        port: 6000
        checks:
          -
            checkID: "caduceus:http"
            http: "http://caduceus:6001/health"
            interval: "30s"
            deregisterCriticalServiceAfter: "70s"

15
deploy/docFiles/consul-0.json Normal file

@@ -0,0 +1,15 @@
{
  "datacenter": "dc0",
  "node_name": "ConsulServer0",
  "log_level": "INFO",
  "telemetry": {
    "disable_hostname": true,
    "statsite_address": "127.0.0.1:8700",
    "prometheus_retention_time": "1h",
    "prefix_filter": [
      "+consul.raft.apply",
      "-consul.http",
      "+consul.catalog.service"
    ]
  }
}

18
deploy/docFiles/consul-1.json Normal file

@@ -0,0 +1,18 @@
{
  "datacenter": "dc1",
  "node_name": "ConsulServer1",
  "retry_join_wan": [
    "consul0"
  ],
  "log_level": "INFO",
  "telemetry": {
    "disable_hostname": true,
    "statsite_address": "127.0.0.1:8700",
    "prometheus_retention_time": "1h",
    "prefix_filter": [
      "+consul.raft.apply",
      "-consul.http",
      "+consul.catalog.service"
    ]
  }
}

58
deploy/docFiles/petasos.yaml Normal file

@@ -0,0 +1,58 @@
---
fqdn: petasos
env: test
scheme: http
primary:
  address: ":6400"
health:
  address: ":6401"
  options:
    - "PayloadsOverZero"
    - "PayloadsOverHundred"
    - "PayloadsOverThousand"
    - "PayloadsOverTenThousand"
pprof:
  address: ":6402"
metric:
  address: ":6403"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "petasos"
control:
  address: ":6404"
log:
  file: "stdout"
  level: "debug"
  json: true
service:
  defaultScheme: http
  consul:
    client:
      address: "consul0:8500"
      scheme: "http"
      waitTime: "30s"
    disableGenerateID: true
    watches:
      -
        service: "talaria"
        passingOnly: true
    vnodeCount: 211
redundancy:
  dc1:
    defaultScheme: http
    consul:
      client:
        address: "consul1:8500"
        scheme: "http"
        waitTime: "30s"
      disableGenerateID: true
      watches:
        -
          service: "talaria"
          passingOnly: true
      vnodeCount: 211

79
deploy/docFiles/prometheus.yml Normal file

@@ -0,0 +1,79 @@
---
# my global config
global:
  scrape_interval: 2s # Set the scrape interval to every 2 seconds. Default is every 1 minute.
  evaluation_interval: 2s # Evaluate rules every 2 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
# rule_files:
#   - "first.rules"
#   - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['prometheus:9090']
  - job_name: 'docker'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['docker.for.mac.host.internal:9323']
  - job_name: 'caduceus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['caduceus:6003']
  - job_name: 'petasos'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['petasos:6403']
  - job_name: 'scytale'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['scytale:6303']
  - job_name: 'talaria'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['talaria-0:6204', 'talaria-1:6214', 'talaria-2:6224']
  - job_name: 'tr1d1um'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['tr1d1um:6103']
  - job_name: 'consul'
    metrics_path: "/v1/agent/metrics"
    params:
      format: ["prometheus"]
    static_configs:
      - targets: ['consul0:8500','consul1:8500']

83
deploy/docFiles/scytale.yaml Normal file

@@ -0,0 +1,83 @@
---
fqdn: scytale
env: test
scheme: http
primary:
  address: ":6300"
health:
  address: ":6301"
  options:
    - "PayloadsOverZero"
    - "PayloadsOverHundred"
    - "PayloadsOverThousand"
    - "PayloadsOverTenThousand"
pprof:
  address: ":6302"
metric:
  address: ":6303"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "scytale"
log:
  file: "stdout"
  level: "DEBUG"
  json: false
fanout:
  endpoints: [ "http://petasos:6400/api/v2/device/send" ]
  authorization: YXV0aEhlYWRlcg==
  fanoutTimeout: "5s"
  clientTimeout: "5s"
  concurrency: 10
service:
  consul:
    client:
      address: "consul0:8500"
      scheme: "http"
    disableGenerateID: true
    vnodeCount: 211
    watches:
      -
        service: "talaria"
        allDatacenters: true
        tags:
          - "dev"
          - "docker"
        passingOnly: true
    registrations:
      -
        id: "scytale"
        name: "scytale"
        tags:
          - "dev"
          - "docker"
          - "stage=dev"
          - "flavor=docker"
        address: "http://scytale"
        scheme: "http"
        port: 6300
        checks:
          -
            checkID: "scytale:http"
            http: "http://scytale:6301/health"
            interval: "30s"
            deregisterCriticalServiceAfter: "70s"
aws:
  accessKey: "supbro"
  secretKey: "nahbro"
  env: local-dev
  sns:
    awsEndpoint: http://goaws:4100
    region: "us-east-1"
    topicArn: arn:aws:sns:us-east-1:000000000000:xmidt-local-caduceus
    urlPath: "/api/v2/aws/sns"
waitForDns: 0
authHeader: ["YXV0aEhlYWRlcg=="]
start:
  duration: 1
  apiPath: http://caduceus:6000/hooks
  authHeader: YXV0aEhlYWRlcg==

35
deploy/docFiles/sns.yaml Normal file

@@ -0,0 +1,35 @@
Local: # Environment name that can be passed on the command line
       # (i.e.: ./goaws [Local | Dev] -- defaults to 'Local')
  Host: goaws # hostname of the goaws system (for docker-compose this is the tag name of the container)
  # You can use one port for both sns and sqs, or comment out Port and use SqsPort + SnsPort for
  # compatibility with yopa and (fake-sns + fake-sqs). If both ways are in the config file, only
  # the one "Port" will be used by GoAws.
  Port: 4100 # port to listen on.
  # SqsPort: 9324 # alternate Sqs Port
  # SnsPort: 9292 # alternate Sns Port
  Region: us-east-1
  LogMessages: true # Log messages (true/false)
  LogFile: ./goaws_messages.log # Log filename (for message logging)
  QueueAttributeDefaults: # default attributes for all queues
    VisibilityTimeout: 30 # message visibility timeout
    ReceiveMessageWaitTimeSeconds: 0 # receive message max wait time
  Topics: # List of topics to create at startup
    - Name: xmidt-local-caduceus # Topic name - no Subscriptions
Dev: # Another environment
  Host: localhost
  Port: 4100
  # SqsPort: 9324
  # SnsPort: 9292
  LogMessages: true
  LogFile: ./goaws_messages.log
  Queues:
    - Name: dev-queue1
    - Name: dev-queue2
  Topics:
    - Name: dev-topic1
      Subscriptions:
        - QueueName: dev-queue3
          Raw: false
        - QueueName: dev-queue4
          Raw: true
    - Name: dev-topic2
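
Once the stack is up, this local SNS stand-in can be poked from the host with the
AWS CLI; a sketch, assuming the CLI is installed and reusing the dummy credentials
the other services use (goaws does not validate them):
```bash
# list the topics goaws created at startup (port 4100 is mapped in docker-compose.yml)
AWS_ACCESS_KEY_ID=supbro AWS_SECRET_ACCESS_KEY=nahbro \
  aws sns list-topics --endpoint-url http://localhost:4100 --region us-east-1
```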

96
deploy/docFiles/talaria-0.yaml Normal file

@@ -0,0 +1,96 @@
---
fqdn: talaria
env: test
scheme: http
primary:
  address: ":6200"
health:
  address: ":6201"
pprof:
  address: ":6202"
control:
  address: ":6203"
metric:
  address: ":6204"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "talaria"
log:
  file: "stdout"
  level: "debug"
  json: false
device:
  manager:
    upgrader:
      handshakeTimeout: "10s"
    maxDevices: 2000
    deviceMessageQueueSize: 100
    pingPeriod: "45s"
    idlePeriod: "135s"
    requestTimeout: "15s"
  outbound:
    method: "POST"
    eventEndpoints:
      default: http://caduceus:6000/api/v3/notify
    requestTimeout: "125s"
    defaultScheme: "http"
    allowedSchemes:
      - "http"
      - "https"
    outboundQueueSize: 1000
    workerPoolSize: 100
    transport:
      maxIdleConns: 0
      maxIdleConnsPerHost: 100
      idleConnTimeout: "120s"
    clientTimeout: "160s"
    authKey: YXV0aEhlYWRlcg==
inbound:
  authKey: YXV0aEhlYWRlcg==
eventMap:
  default: http://caduceus:6000/api/v3/notify
service:
  defaultScheme: http
  consul:
    client:
      address: "consul0:8500"
      scheme: "http"
      waitTime: "30s"
    disableGenerateID: true
    vnodeCount: 211
    watches:
      -
        service: "talaria"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
      -
        service: "caduceus"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
    registrations:
      -
        id: "talaria-0"
        name: "talaria"
        tags:
          - "dev"
          - "docker"
          - "stage=dev"
          - "flavor=docker"
        address: "http://talaria-0"
        scheme: "http"
        port: 6200
        checks:
          -
            checkID: "talaria-0:http"
            http: "http://talaria-0:6201/health"
            interval: "30s"
            deregisterCriticalServiceAfter: "70s"

96
deploy/docFiles/talaria-1.yaml Normal file

@@ -0,0 +1,96 @@
---
fqdn: talaria
env: test
scheme: http
primary:
  address: ":6210"
health:
  address: ":6211"
pprof:
  address: ":6212"
control:
  address: ":6213"
metric:
  address: ":6214"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "talaria"
log:
  file: "stdout"
  level: "debug"
  json: false
device:
  manager:
    upgrader:
      handshakeTimeout: "10s"
    maxDevices: 2000
    deviceMessageQueueSize: 100
    pingPeriod: "45s"
    idlePeriod: "135s"
    requestTimeout: "15s"
  outbound:
    method: "POST"
    eventEndpoints:
      default: http://caduceus:6000/api/v3/notify
    requestTimeout: "125s"
    defaultScheme: "http"
    allowedSchemes:
      - "http"
      - "https"
    outboundQueueSize: 1000
    workerPoolSize: 100
    transport:
      maxIdleConns: 0
      maxIdleConnsPerHost: 100
      idleConnTimeout: "120s"
    clientTimeout: "160s"
    authKey: YXV0aEhlYWRlcg==
inbound:
  authKey: YXV0aEhlYWRlcg==
eventMap:
  default: http://caduceus:6000/api/v3/notify
service:
  defaultScheme: http
  consul:
    client:
      address: "consul0:8500"
      scheme: "http"
      waitTime: "30s"
    disableGenerateID: true
    vnodeCount: 211
    watches:
      -
        service: "talaria"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
      -
        service: "caduceus"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
    registrations:
      -
        id: "talaria-1"
        name: "talaria"
        tags:
          - "dev"
          - "docker"
          - "stage=dev"
          - "flavor=docker"
        address: "http://talaria-1"
        scheme: "http"
        port: 6210
        checks:
          -
            checkID: "talaria-1:http"
            http: "http://talaria-1:6211/health"
            interval: "30s"
            deregisterCriticalServiceAfter: "70s"

96
deploy/docFiles/talaria-2.yaml Normal file

@@ -0,0 +1,96 @@
---
fqdn: talaria
env: test
scheme: http
primary:
  address: ":6220"
health:
  address: ":6221"
pprof:
  address: ":6222"
control:
  address: ":6223"
metric:
  address: ":6224"
  metricsOptions:
    namespace: "xmidt"
    subsystem: "talaria"
log:
  file: "stdout"
  level: "debug"
  json: false
device:
  manager:
    upgrader:
      handshakeTimeout: "10s"
    maxDevices: 2000
    deviceMessageQueueSize: 100
    pingPeriod: "45s"
    idlePeriod: "135s"
    requestTimeout: "15s"
  outbound:
    method: "POST"
    eventEndpoints:
      default: http://caduceus:6000/api/v3/notify
    requestTimeout: "125s"
    defaultScheme: "http"
    allowedSchemes:
      - "http"
      - "https"
    outboundQueueSize: 1000
    workerPoolSize: 100
    transport:
      maxIdleConns: 0
      maxIdleConnsPerHost: 100
      idleConnTimeout: "120s"
    clientTimeout: "160s"
    authKey: YXV0aEhlYWRlcg==
inbound:
  authKey: YXV0aEhlYWRlcg==
eventMap:
  default: http://caduceus:6000/api/v3/notify
service:
  defaultScheme: http
  consul:
    client:
      address: "consul1:8500"
      scheme: "http"
      waitTime: "30s"
    disableGenerateID: true
    vnodeCount: 211
    watches:
      -
        service: "talaria"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
      -
        service: "caduceus"
        tags:
          - "dev"
          - "docker"
        passingOnly: true
    registrations:
      -
        id: "talaria-2"
        name: "talaria"
        tags:
          - "dev"
          - "docker"
          - "stage=dev"
          - "flavor=docker"
        address: "http://talaria-2"
        scheme: "http"
        port: 6220
        checks:
          -
            checkID: "talaria-2:http"
            http: "http://talaria-2:6221/health"
            interval: "30s"
            deregisterCriticalServiceAfter: "70s"

59
deploy/docFiles/tr1d1um.yaml Normal file

@@ -0,0 +1,59 @@
---
fqdn: tr1d1um
env: test
scheme: http
hooksScheme: http
primary:
  address: ":6100"
health:
  address: ":6101"
  options:
    - "PayloadsOverZero"
    - "PayloadsOverHundred"
    - "PayloadsOverThousand"
    - "PayloadsOverTenThousand"
  readTimeout: "15s"
  idleTimeout: "15s"
pprof:
  address: ":6102"
  readTimeout: "15s"
  idleTimeout: "15s"
metric:
  address: ":6103"
  metricsOptions:
    namespace: "webpa"
    subsystem: "tr1d1um"
  readTimeout: "15s"
  idleTimeout: "15s"
log:
  file: "stdout"
  level: "INFO"
  json: false
start:
  duration: 1
  apiPath: tr1d1um:6100/hooks
waitForDns: 0
aws:
  accessKey: "supbro"
  secretKey: "nahbro"
  env: local-dev
  sns:
    awsEndpoint: http://goaws:4100
    region: "us-east-1"
    topicArn: arn:aws:sns:us-east-1:000000000000:xmidt-local-caduceus
    urlPath: "/api/v2/aws/sns"
authHeader: ["YXV0aEhlYWRlcg=="]
targetURL: http://scytale:6300
# WRPSource: "dns:tr1d1um.xmidt.comcast.net"
supportedServices:
  - "config"
clientTimeout: "135s"
respWaitTimeout: "129s"
netDialerTimeout: "5s"
requestRetryInterval: "2s"
requestMaxRetries: 2

154
deploy/docker-compose.yml Normal file

@@ -0,0 +1,154 @@
version: '3.4'

services:
  caduceus:
    image: caduceus:${CADUCEUS_VERSION}
    ports:
      - 6000:6000
      - 6001:6001
      - 6002:6002
      - 6003:6003
    depends_on:
      - goaws
    volumes:
      - ./docFiles/caduceus.yaml:/etc/caduceus/caduceus.yaml
    networks:
      - xmidt
  tr1d1um:
    image: tr1d1um:${TR1D1UM_VERSION}
    ports:
      - 6100:6100
      - 6101:6101
      - 6102:6102
    depends_on:
      - goaws
      - scytale
    volumes:
      - ./docFiles/tr1d1um.yaml:/etc/tr1d1um/tr1d1um.yaml
    networks:
      - xmidt
  scytale:
    image: scytale:${SCYTALE_VERSION}
    ports:
      - 6300:6300
      - 6301:6301
      - 6302:6302
      - 6303:6303
    depends_on:
      - goaws
      - petasos
    volumes:
      - ./docFiles/scytale.yaml:/etc/scytale/scytale.yaml
    networks:
      - xmidt
  goaws:
    image: pafortin/goaws
    container_name: goaws
    ports:
      - 4100:4100
    networks:
      - xmidt
    volumes:
      - ./docFiles/sns.yaml:/conf/goaws.yaml
  prometheus:
    image: prom/prometheus
    networks:
      - xmidt
    ports:
      - 9090:9090
    depends_on:
      - consul0
      - consul1
    volumes:
      - ./docFiles/prometheus.yml:/prometheus-data/prometheus.yml
    command: --log.level=debug --config.file=/prometheus-data/prometheus.yml
  # Mark petasos Services
  petasos: &petasos
    image: petasos:${PETASOS_VERSION}
    ports:
      - 6400:6400
      - 6401:6401
      - 6402:6402
      - 6403:6403
      - 6404:6404
    depends_on:
      - consul0
    volumes:
      - ./docFiles/petasos.yaml:/etc/petasos/petasos.yaml
    networks:
      - xmidt
  # Mark Talaria Services
  talaria-0: &talaria
    image: talaria:${TALARIA_VERSION}
    ports:
      - 6200:6200
      - 6201:6201
      - 6202:6202
      - 6203:6203
      - 6204:6204
    depends_on:
      - consul0
    volumes:
      - ./docFiles/talaria-0.yaml:/etc/talaria/talaria.yaml
    networks:
      - xmidt
  talaria-1:
    <<: *talaria
    ports:
      - 6210:6210
      - 6211:6211
      - 6212:6212
      - 6213:6213
      - 6214:6214
    volumes:
      - ./docFiles/talaria-1.yaml:/etc/talaria/talaria.yaml
  talaria-2:
    <<: *talaria
    ports:
      - 6220:6220
      - 6221:6221
      - 6222:6222
      - 6223:6223
      - 6224:6224
    volumes:
      - ./docFiles/talaria-2.yaml:/etc/talaria/talaria.yaml
  # Mark Consul Definitions
  consul0:
    image: consul:latest
    container_name: consul0
    hostname: consul0
    ports:
      - 8400:8400
      - 8500:8500
      - 8600:8600
      - 8600:8600/udp
    networks:
      - xmidt
    volumes:
      - ./docFiles/consul-0.json:/consul.json
    command: "agent -server -bootstrap-expect 1 -ui -client 0.0.0.0 -config-file consul.json"
  consul1:
    image: consul:latest
    container_name: consul1
    hostname: consul1
    ports:
      - 8401:8400
      - 8501:8500
      - 8601:8600
      - 8601:8600/udp
    networks:
      - xmidt
    depends_on:
      - consul0
    volumes:
      - ./docFiles/consul-1.json:/consul.json
    command: "agent -server -bootstrap-expect 1 -ui -client 0.0.0.0 -config-file consul.json"

networks:
  xmidt:
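
After `docker-compose up`, consul's standard HTTP API offers a quick way to confirm
that the services registered; a sketch using the port mappings above:
```bash
# services registered in the primary datacenter (consul0 is mapped to 8500)
curl -s http://localhost:8500/v1/catalog/services

# the "backup" datacenter's talaria registers with consul1 (mapped to 8501)
curl -s http://localhost:8501/v1/catalog/service/talaria
```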