[arvados] updated: 2.6.0-348-g3664b849b
git repository hosting git at public.arvados.org
Wed Jul 26 22:45:08 UTC 2023
Summary of changes:
.../multi_host/aws/pillars/arvados.sls | 16 +++----
.../pillars/letsencrypt_balancer_configuration.sls | 2 +-
.../aws/pillars/nginx_balancer_configuration.sls | 5 +--
.../multi_host/aws/pillars/postgresql.sls | 13 +++---
.../multi_host/aws/pillars/prometheus_server.sls | 32 ++++++++++----
.../local.params.example.multiple_hosts | 49 +++++++++++++++++++---
tools/salt-install/local.params.secrets.example | 1 +
tools/salt-install/provision.sh | 36 ++++++++++++----
8 files changed, 112 insertions(+), 42 deletions(-)
via 3664b849b6f4f12a11f7ea9509b28c0a9a74fac1 (commit)
via 8d438557f4f3da941eb9fa695da905d488c6301b (commit)
via c07ebf7af530652d49f03222b4098e5ac4e15ca7 (commit)
via 12f5ee701ad89bdbea4149e4f55b0dc9d31b40ba (commit)
via f5c6eb8491fb0d0a94bc1699ad7d84b054e0fec0 (commit)
from 4ac2b369a544ef3af5ccc8cc5fff69575c175032 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
commit 3664b849b6f4f12a11f7ea9509b28c0a9a74fac1
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Jul 26 19:43:57 2023 -0300
20610: Fixes the Prometheus configuration to track the controller nodes.
Also fixes the arvados-dispatch-cloud (a-d-c) config, which assumed the service runs on the controller node.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
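For reference, a rough sketch of how the new __CONTROLLER_NODES__ placeholder is meant to expand when the balancer is enabled: the pillar splits the comma-separated list and emits one scrape target per controller, labelled with the node's short host name. The host names and cluster id below are made-up examples, not values taken from this commit.

# Illustrative only: print the scrape targets the Jinja loop added below
# would generate for a hypothetical two-controller cluster "xyzzy".
CONTROLLER_NODES="controller1.xyzzy.example.com,controller2.xyzzy.example.com"
IFS=',' read -ra controllers <<< "${CONTROLLER_NODES}"
for c in "${controllers[@]}"; do
  echo "target: ${c}  instance: ${c%%.*}.xyzzy"
done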
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
index e64694f4f..6dc90c840 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
@@ -3,6 +3,9 @@
#
# SPDX-License-Identifier: AGPL-3.0
+{%- set controller_nodes = "__CONTROLLER_NODES__".split(',') %}
+{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
+
### PROMETHEUS
prometheus:
wanted:
@@ -42,12 +45,25 @@ prometheus:
cluster: __CLUSTER__
- job_name: arvados_controller
bearer_token: __MANAGEMENT_TOKEN__
+ {%- if enable_balancer %}
+ scheme: http
+ {%- else %}
scheme: https
+ {%- endif %}
static_configs:
+ {%- if enable_balancer %}
+ {%- for controller in controller_nodes %}
+ - targets: ['{{ controller }}']
+ labels:
+ instance: {{ controller.split('.')[0] }}.__CLUSTER__
+ cluster: __CLUSTER__
+ {%- endfor %}
+ {%- else %}
- targets: ['__DOMAIN__:443']
labels:
instance: controller.__CLUSTER__
cluster: __CLUSTER__
+ {%- endif %}
- job_name: keep_web
bearer_token: __MANAGEMENT_TOKEN__
scheme: https
@@ -73,7 +89,7 @@ prometheus:
- job_name: arvados_dispatch_cloud
bearer_token: __MANAGEMENT_TOKEN__
static_configs:
- - targets: ['__CONTROLLER_INT_IP__:9006']
+ - targets: ['__DISPATCHER_INT_IP__:9006']
labels:
instance: arvados-dispatch-cloud.__CLUSTER__
cluster: __CLUSTER__
commit 8d438557f4f3da941eb9fa695da905d488c6301b
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Jul 26 18:06:41 2023 -0300
20610: Removes duplication in the balancer & backend declarations.
Instead of having to declare those explicitly, the ROLES map is now used
to get the balancer and controller nodes.
Also, explicitly grants PostgreSQL access to keep-balance instead of assuming it's
installed on the same node as the controller.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
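Sketched below (with hypothetical host names) is the effect of the switch: the __BALANCER_NODENAME__ and __CONTROLLER_NODES__ placeholders are now filled straight from the ROLES map rather than from dedicated variables.

# Illustrative only: assuming a layout with one balancer and two controllers,
# the placeholders would expand roughly as follows.
declare -A ROLES=(
  [balancer]="controller.xyzzy.example.com"
  [controller]="controller1.xyzzy.example.com,controller2.xyzzy.example.com"
)
echo "__BALANCER_NODENAME__ -> ${ROLES['balancer']}"
echo "__CONTROLLER_NODES__  -> ${ROLES['controller']}"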
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls
index f2d4b647b..f2de52d33 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls
@@ -6,5 +6,5 @@
### LETSENCRYPT
letsencrypt:
domainsets:
- __BALANCER_NODENAME__.__DOMAIN__:
+ __BALANCER_NODENAME__:
- __DOMAIN__
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls
index 251799530..92ad3af2e 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls
@@ -5,8 +5,7 @@
{%- import_yaml "ssl_key_encrypted.sls" as ssl_key_encrypted_pillar %}
{%- set domain = "__DOMAIN__" %}
-{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
-{%- set balancer_backends = "__BALANCER_BACKENDS__".split(",") if enable_balancer else [] %}
+{%- set balancer_backends = "__CONTROLLER_NODES__".split(",") %}
### NGINX
nginx:
@@ -21,7 +20,7 @@ nginx:
'__CLUSTER_INT_CIDR__': 0
upstream controller_upstream:
{%- for backend in balancer_backends %}
- 'server {{ backend }}.{{ domain }}:80': ''
+ 'server {{ backend }}:80': ''
{%- endfor %}
### SNIPPETS
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
index afe843aa7..cc9d7eedf 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
@@ -4,9 +4,9 @@
# SPDX-License-Identifier: AGPL-3.0
{%- set domain = "__DOMAIN__" %}
-{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
-{%- set balancer_backends = "__BALANCER_BACKENDS__".split(",") if enable_balancer else [] %}
+{%- set controller_nodes = "__CONTROLLER_NODES__".split(",") %}
{%- set dispatcher_ip = "__DISPATCHER_INT_IP__" %}
+{%- set keepbalance_ip = "__KEEPBALANCE_INT_IP__" %}
### POSTGRESQL
postgres:
@@ -23,14 +23,11 @@ postgres:
- ['host', 'all', 'all', '::1/128', 'md5']
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ dispatcher_ip }}/32']
- {%- if enable_balancer %}
- {%- for backend in balancer_backends %}
- {%- set controller_ip = salt['cmd.run']("getent hosts "+backend+"."+domain+" | awk '{print $1 ; exit}'", python_shell=True) %}
+ - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ keepbalance_ip }}/32']
+ {%- for controller_hostname in controller_nodes %}
+ {%- set controller_ip = salt['cmd.run']("getent hosts "+controller_hostname+" | awk '{print $1 ; exit}'", python_shell=True) %}
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ controller_ip }}/32']
{%- endfor %}
- {%- else %}
- - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '__CONTROLLER_INT_IP__/32']
- {%- endif %}
users:
__CLUSTER___arvados:
ensure: present
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index c16b9cc27..4234a965d 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -165,8 +165,6 @@ SHELL_INT_IP=10.1.2.17
# Load balancing settings
ENABLE_BALANCER="no"
-BALANCER_BACKENDS="controller1,controller2"
-BALANCER_NODENAME="controller"
# Performance tuning parameters
#CONTROLLER_NGINX_WORKERS=
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index c79d16dec..78bd976e6 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -466,8 +466,8 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;
s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g;
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
- s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
- s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__BALANCER_NODENAME__#${ROLES['balancer']}#g;
+ s#__CONTROLLER_NODES__#${ROLES['controller']}#g;
s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
@@ -558,8 +558,8 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;
s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g;
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
- s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
- s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__BALANCER_NODENAME__#${ROLES['balancer']}#g;
+ s#__CONTROLLER_NODES__#${ROLES['controller']}#g;
s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
@@ -877,9 +877,9 @@ else
grep -q "letsencrypt" ${P_DIR}/top.sls || echo " - letsencrypt" >> ${P_DIR}/top.sls
grep -q "letsencrypt_${R}_configuration" ${P_DIR}/top.sls || echo " - letsencrypt_${R}_configuration" >> ${P_DIR}/top.sls
- sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${BALANCER_NODENAME}.${DOMAIN}*/g;
- s#__CERT_PEM__#/etc/letsencrypt/live/${BALANCER_NODENAME}.${DOMAIN}/fullchain.pem#g;
- s#__CERT_KEY__#/etc/letsencrypt/live/${BALANCER_NODENAME}.${DOMAIN}/privkey.pem#g" \
+ sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${ROLES["balancer"]}*/g;
+ s#__CERT_PEM__#/etc/letsencrypt/live/${ROLES["balancer"]}/fullchain.pem#g;
+ s#__CERT_KEY__#/etc/letsencrypt/live/${ROLES["balancer"]}/privkey.pem#g" \
${P_DIR}/nginx_${R}_configuration.sls
if [ "${USE_LETSENCRYPT_ROUTE53}" = "yes" ]; then
commit c07ebf7af530652d49f03222b4098e5ac4e15ca7
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Jul 26 16:57:09 2023 -0300
20610: Adds node list & role->nodes map variables.
Uses the node list variable to configure the Prometheus node-exporter scrape
targets from the NODES configuration in local.params.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
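As a worked example (not part of the commit itself), the loops added below would turn a small NODES map into a NODELIST string and a role-to-nodes ROLES map roughly as follows; the node names and role lists are made-up:

#!/usr/bin/env bash
# Self-contained sketch using the same logic as the local.params loops below,
# written with the ${var:+...} idiom instead of if/else; iteration order of
# the associative arrays is not guaranteed.
declare -A NODES=(
  [controller.xyzzy.example.com]="api,controller,websocket,dispatcher,keepbalance"
  [keep0.xyzzy.example.com]="keepstore"
  [shell.xyzzy.example.com]="shell"
)

NODELIST=""
for node in "${!NODES[@]}"; do
  NODELIST="${NODELIST:+${NODELIST},}${node}"
done

declare -A ROLES
for node in "${!NODES[@]}"; do
  IFS=',' read -ra roles_array <<< "${NODES[$node]}"
  for role in "${roles_array[@]}"; do
    ROLES["$role"]="${ROLES[$role]:+${ROLES[$role]},}${node}"
  done
done

echo "NODELIST=${NODELIST}"
for role in "${!ROLES[@]}"; do
  echo "ROLES[${role}]=${ROLES[$role]}"
done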
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
index bbf997b7b..e64694f4f 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
@@ -59,7 +59,7 @@ prometheus:
- job_name: keep_balance
bearer_token: __MANAGEMENT_TOKEN__
static_configs:
- - targets: ['__CONTROLLER_INT_IP__:9005']
+ - targets: ['__KEEPBALANCE_INT_IP__:9005']
labels:
instance: keep-balance.__CLUSTER__
cluster: __CLUSTER__
@@ -90,14 +90,14 @@ prometheus:
cluster: __CLUSTER__
# Nodes
+ {%- set node_list = "__NODELIST__".split(',') %}
+ {%- set nodes = [] %}
+ {%- for node in node_list %}
+ {%- set _ = nodes.append(node.split('.')[0]) %}
+ {%- endfor %}
- job_name: node
static_configs:
- {% for node in [
- 'controller',
- 'keep0',
- 'workbench',
- 'shell',
- ] %}
+ {% for node in nodes %}
- targets: [ "{{ node }}.__DOMAIN__:9100" ]
labels:
instance: "{{ node }}.__CLUSTER__"
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index d1aab048c..c16b9cc27 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -102,6 +102,35 @@ NODES=(
[shell.${DOMAIN}]=shell
)
+# Comma-separated list of nodes. This is used to dynamically adjust
+# salt pillars.
+NODELIST=""
+for node in "${!NODES[@]}"; do
+ if [ -z "$NODELIST" ]; then
+ NODELIST="$node"
+ else
+ NODELIST="$NODELIST,$node"
+ fi
+done
+
+# The mapping of roles to nodes. This is used to dynamically adjust
+# salt pillars.
+declare -A ROLES
+for node in "${!NODES[@]}"; do
+ roles="${NODES[$node]}"
+
+ # Split the comma-separated roles into an array
+ IFS=',' read -ra roles_array <<< "$roles"
+
+ for role in "${roles_array[@]}"; do
+ if [ -n "${ROLES[$role]:-}" ]; then
+ ROLES["$role"]="${ROLES[$role]},$node"
+ else
+ ROLES["$role"]=$node
+ fi
+ done
+done
+
# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 52a7f70ad..c79d16dec 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -241,6 +241,8 @@ T_DIR="/tmp/cluster_tests"
arguments ${@}
declare -A NODES
+declare -A ROLES
+declare NODELIST
if [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
source ${CONFIG_FILE}.secrets
@@ -466,6 +468,7 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
s#__COMPUTE_AMI__#${COMPUTE_AMI}#g;
@@ -557,6 +560,7 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
s#__COMPUTE_AMI__#${COMPUTE_AMI}#g;
commit 12f5ee701ad89bdbea4149e4f55b0dc9d31b40ba
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Jul 26 10:53:31 2023 -0300
20610: Fixes a template variable usage.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 2672bf88b..3b7089d30 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -111,7 +111,7 @@ arvados:
{%- set max_reqs = "__CONTROLLER_MAX_CONCURRENT_REQUESTS__" %}
{%- if max_reqs != "" and max_reqs is number %}
API:
- MaxConcurrentRequests: max_reqs
+ MaxConcurrentRequests: {{ max_reqs }}
{%- endif %}
### CONTAINERS
commit f5c6eb8491fb0d0a94bc1699ad7d84b054e0fec0
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Jul 26 10:51:25 2023 -0300
20610: Removes the need to directly edit the arvados.sls pillar.
All params are now in local.params.* files.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
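The mechanism is plain sed substitution of __PLACEHOLDER__ tokens in the example pillars, as the provision.sh hunks below show. A minimal sketch, run from tools/salt-install with made-up values, might look like this:

# Illustrative only: substitute a few of the new placeholders and show the
# affected lines; the AMI id and regions are hypothetical.
COMPUTE_AMI="ami-0123456789abcdef0"
COMPUTE_AWS_REGION="us-east-1"
KEEP_AWS_REGION="us-east-1"
sed "s#__COMPUTE_AMI__#${COMPUTE_AMI}#g;
     s#__COMPUTE_AWS_REGION__#${COMPUTE_AWS_REGION}#g;
     s#__KEEP_AWS_REGION__#${KEEP_AWS_REGION}#g" \
  config_examples/multi_host/aws/pillars/arvados.sls | grep -E 'ImageID|Region'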
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index b3b4d447a..2672bf88b 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -122,15 +122,15 @@ arvados:
ResourceTags:
Name: __CLUSTER__-compute-node
BootProbeCommand: 'systemctl is-system-running'
- ImageID: ami-FIXMEFIXMEFIXMEFI
+ ImageID: __COMPUTE_AMI__
Driver: ec2
DriverParameters:
- Region: FIXME
+ Region: __COMPUTE_AWS_REGION__
EBSVolumeType: gp3
- AdminUsername: FIXME
+ AdminUsername: __COMPUTE_USER__
### This SG should allow SSH from the dispatcher to the compute nodes
- SecurityGroupIDs: ['sg-FIXMEFIXMEFIXMEFI']
- SubnetID: subnet-FIXMEFIXMEFIXMEFI
+ SecurityGroupIDs: ['__COMPUTE_SG__']
+ SubnetID: __COMPUTE_SUBNET__
IAMInstanceProfile: __CLUSTER__-compute-node-00-iam-role
DispatchPrivateKey: {{ dispatcher_ssh_privkey|yaml_dquote }}
@@ -145,7 +145,7 @@ arvados:
DriverParameters:
Bucket: __CLUSTER__-nyw5e-000000000000000-volume
IAMRole: __CLUSTER__-keepstore-00-iam-role
- Region: FIXME
+ Region: __KEEP_AWS_REGION__
Users:
NewUsersAreActive: true
@@ -163,7 +163,7 @@ arvados:
'http://__DISPATCHER_INT_IP__:9006': {}
Keepbalance:
InternalURLs:
- 'http://__CONTROLLER_INT_IP__:9005': {}
+ 'http://__KEEPBALANCE_INT_IP__:9005': {}
Keepproxy:
ExternalURL: 'https://keep.__DOMAIN__:__KEEP_EXT_SSL_PORT__'
InternalURLs:
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 50d3d0ca8..d1aab048c 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -28,6 +28,8 @@ INITIAL_USER_EMAIL="admin at cluster_fixme_or_this_wont_work.domain_fixme_or_this_w
# Comment out to disable.
USE_SSH_JUMPHOST="controller.${DOMAIN}"
+AWS_REGION="fixme_or_this_wont_work"
+
# SSL CERTIFICATES
# Arvados requires SSL certificates to work correctly. This installer supports these options:
# * self-signed: let the installer create self-signed certificate(s)
@@ -42,7 +44,17 @@ USE_LETSENCRYPT_ROUTE53="yes"
# For that reason, you'll need to provide AWS credentials with permissions to manage
# RRs in the route53 zone for the cluster.
# WARNING!: If AWS credentials files already exist in the hosts, they won't be replaced.
-LE_AWS_REGION="us-east-1"
+LE_AWS_REGION="${AWS_REGION}"
+
+# Compute node configurations
+COMPUTE_AMI="ami_id_fixme_or_this_wont_work"
+COMPUTE_SG="security_group_fixme_or_this_wont_work"
+COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
+COMPUTE_AWS_REGION="${AWS_REGION}"
+COMPUTE_USER="${DEPLOY_USER}"
+
+# Keep S3 backend region
+KEEP_AWS_REGION="${AWS_REGION}"
# If you going to provide your own certificates for Arvados, the provision script can
# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
@@ -72,11 +84,10 @@ LE_AWS_REGION="us-east-1"
# a custom AWS secret name for each node to retrieve the password.
SSL_KEY_ENCRYPTED="no"
SSL_KEY_AWS_SECRET_NAME="${CLUSTER}-arvados-ssl-privkey-password"
-SSL_KEY_AWS_REGION="us-east-1"
+SSL_KEY_AWS_REGION="${AWS_REGION}"
# Customize Prometheus & Grafana web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
-MONITORING_PASSWORD=${INITIAL_USER_PASSWORD}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}
# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
@@ -111,6 +122,7 @@ CLUSTER_INT_CIDR=10.1.0.0/16
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
+KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
diff --git a/tools/salt-install/local.params.secrets.example b/tools/salt-install/local.params.secrets.example
index bec56e00b..36cdb57b8 100644
--- a/tools/salt-install/local.params.secrets.example
+++ b/tools/salt-install/local.params.secrets.example
@@ -6,6 +6,7 @@
# These are the security-sensitive parameters to configure the installation
INITIAL_USER_PASSWORD="fixme"
+MONITORING_PASSWORD=${INITIAL_USER_PASSWORD}
# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 60837c50a..52a7f70ad 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -466,7 +466,14 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
- s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g" \
+ s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
+ s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
+ s#__COMPUTE_AMI__#${COMPUTE_AMI}#g;
+ s#__COMPUTE_SG__#${COMPUTE_SG}#g;
+ s#__COMPUTE_SUBNET__#${COMPUTE_SUBNET}#g;
+ s#__COMPUTE_AWS_REGION__#${COMPUTE_AWS_REGION}#g;
+ s#__COMPUTE_USER__#${COMPUTE_USER}#g;
+ s#__KEEP_AWS_REGION__#${KEEP_AWS_REGION}#g" \
"${f}" > "${P_DIR}"/$(basename "${f}")
done
@@ -550,7 +557,14 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
- s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g" \
+ s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
+ s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;
+ s#__COMPUTE_AMI__#${COMPUTE_AMI}#g;
+ s#__COMPUTE_SG__#${COMPUTE_SG}#g;
+ s#__COMPUTE_SUBNET__#${COMPUTE_SUBNET}#g;
+ s#__COMPUTE_AWS_REGION__#${COMPUTE_AWS_REGION}#g;
+ s#__COMPUTE_USER__#${COMPUTE_USER}#g;
+ s#__KEEP_AWS_REGION__#${KEEP_AWS_REGION}#g" \
"${f}" > "${F_DIR}/extra/extra"/$(basename "${f}")
done
fi
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits mailing list