[arvados] created: 2.6.0-343-g4ac2b369a
From: git repository hosting <git at public.arvados.org>
Date: Thu Jun 29 21:13:51 UTC 2023
at 4ac2b369a544ef3af5ccc8cc5fff69575c175032 (commit)
commit 4ac2b369a544ef3af5ccc8cc5fff69575c175032
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 16:38:04 2023 -0300
20610: Tweaks postgresql access control to allow multiple controller hosts.
Also, explicitly adds the dispatcher's IP to the ACL, in case it is located
on a different node.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
index 10cbb6c34..afe843aa7 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
@@ -3,6 +3,11 @@
#
# SPDX-License-Identifier: AGPL-3.0
+{%- set domain = "__DOMAIN__" %}
+{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
+{%- set balancer_backends = "__BALANCER_BACKENDS__".split(",") if enable_balancer else [] %}
+{%- set dispatcher_ip = "__DISPATCHER_INT_IP__" %}
+
### POSTGRESQL
postgres:
pkgs_extra:
@@ -17,7 +22,15 @@ postgres:
- ['host', 'all', 'all', '127.0.0.1/32', 'md5']
- ['host', 'all', 'all', '::1/128', 'md5']
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']
+ - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ dispatcher_ip }}/32']
+ {%- if enable_balancer %}
+ {%- for backend in balancer_backends %}
+ {%- set controller_ip = salt['cmd.run']("getent hosts "+backend+"."+domain+" | awk '{print $1 ; exit}'", python_shell=True) %}
+ - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ controller_ip }}/32']
+ {%- endfor %}
+ {%- else %}
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '__CONTROLLER_INT_IP__/32']
+ {%- endif %}
users:
__CLUSTER___arvados:
ensure: present
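For context, the Jinja loop added above shells out once per balancer backend to look up its internal address; the equivalent command, with a placeholder backend name and domain, is sketched below, and the resolved IP ends up as one extra host line per controller in pg_hba.conf.
  # Roughly the lookup salt's cmd.run performs for each backend listed in
  # BALANCER_BACKENDS (hostname and output are illustrative, not from the diff):
  getent hosts controller1.xyzzy.example.com | awk '{print $1 ; exit}'
  # e.g. prints 10.1.1.21, which the pillar emits as an ACL entry along the
  # lines of: host  <cluster>_arvados  <cluster>_arvados  10.1.1.21/32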
commit 0a038ad220c098896a66e6a9b5630af1e36a608b
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 16:33:28 2023 -0300
20610: Tweaks the controller's nginx pillar to support "balanced mode".
When the balancer is enabled:
* The SSL nginx vhost is not needed.
* Each controller should answer on its own hostname instead of the cluster's
domain.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
index d0fd6a131..d87f55f4e 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: AGPL-3.0
{%- import_yaml "ssl_key_encrypted.sls" as ssl_key_encrypted_pillar %}
+{%- set balanced_controller = ("__ENABLE_BALANCER__"|to_bool) %}
+{%- set server_name = grains['fqdn'] if balanced_controller else "__DOMAIN__" %}
### NGINX
nginx:
@@ -28,14 +30,31 @@ nginx:
overwrite: true
config:
- server:
- - server_name: __DOMAIN__
+ - server_name: {{ server_name }}
- listen:
- 80 default
- location /.well-known:
- root: /var/www
+ {%- if balanced_controller %}
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://controller_upstream'
+ - proxy_read_timeout: 300
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_max_temp_file_size: 0
+ - proxy_request_buffering: 'off'
+ - proxy_buffering: 'off'
+ - proxy_http_version: '1.1'
+ - access_log: /var/log/nginx/{{ server_name }}.access.log combined
+ - error_log: /var/log/nginx/{{ server_name }}.error.log
+ - client_max_body_size: 128m
+ {%- else %}
- location /:
- return: '301 https://$host$request_uri'
+ {%- endif %}
+ {%- if not balanced_controller %}
arvados_controller_ssl.conf:
enabled: true
overwrite: true
@@ -43,7 +62,7 @@ nginx:
__CERT_REQUIRES__
config:
- server:
- - server_name: __DOMAIN__
+ - server_name: {{ server_name }}
- listen:
- __CONTROLLER_EXT_SSL_PORT__ http2 ssl
- index: index.html index.htm
@@ -69,6 +88,7 @@ nginx:
{%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}
- ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}
{%- endif %}
- - access_log: /var/log/nginx/controller.__DOMAIN__.access.log combined
- - error_log: /var/log/nginx/controller.__DOMAIN__.error.log
+ - access_log: /var/log/nginx/{{ server_name }}.access.log combined
+ - error_log: /var/log/nginx/{{ server_name }}.error.log
- client_max_body_size: 128m
+ {%- endif %}
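An illustrative way to check the resulting split (all hostnames are placeholders, and the default external SSL port is assumed): each balanced controller answers plain HTTP on its own FQDN, while TLS for the cluster domain is terminated only in front of them.
  # Hypothetical post-deploy smoke test; adjust hostnames to your cluster.
  curl -s -o /dev/null -w 'backend:  %{http_code}\n' http://controller1.xyzzy.example.com/
  curl -s -o /dev/null -w 'balancer: %{http_code}\n' https://xyzzy.example.com/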
commit 6346b3b3b78f5ef838d71945b7b03bc60cb4868a
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 16:13:06 2023 -0300
20610: Updates the arvados pillar to handle the dispatcher's IP separately.
Also, rearranges the _INT_IP envvars to be more explicit about IP sharing.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 98fcf5f6d..b3b4d447a 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -160,7 +160,7 @@ arvados:
'http://localhost:8003': {}
DispatchCloud:
InternalURLs:
- 'http://__CONTROLLER_INT_IP__:9006': {}
+ 'http://__DISPATCHER_INT_IP__:9006': {}
Keepbalance:
InternalURLs:
'http://__CONTROLLER_INT_IP__:9005': {}
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index b26e67a10..50d3d0ca8 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -110,16 +110,16 @@ CLUSTER_INT_CIDR=10.1.0.0/16
# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=10.1.1.11
-WEBSOCKET_INT_IP=10.1.1.11
-KEEP_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
+WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
+DATABASE_INT_IP=${CONTROLLER_INT_IP}
+WORKBENCH1_INT_IP=10.1.1.15
# Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.15
+KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
+WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
+WEBSHELL_INT_IP=${WORKBENCH1_INT_IP}
+KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
-WORKBENCH1_INT_IP=10.1.1.15
-WORKBENCH2_INT_IP=10.1.1.15
-WEBSHELL_INT_IP=10.1.1.15
-DATABASE_INT_IP=10.1.1.11
SHELL_INT_IP=10.1.2.17
# Load balancing settings
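Since several addresses are now derived from CONTROLLER_INT_IP and WORKBENCH1_INT_IP, those two must be set before the variables that reference them; after editing, the expansion can be checked by sourcing the file (assuming your edited copy is named local.params):
  # Quick sanity check of the shared-IP variables; assumes a filled-in copy of
  # the example file saved as local.params in the current directory.
  source ./local.params
  echo "controller=${CONTROLLER_INT_IP} dispatcher=${DISPATCHER_INT_IP} database=${DATABASE_INT_IP}"
  echo "workbench1=${WORKBENCH1_INT_IP} keepweb=${KEEPWEB_INT_IP} keep=${KEEP_INT_IP}"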
commit 1bc272d03b72e21ec95318a56303547dc1833af3
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 16:05:31 2023 -0300
20610: Adds the new pillars for the balancer role.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls
new file mode 100644
index 000000000..f2d4b647b
--- /dev/null
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls
@@ -0,0 +1,10 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### LETSENCRYPT
+letsencrypt:
+ domainsets:
+ __BALANCER_NODENAME__.__DOMAIN__:
+ - __DOMAIN__
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls
new file mode 100644
index 000000000..251799530
--- /dev/null
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls
@@ -0,0 +1,111 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+{%- import_yaml "ssl_key_encrypted.sls" as ssl_key_encrypted_pillar %}
+{%- set domain = "__DOMAIN__" %}
+{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
+{%- set balancer_backends = "__BALANCER_BACKENDS__".split(",") if enable_balancer else [] %}
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+ ### STREAMS
+ http:
+ 'geo $external_client':
+ default: 1
+ '127.0.0.0/8': 0
+ '__CLUSTER_INT_CIDR__': 0
+ upstream controller_upstream:
+ {%- for backend in balancer_backends %}
+ 'server {{ backend }}.{{ domain }}:80': ''
+ {%- endfor %}
+
+ ### SNIPPETS
+ snippets:
+ # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4
+ ssl_hardening_default.conf:
+ - ssl_session_timeout: 1d
+ - ssl_session_cache: 'shared:arvadosSSL:10m'
+ - ssl_session_tickets: 'off'
+
+ # intermediate configuration
+ - ssl_protocols: TLSv1.2 TLSv1.3
+ - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+ - ssl_prefer_server_ciphers: 'off'
+
+ # HSTS (ngx_http_headers_module is required) (63072000 seconds)
+ - add_header: 'Strict-Transport-Security "max-age=63072000" always'
+
+ # OCSP stapling
+ - ssl_stapling: 'on'
+ - ssl_stapling_verify: 'on'
+
+ # verify chain of trust of OCSP response using Root CA and Intermediate certs
+ # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates
+
+ # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
+ # - ssl_dhparam: /path/to/dhparam
+
+ # replace with the IP address of your resolver
+ # - resolver: 127.0.0.1
+
+ ### SITES
+ servers:
+ managed:
+ # Remove default webserver
+ default:
+ enabled: false
+ ### DEFAULT
+ arvados_balancer_default.conf:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: {{ domain }}
+ - listen:
+ - 80 default
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_balancer_ssl.conf:
+ enabled: true
+ overwrite: true
+ requires:
+ __CERT_REQUIRES__
+ config:
+ - server:
+ - server_name: {{ domain }}
+ - listen:
+ - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://controller_upstream'
+ - proxy_read_timeout: 300
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_set_header: 'X-External-Client $external_client'
+ - proxy_set_header: 'Upgrade $http_upgrade'
+ - proxy_set_header: 'Connection "upgrade"'
+ - proxy_max_temp_file_size: 0
+ - proxy_request_buffering: 'off'
+ - proxy_buffering: 'off'
+ - proxy_http_version: '1.1'
+ - include: snippets/ssl_hardening_default.conf
+ - ssl_certificate: __CERT_PEM__
+ - ssl_certificate_key: __CERT_KEY__
+ {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}
+ - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}
+ {%- endif %}
+ - access_log: /var/log/nginx/{{ domain }}.access.log combined
+ - error_log: /var/log/nginx/{{ domain }}.error.log
+ - client_max_body_size: 128m
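Before sending TLS traffic through the upstream block defined above, a rough pre-flight from the balancer node is to confirm every backend resolves and answers on port 80 (domain and backend names below are the example placeholders):
  # Hypothetical check only; substitute your real domain and backend list.
  DOMAIN="xyzzy.example.com"
  BALANCER_BACKENDS="controller1,controller2"
  for b in ${BALANCER_BACKENDS//,/ }; do
    getent hosts "${b}.${DOMAIN}" >/dev/null || { echo "${b}: does not resolve"; continue; }
    curl -s -o /dev/null -w "${b}: %{http_code}\n" "http://${b}.${DOMAIN}/"
  done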
commit f76c285eb00543bc612d144556171c6fe9f50893
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 15:59:53 2023 -0300
20610: Adds 'balancer' role to the installer scripts.
Also, fixes a salt bootstrap issue due to the fact that, as of today, the 3004
version is no longer considered 'stable'.
Also, changes the way we use SSH so that no envvars are forwarded to remote
hosts, to avoid 'setlocale' warnings getting mixed with salt cmd.run
output when using Debian distros to run the installer. This avoids issues
on the first run, before salt configures the remote node's locale.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
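A minimal way to observe the SSH change in isolation (user and host below are placeholders): once a config file is passed with -F, ssh ignores the system-wide /etc/ssh/ssh_config, which on Debian is what normally adds 'SendEnv LANG LC_*'; asking ssh -G for its effective options shows whether any environment variables would still be forwarded.
  # Build the same single-purpose config file the installer generates, then ask
  # ssh (without connecting) which options it would use for a given host.
  SSH_CONFFILE=$(mktemp)
  echo "Include config SendEnv -*" > "${SSH_CONFFILE}"
  ssh -F "${SSH_CONFFILE}" -G deploy@controller1.xyzzy.example.com | grep -i sendenv \
    || echo "no SendEnv entries to forward"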
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index 4ce490421..cfa7b1454 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -47,6 +47,11 @@ declare GITTARGET
# This will be populated by loadconfig()
declare USE_SSH_JUMPHOST
+# The temp file that will get used to disable envvar forwarding to avoid locale
+# issues in Debian distros.
+# This will be populated by loadconfig()
+declare SSH_CONFFILE
+
checktools() {
local MISSING=''
for a in git ip ; do
@@ -133,14 +138,20 @@ loadconfig() {
source ${CONFIG_FILE}.secrets
source ${CONFIG_FILE}
GITTARGET=arvados-deploy-config-${CLUSTER}
+
+ # Set up SSH so that it doesn't forward any environment variable. This is to avoid
+ # getting "setlocale" errors on the first run, depending on the distro being used
+ # to run the installer (like Debian).
+ SSH_CONFFILE=$(mktemp)
+ echo "Include config SendEnv -*" > ${SSH_CONFFILE}
}
ssh_cmd() {
local NODE=$1
if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
- echo "ssh"
+ echo "ssh -F ${SSH_CONFFILE}"
else
- echo "ssh -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
+ echo "ssh -F ${SSH_CONFFILE} -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
fi
}
@@ -296,13 +307,22 @@ case "$subcmd" in
for NODE in "${!NODES[@]}"
do
- # then 'api' or 'controller' roles
+ # then 'api' or 'controller' roles
if [[ "${NODES[$NODE]}" =~ (api|controller) ]] ; then
deploynode $NODE "${NODES[$NODE]}"
unset NODES[$NODE]
fi
done
+ for NODE in "${!NODES[@]}"
+ do
+ # then 'balancer' role
+ if [[ "${NODES[$NODE]}" =~ (balancer) ]] ; then
+ deploynode $NODE "${NODES[$NODE]}"
+ unset NODES[$NODE]
+ fi
+ done
+
for NODE in "${!NODES[@]}"
do
# Everything else (we removed the nodes that we
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index d1cdfeb3c..b26e67a10 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -110,6 +110,7 @@ CLUSTER_INT_CIDR=10.1.0.0/16
# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
+DISPATCHER_INT_IP=10.1.1.11
WEBSOCKET_INT_IP=10.1.1.11
KEEP_INT_IP=10.1.1.15
# Both for collections and downloads
@@ -121,6 +122,11 @@ WEBSHELL_INT_IP=10.1.1.15
DATABASE_INT_IP=10.1.1.11
SHELL_INT_IP=10.1.2.17
+# Load balancing settings
+ENABLE_BALANCER="no"
+BALANCER_BACKENDS="controller1,controller2"
+BALANCER_NODENAME="controller"
+
# Performance tuning parameters
#CONTROLLER_NGINX_WORKERS=
#CONTROLLER_MAX_CONCURRENT_REQUESTS=
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 3314d04ff..60837c50a 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -26,6 +26,7 @@ usage() {
echo >&2 " -r, --roles List of Arvados roles to apply to the host, comma separated"
echo >&2 " Possible values are:"
echo >&2 " api"
+ echo >&2 " balancer"
echo >&2 " controller"
echo >&2 " dispatcher"
echo >&2 " keepproxy"
@@ -109,7 +110,7 @@ arguments() {
for i in ${2//,/ }
do
# Verify the role exists
- if [[ ! "database,api,controller,keepstore,websocket,keepweb,workbench2,webshell,keepbalance,keepproxy,shell,workbench,dispatcher,monitoring" == *"$i"* ]]; then
+ if [[ ! "database,api,balancer,controller,keepstore,websocket,keepweb,workbench2,webshell,keepbalance,keepproxy,shell,workbench,dispatcher,monitoring" == *"$i"* ]]; then
echo "The role '${i}' is not a valid role"
usage
exit 1
@@ -319,7 +320,7 @@ else
echo "Salt already installed"
else
curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
- sh /tmp/bootstrap_salt.sh -XdfP -x python3 stable ${SALT_VERSION}
+ sh /tmp/bootstrap_salt.sh -XdfP -x python3 old-stable ${SALT_VERSION}
/bin/systemctl stop salt-minion.service
/bin/systemctl disable salt-minion.service
fi
@@ -461,7 +462,11 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
s#__MONITORING_USERNAME__#${MONITORING_USERNAME}#g;
s#__MONITORING_EMAIL__#${MONITORING_EMAIL}#g;
s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;
- s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g" \
+ s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g;
+ s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
+ s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
+ s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g" \
"${f}" > "${P_DIR}"/$(basename "${f}")
done
@@ -541,7 +546,11 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
s#__MONITORING_USERNAME__#${MONITORING_USERNAME}#g;
s#__MONITORING_EMAIL__#${MONITORING_EMAIL}#g;
s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;
- s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g" \
+ s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g;
+ s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
+ s#__BALANCER_NODENAME__#${BALANCER_NODENAME}#g;
+ s#__BALANCER_BACKENDS__#${BALANCER_BACKENDS}#g;
+ s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g" \
"${f}" > "${F_DIR}/extra/extra"/$(basename "${f}")
done
fi
@@ -804,17 +813,19 @@ else
echo " - extra.passenger_rvm" >> ${S_DIR}/top.sls
### If we don't install and run LE before arvados-api-server, it fails and breaks everything
### after it. So we add this here as we are, after all, sharing the host for api and controller
- if [ "${SSL_MODE}" = "lets-encrypt" ]; then
- if [ "${USE_LETSENCRYPT_ROUTE53}" = "yes" ]; then
- grep -q "aws_credentials" ${S_DIR}/top.sls || echo " - aws_credentials" >> ${S_DIR}/top.sls
- fi
- grep -q "letsencrypt" ${S_DIR}/top.sls || echo " - letsencrypt" >> ${S_DIR}/top.sls
- else
- # Use custom certs
- if [ "${SSL_MODE}" = "bring-your-own" ]; then
- copy_custom_cert ${CUSTOM_CERTS_DIR} controller
+ if [ "${ENABLE_BALANCER}" == "no" ]; then
+ if [ "${SSL_MODE}" = "lets-encrypt" ]; then
+ if [ "${USE_LETSENCRYPT_ROUTE53}" = "yes" ]; then
+ grep -q "aws_credentials" ${S_DIR}/top.sls || echo " - aws_credentials" >> ${S_DIR}/top.sls
+ fi
+ grep -q "letsencrypt" ${S_DIR}/top.sls || echo " - letsencrypt" >> ${S_DIR}/top.sls
+ else
+ # Use custom certs
+ if [ "${SSL_MODE}" = "bring-your-own" ]; then
+ copy_custom_cert ${CUSTOM_CERTS_DIR} controller
+ fi
+ grep -q controller ${P_DIR}/extra_custom_certs.sls || echo " - controller" >> ${P_DIR}/extra_custom_certs.sls
fi
- grep -q controller ${P_DIR}/extra_custom_certs.sls || echo " - controller" >> ${P_DIR}/extra_custom_certs.sls
fi
grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
# Pillars
@@ -828,8 +839,89 @@ else
NGINX_INSTALL_SOURCE="install_from_phusionpassenger"
sed -i "s/__NGINX_INSTALL_SOURCE__/${NGINX_INSTALL_SOURCE}/g" ${P_DIR}/nginx_passenger.sls
;;
- "controller" | "websocket" | "workbench" | "workbench2" | "webshell" | "keepweb" | "keepproxy")
- # States
+ "balancer")
+ ### States ###
+ grep -q "\- nginx$" ${S_DIR}/top.sls || echo " - nginx" >> ${S_DIR}/top.sls
+
+ if [ "${SSL_MODE}" = "lets-encrypt" ]; then
+ grep -q "letsencrypt" ${S_DIR}/top.sls || echo " - letsencrypt" >> ${S_DIR}/top.sls
+ if [ "x${USE_LETSENCRYPT_ROUTE53}" = "xyes" ]; then
+ grep -q "aws_credentials" ${S_DIR}/top.sls || echo " - aws_credentials" >> ${S_DIR}/top.sls
+ fi
+ elif [ "${SSL_MODE}" = "bring-your-own" ]; then
+ copy_custom_cert ${CUSTOM_CERTS_DIR} ${R}
+ fi
+
+ ### Pillars ###
+ grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo " - nginx_${R}_configuration" >> ${P_DIR}/top.sls
+
+ if [ "${SSL_MODE}" = "lets-encrypt" ]; then
+ grep -q "letsencrypt" ${P_DIR}/top.sls || echo " - letsencrypt" >> ${P_DIR}/top.sls
+
+ grep -q "letsencrypt_${R}_configuration" ${P_DIR}/top.sls || echo " - letsencrypt_${R}_configuration" >> ${P_DIR}/top.sls
+ sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${BALANCER_NODENAME}.${DOMAIN}*/g;
+ s#__CERT_PEM__#/etc/letsencrypt/live/${BALANCER_NODENAME}.${DOMAIN}/fullchain.pem#g;
+ s#__CERT_KEY__#/etc/letsencrypt/live/${BALANCER_NODENAME}.${DOMAIN}/privkey.pem#g" \
+ ${P_DIR}/nginx_${R}_configuration.sls
+
+ if [ "${USE_LETSENCRYPT_ROUTE53}" = "yes" ]; then
+ grep -q "aws_credentials" ${P_DIR}/top.sls || echo " - aws_credentials" >> ${P_DIR}/top.sls
+ fi
+ elif [ "${SSL_MODE}" = "bring-your-own" ]; then
+ grep -q "ssl_key_encrypted" ${P_DIR}/top.sls || echo " - ssl_key_encrypted" >> ${P_DIR}/top.sls
+ sed -i "s/__CERT_REQUIRES__/file: extra_custom_certs_file_copy_arvados-${R}.pem/g;
+ s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;
+ s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g" \
+ ${P_DIR}/nginx_${R}_configuration.sls
+ grep -q "${R}" ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
+ fi
+ ;;
+ "controller")
+ ### States ###
+ grep -q "\- nginx$" ${S_DIR}/top.sls || echo " - nginx" >> ${S_DIR}/top.sls
+ grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
+
+ if [ "${ENABLE_BALANCER}" == "no" ]; then
+ if [ "${SSL_MODE}" = "lets-encrypt" ]; then
+ if [ "x${USE_LETSENCRYPT_ROUTE53}" = "xyes" ]; then
+ grep -q "aws_credentials" ${S_DIR}/top.sls || echo " - aws_credentials" >> ${S_DIR}/top.sls
+ fi
+ grep -q "letsencrypt" ${S_DIR}/top.sls || echo " - letsencrypt" >> ${S_DIR}/top.sls
+ elif [ "${SSL_MODE}" = "bring-your-own" ]; then
+ copy_custom_cert ${CUSTOM_CERTS_DIR} ${R}
+ fi
+ fi
+
+ ### Pillars ###
+ grep -q "nginx_passenger" ${P_DIR}/top.sls || echo " - nginx_passenger" >> ${P_DIR}/top.sls
+ grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo " - nginx_${R}_configuration" >> ${P_DIR}/top.sls
+
+ if [ "${ENABLE_BALANCER}" == "no" ]; then
+ if [ "${SSL_MODE}" = "lets-encrypt" ]; then
+ if [ "${USE_LETSENCRYPT_ROUTE53}" = "yes" ]; then
+ grep -q "aws_credentials" ${P_DIR}/top.sls || echo " - aws_credentials" >> ${P_DIR}/top.sls
+ fi
+
+ grep -q "letsencrypt" ${P_DIR}/top.sls || echo " - letsencrypt" >> ${P_DIR}/top.sls
+ grep -q "letsencrypt_${R}_configuration" ${P_DIR}/top.sls || echo " - letsencrypt_${R}_configuration" >> ${P_DIR}/top.sls
+ sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${R}.${DOMAIN}*/g;
+ s#__CERT_PEM__#/etc/letsencrypt/live/${R}.${DOMAIN}/fullchain.pem#g;
+ s#__CERT_KEY__#/etc/letsencrypt/live/${R}.${DOMAIN}/privkey.pem#g" \
+ ${P_DIR}/nginx_${R}_configuration.sls
+ else
+ grep -q "ssl_key_encrypted" ${P_DIR}/top.sls || echo " - ssl_key_encrypted" >> ${P_DIR}/top.sls
+ sed -i "s/__CERT_REQUIRES__/file: extra_custom_certs_file_copy_arvados-${R}.pem/g;
+ s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;
+ s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g" \
+ ${P_DIR}/nginx_${R}_configuration.sls
+ grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
+ fi
+ fi
+ # We need to tweak the Nginx's pillar depending whether we want plain nginx or nginx+passenger
+ sed -i "s/__NGINX_INSTALL_SOURCE__/${NGINX_INSTALL_SOURCE}/g" ${P_DIR}/nginx_passenger.sls
+ ;;
+ "websocket" | "workbench" | "workbench2" | "webshell" | "keepweb" | "keepproxy")
+ ### States ###
if [ "${R}" = "workbench" ]; then
grep -q " - logrotate" ${S_DIR}/top.sls || echo " - logrotate" >> ${S_DIR}/top.sls
NGINX_INSTALL_SOURCE="install_from_phusionpassenger"
@@ -841,6 +933,7 @@ else
else
grep -q "\- nginx$" ${S_DIR}/top.sls || echo " - nginx" >> ${S_DIR}/top.sls
fi
+
if [ "${SSL_MODE}" = "lets-encrypt" ]; then
if [ "x${USE_LETSENCRYPT_ROUTE53}" = "xyes" ]; then
grep -q "aws_credentials" ${S_DIR}/top.sls || echo " - aws_credentials" >> ${S_DIR}/top.sls
@@ -859,11 +952,13 @@ else
fi
fi
fi
+
# webshell role is just a nginx vhost, so it has no state
if [ "${R}" != "webshell" ]; then
grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
fi
- # Pillars
+
+ ### Pillars ###
if [ "${R}" = "workbench" ]; then
grep -q "logrotate_wb1" ${P_DIR}/top.sls || echo " - logrotate_wb1" >> ${P_DIR}/top.sls
fi
commit 5741ee2bffbb565d946a064f7712dac7f7059f99
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Jun 29 15:57:40 2023 -0300
20610: Removes unused keepstore reference from Terraform.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/terraform/aws/services/locals.tf b/tools/salt-install/terraform/aws/services/locals.tf
index 618da3a51..b82f885e2 100644
--- a/tools/salt-install/terraform/aws/services/locals.tf
+++ b/tools/salt-install/terraform/aws/services/locals.tf
@@ -22,7 +22,6 @@ locals {
default = aws_iam_instance_profile.default_instance_profile
controller = aws_iam_instance_profile.dispatcher_instance_profile
keep0 = aws_iam_instance_profile.keepstore_instance_profile
- keep1 = aws_iam_instance_profile.keepstore_instance_profile
}
private_subnet_id = data.terraform_remote_state.vpc.outputs.private_subnet_id
public_subnet_id = data.terraform_remote_state.vpc.outputs.public_subnet_id
-----------------------------------------------------------------------
hooks/post-receive