[arvados] created: 2.6.0-543-g627f0941ad
git repository hosting <git@public.arvados.org>
Fri Aug 25 19:22:04 UTC 2023
at 627f0941ad141017f3bc214395e69b1367c2b20f (commit)
commit 627f0941ad141017f3bc214395e69b1367c2b20f
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 25 16:20:08 2023 -0300
20889: Fixes Keep S3 bucket instructions.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 2d2317f0b0..eaffcf582c 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -314,16 +314,14 @@ The @multi_host/aws@ template uses S3 for storage. Arvados also supports "files
h3. Object storage in S3 (AWS Specific)
-Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
+If you "followed the recommendend naming scheme":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.
-# In the @arvados.cluster.Volumes.DriverParameters@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
+If you did not follow the recommended naming scheme for either the bucket or role, you'll need to update these parameters in @local.params@:
-If "followed the recommendend naming scheme":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.
+# Set @KEEP_AWS_S3_BUCKET@ to the name of the "keepstore bucket you created earlier":#keep-bucket
+# Set @KEEP_AWS_IAM_ROLE@ to the "keepstore role you created earlier":#keep-bucket
-If you did not follow the recommendend naming scheme for either the bucket or role, you'll need to update these parameters as well:
-
-# Set @Bucket@ to the value of "keepstore bucket you created earlier":#keep-bucket
-# Set @IAMRole@ to "keepstore role you created earlier":#keep-bucket
+You can also configure a specific AWS Region for the S3 bucket by setting @KEEP_AWS_REGION@.
{% include 'ssl_config_multi' %}
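For reference, a hypothetical @local.params@ fragment matching the updated instructions (bucket, role, and region values are illustrative, not project defaults):

  KEEP_AWS_S3_BUCKET="my-keep-data-bucket"
  KEEP_AWS_IAM_ROLE="my-keepstore-iam-role"
  KEEP_AWS_REGION="us-east-1"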
commit e78d91e8d1758c075ea35faa8538eab40ebcbfaa
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 25 16:13:22 2023 -0300
20889: Adds documentation explaining the use of TLS cert keys encryption.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/doc/_includes/_ssl_config_multi.liquid b/doc/_includes/_ssl_config_multi.liquid
index b4d6eff616..473d824f2e 100644
--- a/doc/_includes/_ssl_config_multi.liquid
+++ b/doc/_includes/_ssl_config_multi.liquid
@@ -38,3 +38,13 @@ To supply your own certificates, change the configuration like this:
{% include 'multi_host_install_custom_certificates' %}
All certificate files will be used by nginx. You may need to include intermediate certificates in your certificate files. See "the nginx documentation":http://nginx.org/en/docs/http/configuring_https_servers.html#chains for more details.
+
+h4(#secure-tls-keys). Securing your TLS certificate keys (optional)
+
+When using @SSL_MODE=bring-your-own@, if you need to keep your TLS certificate keys encrypted on the server nodes, you can do so when deploying on AWS by using the "Secrets Manager":https://aws.amazon.com/secrets-manager/ service.
+
+When using Terraform, the secret and its related permission resources are created automatically, and you can customize the secret's name by editing @terraform/services/terraform.tfvars@ and setting its suffix in @ssl_password_secret_name_suffix@.
+
+In @local.params@ you need to set @SSL_KEY_ENCRYPTED@ to @yes@ and change the default values for @SSL_KEY_AWS_SECRET_NAME@ and @SSL_KEY_AWS_REGION@ if necessary.
+
+Then, set the appropriate password as a plain-text value in the AWS web console, so that it can be retrieved by the relevant nodes. Do this before running @installer.sh deploy@ to avoid failures when starting the @nginx@ servers.
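To illustrate the moving parts, here is a sketch of the @local.params@ side plus one way to store the passphrase with the AWS CLI (the secret name and region are assumptions; the web-console workflow described above works equally well):

  # local.params -- enable encrypted TLS keys (values are illustrative)
  SSL_KEY_ENCRYPTED="yes"
  SSL_KEY_AWS_SECRET_NAME="xarv1-arvados-ssl-privkey-password"
  SSL_KEY_AWS_REGION="us-east-1"

  # Store the key passphrase before running 'installer.sh deploy':
  aws secretsmanager put-secret-value \
    --region us-east-1 \
    --secret-id xarv1-arvados-ssl-privkey-password \
    --secret-string 'your-key-passphrase'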
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 697c95b82b..2d2317f0b0 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -21,6 +21,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
# "Choose the SSL configuration":#certificates
## "Using a Let's Encrypt certificates":#lets-encrypt
## "Bring your own certificates":#bring-your-own
+### "Securing your TLS certificate keys":#secure-tls-keys
# "Create a compute image":#create_a_compute_image
# "Begin installation":#installation
# "Further customization of the installation":#further_customization
commit f23064f5b8ce4f8c28edda478dfa8e773e61d8f5
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 25 15:30:04 2023 -0300
20889: Fixes installer.sh indentation to match how provision.sh is written.
It had a mix of space-based and tab-based indentation that made the code
difficult to read.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index 2ad002990e..d41c50e8c5 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -58,394 +58,392 @@ declare USE_SSH_JUMPHOST
declare SSH_CONFFILE
checktools() {
- local MISSING=''
- for a in git ip ; do
- if ! which $a ; then
- MISSING="$MISSING $a"
- fi
- done
- if [[ -n "$MISSING" ]] ; then
- echo "Some tools are missing, please make sure you have the 'git' and 'iproute2' packages installed"
- exit 1
+ local MISSING=''
+ for a in git ip; do
+ if ! which $a; then
+ MISSING="$MISSING $a"
fi
+ done
+ if [[ -n "$MISSING" ]]; then
+ echo "Some tools are missing, please make sure you have the 'git' and 'iproute2' packages installed"
+ exit 1
+ fi
}
cleanup() {
- local NODE=$1
- local SSH=`ssh_cmd "$NODE"`
- # Delete the old repository
- $SSH $DEPLOY_USER@$NODE rm -rf ${GITTARGET}.git ${GITTARGET}
+ local NODE=$1
+ local SSH=$(ssh_cmd "$NODE")
+ # Delete the old repository
+ $SSH $DEPLOY_USER@$NODE rm -rf ${GITTARGET}.git ${GITTARGET}
}
sync() {
- local NODE=$1
- local BRANCH=$2
-
- # Synchronizes the configuration by creating a git repository on
- # each node, pushing our branch, and updating the checkout.
-
- if [[ "$NODE" != localhost ]] ; then
- SSH=`ssh_cmd "$NODE"`
- GIT="eval `git_cmd $NODE`"
+ local NODE=$1
+ local BRANCH=$2
- cleanup $NODE
+ # Synchronizes the configuration by creating a git repository on
+ # each node, pushing our branch, and updating the checkout.
- # Update the git remote for the remote repository.
- if ! $GIT remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
- $GIT remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
- fi
+ if [[ "$NODE" != localhost ]]; then
+ SSH=$(ssh_cmd "$NODE")
+ GIT="eval $(git_cmd $NODE)"
- # Initialize the git repository. We're
- # actually going to make two repositories here because git
- # will complain if you try to push to a repository with a
- # checkout. So we're going to create a "bare" repository
- # and then clone a regular repository (with a checkout)
- # from that.
+ cleanup $NODE
- $SSH $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
- $GIT push $NODE $BRANCH
- $SSH $DEPLOY_USER@$NODE "umask 0077 && git clone -s ${GITTARGET}.git ${GITTARGET} && git -C ${GITTARGET} checkout ${BRANCH}"
+ # Update the git remote for the remote repository.
+ if ! $GIT remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git; then
+ $GIT remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
fi
+
+ # Initialize the git repository. We're
+ # actually going to make two repositories here because git
+ # will complain if you try to push to a repository with a
+ # checkout. So we're going to create a "bare" repository
+ # and then clone a regular repository (with a checkout)
+ # from that.
+
+ $SSH $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
+ $GIT push $NODE $BRANCH
+ $SSH $DEPLOY_USER@$NODE "umask 0077 && git clone -s ${GITTARGET}.git ${GITTARGET} && git -C ${GITTARGET} checkout ${BRANCH}"
+ fi
}
deploynode() {
- local NODE=$1
- local ROLES=$2
- local BRANCH=$3
+ local NODE=$1
+ local ROLES=$2
+ local BRANCH=$3
- # Deploy a node. This runs the provision script on the node, with
- # the appropriate roles.
+ # Deploy a node. This runs the provision script on the node, with
+ # the appropriate roles.
- sync $NODE $BRANCH
+ sync $NODE $BRANCH
- if [[ -z "$ROLES" ]] ; then
- echo "No roles specified for $NODE, will deploy all roles"
- else
- ROLES="--roles ${ROLES}"
- fi
+ if [[ -z "$ROLES" ]]; then
+ echo "No roles specified for $NODE, will deploy all roles"
+ else
+ ROLES="--roles ${ROLES}"
+ fi
- logfile=deploy-${NODE}-$(date -Iseconds).log
- SSH=`ssh_cmd "$NODE"`
+ logfile=deploy-${NODE}-$(date -Iseconds).log
+ SSH=$(ssh_cmd "$NODE")
- if [[ "$NODE" = localhost ]] ; then
- SUDO=''
- if [[ $(whoami) != 'root' ]] ; then
- SUDO=sudo
- fi
- $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
- else
- $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && git log -n1 HEAD && DISABLED_CONTROLLER=\"$DISABLED_CONTROLLER\" sudo --preserve-env=DISABLED_CONTROLLER ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
- cleanup $NODE
+ if [[ "$NODE" = localhost ]]; then
+ SUDO=''
+ if [[ $(whoami) != 'root' ]]; then
+ SUDO=sudo
fi
+ $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
+ else
+ $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && git log -n1 HEAD && DISABLED_CONTROLLER=\"$DISABLED_CONTROLLER\" sudo --preserve-env=DISABLED_CONTROLLER ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
+ cleanup $NODE
+ fi
}
checkcert() {
- local CERTNAME=$1
- local CERTPATH="${CONFIG_DIR}/certs/${CERTNAME}"
- if [[ ! -f "${CERTPATH}.crt" || ! -e "${CERTPATH}.key" ]]; then
- echo "Missing ${CERTPATH}.crt or ${CERTPATH}.key files"
- exit 1
- fi
+ local CERTNAME=$1
+ local CERTPATH="${CONFIG_DIR}/certs/${CERTNAME}"
+ if [[ ! -f "${CERTPATH}.crt" || ! -e "${CERTPATH}.key" ]]; then
+ echo "Missing ${CERTPATH}.crt or ${CERTPATH}.key files"
+ exit 1
+ fi
}
loadconfig() {
- if ! [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
- echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
- fi
- source common.sh
- GITTARGET=arvados-deploy-config-${CLUSTER}
-
- # Set up SSH so that it doesn't forward any environment variable. This is to avoid
- # getting "setlocale" errors on the first run, depending on the distro being used
- # to run the installer (like Debian).
- SSH_CONFFILE=$(mktemp)
- echo "Include config SendEnv -*" > ${SSH_CONFFILE}
+ if ! [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
+ echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
+ fi
+ source common.sh
+ GITTARGET=arvados-deploy-config-${CLUSTER}
+
+ # Set up SSH so that it doesn't forward any environment variable. This is to avoid
+ # getting "setlocale" errors on the first run, depending on the distro being used
+ # to run the installer (like Debian).
+ SSH_CONFFILE=$(mktemp)
+ echo "Include config SendEnv -*" >${SSH_CONFFILE}
}
ssh_cmd() {
- local NODE=$1
- if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
- echo "ssh -F ${SSH_CONFFILE}"
- else
- echo "ssh -F ${SSH_CONFFILE} -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
- fi
+ local NODE=$1
+ if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
+ echo "ssh -F ${SSH_CONFFILE}"
+ else
+ echo "ssh -F ${SSH_CONFFILE} -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
+ fi
}
git_cmd() {
- local NODE=$1
- echo "GIT_SSH_COMMAND=\"`ssh_cmd ${NODE}`\" git"
+ local NODE=$1
+ echo "GIT_SSH_COMMAND=\"$(ssh_cmd ${NODE})\" git"
}
set +u
subcmd="$1"
set -u
-if [[ -n "$subcmd" ]] ; then
- shift
+if [[ -n "$subcmd" ]]; then
+ shift
fi
case "$subcmd" in
- initialize)
- if [[ ! -f provision.sh ]] ; then
- echo "Must be run from arvados/tools/salt-install"
- exit
- fi
-
- checktools
-
- set +u
- SETUPDIR=$1
- PARAMS=$2
- SLS=$3
- TERRAFORM=$4
- set -u
-
- err=
- if [[ -z "$PARAMS" || ! -f local.params.example.$PARAMS ]] ; then
- echo "Not found: local.params.example.$PARAMS"
- echo "Expected one of multiple_hosts, single_host_multiple_hostnames, single_host_single_hostname"
- err=1
- fi
-
- if [[ -z "$SLS" || ! -d config_examples/$SLS ]] ; then
- echo "Not found: config_examples/$SLS"
- echo "Expected one of multi_host/aws, single_host/multiple_hostnames, single_host/single_hostname"
- err=1
- fi
-
- if [[ -z "$SETUPDIR" || -z "$PARAMS" || -z "$SLS" ]]; then
- echo "installer.sh <setup dir to initialize> <params template> <config template>"
- err=1
- fi
-
- if [[ -n "$err" ]] ; then
- exit 1
- fi
-
- echo "Initializing $SETUPDIR"
- git init --shared=0600 $SETUPDIR
- cp -r *.sh tests $SETUPDIR
-
- cp local.params.example.$PARAMS $SETUPDIR/${CONFIG_FILE}
- cp local.params.secrets.example $SETUPDIR/${CONFIG_FILE}.secrets
- cp -r config_examples/$SLS $SETUPDIR/${CONFIG_DIR}
-
- if [[ -n "$TERRAFORM" ]] ; then
- mkdir $SETUPDIR/terraform
- cp -r $TERRAFORM/* $SETUPDIR/terraform/
- fi
-
- cd $SETUPDIR
- echo '*.log' > .gitignore
- echo '**/.terraform' >> .gitignore
- echo '**/.infracost' >> .gitignore
-
- if [[ -n "$TERRAFORM" ]] ; then
- git add terraform
- fi
-
- git add *.sh ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR} tests .gitignore
- git commit -m"initial commit"
-
- echo
- echo "Setup directory $SETUPDIR initialized."
- if [[ -n "$TERRAFORM" ]] ; then
- (cd $SETUPDIR/terraform/vpc && terraform init)
- (cd $SETUPDIR/terraform/data-storage && terraform init)
- (cd $SETUPDIR/terraform/services && terraform init)
- echo "Now go to $SETUPDIR, customize 'terraform/vpc/terraform.tfvars' as needed, then run 'installer.sh terraform'"
- else
- echo "Now go to $SETUPDIR, customize '${CONFIG_FILE}', '${CONFIG_FILE}.secrets' and '${CONFIG_DIR}' as needed, then run 'installer.sh deploy'"
- fi
- ;;
-
- terraform)
- logfile=terraform-$(date -Iseconds).log
- (cd terraform/vpc && terraform apply -auto-approve) 2>&1 | tee -a $logfile
- (cd terraform/data-storage && terraform apply -auto-approve) 2>&1 | tee -a $logfile
- (cd terraform/services && terraform apply -auto-approve) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
- (cd terraform/services && echo -n 'letsencrypt_iam_secret_access_key = ' && terraform output letsencrypt_iam_secret_access_key) 2>&1 | tee -a $logfile
- ;;
-
- terraform-destroy)
- logfile=terraform-$(date -Iseconds).log
- (cd terraform/services && terraform destroy) 2>&1 | tee -a $logfile
- (cd terraform/data-storage && terraform destroy) 2>&1 | tee -a $logfile
- (cd terraform/vpc && terraform destroy) 2>&1 | tee -a $logfile
- ;;
-
- generate-tokens)
- for i in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN ANONYMOUS_USER_TOKEN WORKBENCH_SECRET_KEY DATABASE_PASSWORD; do
- echo ${i}=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32 ; echo '')
- done
- ;;
-
- deploy)
- set +u
- NODE=$1
- set -u
-
- checktools
-
- loadconfig
-
- if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR} ; then
- echo
- echo "Some parameters still need to be updated. Please fix them and then re-run deploy."
- exit 1
- fi
-
- if [[ ${SSL_MODE} == "bring-your-own" ]]; then
- if [[ ! -z "${ROLE2NODES['balancer']:-}" ]]; then
- checkcert balancer
- fi
- if [[ ! -z "${ROLE2NODES['controller']:-}" ]]; then
- checkcert controller
- fi
- if [[ ! -z "${ROLE2NODES['keepproxy']:-}" ]]; then
- checkcert keepproxy
- fi
- if [[ ! -z "${ROLE2NODES['keepweb']:-}" ]]; then
- checkcert collections
- checkcert download
- fi
- if [[ ! -z "${ROLE2NODES['monitoring']:-}" ]]; then
- checkcert grafana
- checkcert prometheus
- fi
- if [[ ! -z "${ROLE2NODES['webshell']:-}" ]]; then
- checkcert webshell
- fi
- if [[ ! -z "${ROLE2NODES['websocket']:-}" ]]; then
- checkcert websocket
- fi
- if [[ ! -z "${ROLE2NODES['workbench']:-}" ]]; then
- checkcert workbench
- fi
- if [[ ! -z "${ROLE2NODES['workbench2']:-}" ]]; then
- checkcert workbench2
- fi
- fi
-
- BRANCH=$(git rev-parse --abbrev-ref HEAD)
-
- set -x
-
- git add -A
- if ! git diff --cached --exit-code --quiet ; then
- git commit -m"prepare for deploy"
- fi
-
- # Used for rolling updates to disable individual nodes at the
- # load balancer.
- export DISABLED_CONTROLLER=""
- if [[ -z "$NODE" ]]; then
- for NODE in "${!NODES[@]}"
- do
- # First, just confirm we can ssh to each node.
- `ssh_cmd "$NODE"` $DEPLOY_USER@$NODE true
- done
-
- for NODE in "${!NODES[@]}"
- do
- # Do 'database' role first,
- if [[ "${NODES[$NODE]}" =~ database ]] ; then
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- unset NODES[$NODE]
- fi
- done
-
- BALANCER=${ROLE2NODES['balancer']:-}
-
- # Check if there are multiple controllers, they'll be comma-separated
- # in ROLE2NODES
- if [[ ${ROLE2NODES['controller']} =~ , ]] ;
- then
- # If we have multiple controllers then there must be
- # load balancer. We want to do a rolling update, take
- # down each node at the load balancer before updating
- # it.
-
- for NODE in "${!NODES[@]}"
- do
- if [[ "${NODES[$NODE]}" =~ controller ]] ; then
- export DISABLED_CONTROLLER=$NODE
-
- # Update balancer that the node is disabled
- deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
-
- # Now update the node itself
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- unset NODES[$NODE]
- fi
- done
- else
- # Only one controller, check if it wasn't already taken care of.
- NODE=${ROLE2NODES['controller']}
- if [[ ! -z "${NODES[$NODE]:-}" ]]; then
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- unset NODES[$NODE]
- fi
- fi
-
- if [[ -n "$BALANCER" ]] ; then
- # Deploy balancer. In the rolling update case, this
- # will re-enable all the controllers at the balancer.
- export DISABLED_CONTROLLER=""
- deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
- unset NODES[$BALANCER]
- fi
-
- for NODE in "${!NODES[@]}"
- do
- # Everything else (we removed the nodes that we
- # already deployed from the list)
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- done
- else
- # Just deploy the node that was supplied on the command line.
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- fi
-
- set +x
- echo
- echo "Completed deploy, run 'installer.sh diagnostics' to verify the install"
-
- ;;
-
- diagnostics)
- loadconfig
-
- set +u
- declare LOCATION=$1
- set -u
-
- if ! which arvados-client ; then
- echo "arvados-client not found, install 'arvados-client' package with 'apt-get' or 'yum'"
- exit 1
- fi
-
- if [[ -z "$LOCATION" ]] ; then
- echo "Need to provide '-internal-client' or '-external-client'"
- echo
- echo "-internal-client You are running this on the same private network as the Arvados cluster (e.g. on one of the Arvados nodes)"
- echo "-external-client You are running this outside the private network of the Arvados cluster (e.g. your workstation)"
- exit 1
- fi
-
- export ARVADOS_API_HOST="${DOMAIN}:${CONTROLLER_EXT_SSL_PORT}"
- export ARVADOS_API_TOKEN="$SYSTEM_ROOT_TOKEN"
-
- arvados-client diagnostics $LOCATION
- ;;
-
- *)
- echo "Arvados installer"
- echo ""
- echo "initialize initialize the setup directory for configuration"
- echo "terraform create cloud resources using terraform"
- echo "terraform-destroy destroy cloud resources created by terraform"
- echo "generate-tokens generate random values for tokens"
- echo "deploy deploy the configuration from the setup directory"
- echo "diagnostics check your install using diagnostics"
- ;;
+initialize)
+ if [[ ! -f provision.sh ]]; then
+ echo "Must be run from arvados/tools/salt-install"
+ exit
+ fi
+
+ checktools
+
+ set +u
+ SETUPDIR=$1
+ PARAMS=$2
+ SLS=$3
+ TERRAFORM=$4
+ set -u
+
+ err=
+ if [[ -z "$PARAMS" || ! -f local.params.example.$PARAMS ]]; then
+ echo "Not found: local.params.example.$PARAMS"
+ echo "Expected one of multiple_hosts, single_host_multiple_hostnames, single_host_single_hostname"
+ err=1
+ fi
+
+ if [[ -z "$SLS" || ! -d config_examples/$SLS ]]; then
+ echo "Not found: config_examples/$SLS"
+ echo "Expected one of multi_host/aws, single_host/multiple_hostnames, single_host/single_hostname"
+ err=1
+ fi
+
+ if [[ -z "$SETUPDIR" || -z "$PARAMS" || -z "$SLS" ]]; then
+ echo "installer.sh <setup dir to initialize> <params template> <config template>"
+ err=1
+ fi
+
+ if [[ -n "$err" ]]; then
+ exit 1
+ fi
+
+ echo "Initializing $SETUPDIR"
+ git init --shared=0600 $SETUPDIR
+ cp -r *.sh tests $SETUPDIR
+
+ cp local.params.example.$PARAMS $SETUPDIR/${CONFIG_FILE}
+ cp local.params.secrets.example $SETUPDIR/${CONFIG_FILE}.secrets
+ cp -r config_examples/$SLS $SETUPDIR/${CONFIG_DIR}
+
+ if [[ -n "$TERRAFORM" ]]; then
+ mkdir $SETUPDIR/terraform
+ cp -r $TERRAFORM/* $SETUPDIR/terraform/
+ fi
+
+ cd $SETUPDIR
+ echo '*.log' >.gitignore
+ echo '**/.terraform' >>.gitignore
+ echo '**/.infracost' >>.gitignore
+
+ if [[ -n "$TERRAFORM" ]]; then
+ git add terraform
+ fi
+
+ git add *.sh ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR} tests .gitignore
+ git commit -m"initial commit"
+
+ echo
+ echo "Setup directory $SETUPDIR initialized."
+ if [[ -n "$TERRAFORM" ]]; then
+ (cd $SETUPDIR/terraform/vpc && terraform init)
+ (cd $SETUPDIR/terraform/data-storage && terraform init)
+ (cd $SETUPDIR/terraform/services && terraform init)
+ echo "Now go to $SETUPDIR, customize 'terraform/vpc/terraform.tfvars' as needed, then run 'installer.sh terraform'"
+ else
+ echo "Now go to $SETUPDIR, customize '${CONFIG_FILE}', '${CONFIG_FILE}.secrets' and '${CONFIG_DIR}' as needed, then run 'installer.sh deploy'"
+ fi
+ ;;
+
+terraform)
+ logfile=terraform-$(date -Iseconds).log
+ (cd terraform/vpc && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+ (cd terraform/data-storage && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+ (cd terraform/services && terraform apply -auto-approve) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
+ (cd terraform/services && echo -n 'letsencrypt_iam_secret_access_key = ' && terraform output letsencrypt_iam_secret_access_key) 2>&1 | tee -a $logfile
+ ;;
+
+terraform-destroy)
+ logfile=terraform-$(date -Iseconds).log
+ (cd terraform/services && terraform destroy) 2>&1 | tee -a $logfile
+ (cd terraform/data-storage && terraform destroy) 2>&1 | tee -a $logfile
+ (cd terraform/vpc && terraform destroy) 2>&1 | tee -a $logfile
+ ;;
+
+generate-tokens)
+ for i in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN ANONYMOUS_USER_TOKEN WORKBENCH_SECRET_KEY DATABASE_PASSWORD; do
+ echo ${i}=$(
+ tr -dc A-Za-z0-9 </dev/urandom | head -c 32
+ echo ''
+ )
+ done
+ ;;
+
+deploy)
+ set +u
+ NODE=$1
+ set -u
+
+ checktools
+
+ loadconfig
+
+ if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR}; then
+ echo
+ echo "Some parameters still need to be updated. Please fix them and then re-run deploy."
+ exit 1
+ fi
+
+ if [[ ${SSL_MODE} == "bring-your-own" ]]; then
+ if [[ ! -z "${ROLE2NODES['balancer']:-}" ]]; then
+ checkcert balancer
+ fi
+ if [[ ! -z "${ROLE2NODES['controller']:-}" ]]; then
+ checkcert controller
+ fi
+ if [[ ! -z "${ROLE2NODES['keepproxy']:-}" ]]; then
+ checkcert keepproxy
+ fi
+ if [[ ! -z "${ROLE2NODES['keepweb']:-}" ]]; then
+ checkcert collections
+ checkcert download
+ fi
+ if [[ ! -z "${ROLE2NODES['monitoring']:-}" ]]; then
+ checkcert grafana
+ checkcert prometheus
+ fi
+ if [[ ! -z "${ROLE2NODES['webshell']:-}" ]]; then
+ checkcert webshell
+ fi
+ if [[ ! -z "${ROLE2NODES['websocket']:-}" ]]; then
+ checkcert websocket
+ fi
+ if [[ ! -z "${ROLE2NODES['workbench']:-}" ]]; then
+ checkcert workbench
+ fi
+ if [[ ! -z "${ROLE2NODES['workbench2']:-}" ]]; then
+ checkcert workbench2
+ fi
+ fi
+
+ BRANCH=$(git rev-parse --abbrev-ref HEAD)
+
+ set -x
+
+ git add -A
+ if ! git diff --cached --exit-code --quiet; then
+ git commit -m"prepare for deploy"
+ fi
+
+ # Used for rolling updates to disable individual nodes at the
+ # load balancer.
+ export DISABLED_CONTROLLER=""
+ if [[ -z "$NODE" ]]; then
+ for NODE in "${!NODES[@]}"; do
+ # First, just confirm we can ssh to each node.
+ $(ssh_cmd "$NODE") $DEPLOY_USER@$NODE true
+ done
+
+ for NODE in "${!NODES[@]}"; do
+ # Do 'database' role first,
+ if [[ "${NODES[$NODE]}" =~ database ]]; then
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
+ done
+
+ BALANCER=${ROLE2NODES['balancer']:-}
+
+ # Check if there are multiple controllers, they'll be comma-separated
+ # in ROLE2NODES
+ if [[ ${ROLE2NODES['controller']} =~ , ]]; then
+ # If we have multiple controllers then there must be
+ # load balancer. We want to do a rolling update, take
+ # down each node at the load balancer before updating
+ # it.
+
+ for NODE in "${!NODES[@]}"; do
+ if [[ "${NODES[$NODE]}" =~ controller ]]; then
+ export DISABLED_CONTROLLER=$NODE
+
+ # Update balancer that the node is disabled
+ deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
+
+ # Now update the node itself
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
+ done
+ else
+ # Only one controller, check if it wasn't already taken care of.
+ NODE=${ROLE2NODES['controller']}
+ if [[ ! -z "${NODES[$NODE]:-}" ]]; then
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
+ fi
+
+ if [[ -n "$BALANCER" ]]; then
+ # Deploy balancer. In the rolling update case, this
+ # will re-enable all the controllers at the balancer.
+ export DISABLED_CONTROLLER=""
+ deploynode $BALANCER "${NODES[$BALANCER]}" $BRANCH
+ unset NODES[$BALANCER]
+ fi
+
+ for NODE in "${!NODES[@]}"; do
+ # Everything else (we removed the nodes that we
+ # already deployed from the list)
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ done
+ else
+ # Just deploy the node that was supplied on the command line.
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ fi
+
+ set +x
+ echo
+ echo "Completed deploy, run 'installer.sh diagnostics' to verify the install"
+
+ ;;
+
+diagnostics)
+ loadconfig
+
+ set +u
+ declare LOCATION=$1
+ set -u
+
+ if ! which arvados-client; then
+ echo "arvados-client not found, install 'arvados-client' package with 'apt-get' or 'yum'"
+ exit 1
+ fi
+
+ if [[ -z "$LOCATION" ]]; then
+ echo "Need to provide '-internal-client' or '-external-client'"
+ echo
+ echo "-internal-client You are running this on the same private network as the Arvados cluster (e.g. on one of the Arvados nodes)"
+ echo "-external-client You are running this outside the private network of the Arvados cluster (e.g. your workstation)"
+ exit 1
+ fi
+
+ export ARVADOS_API_HOST="${DOMAIN}:${CONTROLLER_EXT_SSL_PORT}"
+ export ARVADOS_API_TOKEN="$SYSTEM_ROOT_TOKEN"
+
+ arvados-client diagnostics $LOCATION
+ ;;
+
+*)
+ echo "Arvados installer"
+ echo ""
+ echo "initialize initialize the setup directory for configuration"
+ echo "terraform create cloud resources using terraform"
+ echo "terraform-destroy destroy cloud resources created by terraform"
+ echo "generate-tokens generate random values for tokens"
+ echo "deploy deploy the configuration from the setup directory"
+ echo "diagnostics check your install using diagnostics"
+ ;;
esac
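As an aside, the jumphost handling in ssh_cmd() reduces to standard OpenSSH -J proxying; a minimal standalone sketch, with hypothetical host names:

  # Direct connection when USE_SSH_JUMPHOST is empty:
  ssh -F "$SSH_CONFFILE" deploy@node1.example.com true
  # Proxied through a bastion when USE_SSH_JUMPHOST is set:
  ssh -F "$SSH_CONFFILE" -J deploy@bastion.example.com deploy@node1.example.com true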
commit 371aed14aedb33ed2279a1decab6c6c310fca028
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 25 15:22:56 2023 -0300
20889: Checks that cert files are present before trying to use them.
Also, some documentation fixes.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index eac40218cc..2d8bbfc806 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -20,14 +20,17 @@ Copy your certificates to the directory specified with the variable @CUSTOM_CERT
The script expects cert/key files with these basenames (matching the role except for <i>keepweb</i>, which is split in both <i>download / collections</i>):
+# @balancer@ -- Optional on multi-node installations
+# @collections@ -- Part of keepweb, must be a wildcard for @*.collections.${DOMAIN}@
# @controller@
-# @websocket@ -- note: corresponds to default domain @ws.${DOMAIN}@
-# @keepproxy@ -- note: corresponds to default domain @keep.${DOMAIN}@
# @download@ -- Part of keepweb
-# @collections@ -- Part of keepweb, must be a wildcard for @*.collections.${DOMAIN}@
+# @grafana@ -- Service available by default on multi-node installations
+# @keepproxy@ -- Corresponds to default domain @keep.${DOMAIN}@
+# @prometheus@ -- Service available by default on multi-node installations
+# @webshell@
+# @websocket@ -- Corresponds to default domain @ws.${DOMAIN}@
# @workbench@
# @workbench2@
-# @webshell@
For example, for the @keepproxy@ service the script will expect to find this certificate:
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 81aa2ab099..697c95b82b 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -267,8 +267,8 @@ The @local.params.secrets@ file is intended to store security-sensitive data suc
h3. Parameters from @local.params@:
-# Set @CLUSTER@ to the 5-character cluster identifier (e.g "xarv1")
-# Set @DOMAIN@ to the base DNS domain of the environment, e.g. "xarv1.example.com"
+# Set @CLUSTER@ to the 5-character cluster identifier (e.g. "xarv1").
+# Set @DOMAIN@ to the base DNS domain of the environment (e.g. "xarv1.example.com").
# Set the @*_INT_IP@ variables with the internal (private) IP addresses of each host. Since services share hosts, some hosts are the same. See "note about /etc/hosts":#etchosts
# Edit @CLUSTER_INT_CIDR@, this should be the CIDR of the private network that Arvados is running on, e.g. the VPC. If you used terraform, this is emitted as @cluster_int_cidr@.
_CIDR stands for "Classless Inter-Domain Routing" and describes which portion of the IP address that refers to the network. For example 192.168.3.0/24 means that the first 24 bits are the network (192.168.3) and the last 8 bits are a specific host on that network._
@@ -342,11 +342,13 @@ Arvados requires a database that is compatible with PostgreSQL 9.5 or later. Fo
...
)
</code></pre>
-# In @local.params@, set @DATABASE_INT_IP@ to the database endpoint (can be a hostname, does not have to be an IP address).
-<pre><code>DATABASE_INT_IP=...
+# In @local.params@, set @DATABASE_INT_IP@ to an empty string and @DATABASE_EXTERNAL_SERVICE_HOST_OR_IP@ to the database endpoint (can be a hostname, does not have to be an IP address).
+<pre><code>DATABASE_INT_IP=""
+...
+DATABASE_EXTERNAL_SERVICE_HOST_OR_IP="arvados.xxxxxxx.eu-east-1.rds.amazonaws.com"
</code></pre>
-# In @local.params@, set @DATABASE_PASSWORD@ to the correct value. "See the previous section describing correct quoting":#localparams
-# In @local_config_dir/pillars/arvados.sls@ you may need to adjust the database name and user. This can be found in the section @arvados.cluster.database@.
+# In @local.params.secrets@, set @DATABASE_PASSWORD@ to the correct value. "See the previous section describing correct quoting":#localparams
+# In @local.params@ you may need to adjust the database name and user.
h2(#further_customization). Further customization of the installation (optional)
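As an optional sanity check (not part of the installer), you can confirm the external endpoint is reachable with the configured credentials; the values below reuse the illustrative ones above:

  psql "host=arvados.xxxxxxx.eu-east-1.rds.amazonaws.com user=xarv1_arvados dbname=xarv1_arvados" -c 'SELECT 1'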
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index a1e3841a3d..2ad002990e 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -139,6 +139,15 @@ deploynode() {
fi
}
+checkcert() {
+ local CERTNAME=$1
+ local CERTPATH="${CONFIG_DIR}/certs/${CERTNAME}"
+ if [[ ! -f "${CERTPATH}.crt" || ! -e "${CERTPATH}.key" ]]; then
+ echo "Missing ${CERTPATH}.crt or ${CERTPATH}.key files"
+ exit 1
+ fi
+}
+
loadconfig() {
if ! [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then
echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
@@ -285,6 +294,38 @@ case "$subcmd" in
exit 1
fi
+ if [[ ${SSL_MODE} == "bring-your-own" ]]; then
+ if [[ ! -z "${ROLE2NODES['balancer']:-}" ]]; then
+ checkcert balancer
+ fi
+ if [[ ! -z "${ROLE2NODES['controller']:-}" ]]; then
+ checkcert controller
+ fi
+ if [[ ! -z "${ROLE2NODES['keepproxy']:-}" ]]; then
+ checkcert keepproxy
+ fi
+ if [[ ! -z "${ROLE2NODES['keepweb']:-}" ]]; then
+ checkcert collections
+ checkcert download
+ fi
+ if [[ ! -z "${ROLE2NODES['monitoring']:-}" ]]; then
+ checkcert grafana
+ checkcert prometheus
+ fi
+ if [[ ! -z "${ROLE2NODES['webshell']:-}" ]]; then
+ checkcert webshell
+ fi
+ if [[ ! -z "${ROLE2NODES['websocket']:-}" ]]; then
+ checkcert websocket
+ fi
+ if [[ ! -z "${ROLE2NODES['workbench']:-}" ]]; then
+ checkcert workbench
+ fi
+ if [[ ! -z "${ROLE2NODES['workbench2']:-}" ]]; then
+ checkcert workbench2
+ fi
+ fi
+
BRANCH=$(git rev-parse --abbrev-ref HEAD)
set -x
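checkcert() only verifies that the files exist; if you also want to confirm each certificate matches its key before deploying, one common (optional) openssl check is:

  # Identical digests mean the cert and key belong together:
  openssl x509 -in local_config_dir/certs/controller.crt -noout -pubkey | sha256sum
  openssl pkey -in local_config_dir/certs/controller.key -pubout | sha256sum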
commit b4e0af034e1454e39f3e7b2e38c5ed944d9fd1ba
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 25 10:30:11 2023 -0300
20889: Fixes a bug when database & controller roles are on a single node.
The database role gets handled first, so the controller node gets removed
from the list; then, when checking whether multiple controllers were available,
the code path for a single controller didn't check if the node was already
removed from the node list.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index fe04047431..a1e3841a3d 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -338,10 +338,12 @@ case "$subcmd" in
fi
done
else
- # Only one controller
+ # Only one controller, check if it wasn't already taken care of.
NODE=${ROLE2NODES['controller']}
- deploynode $NODE "${NODES[$NODE]}" $BRANCH
- unset NODES[$NODE]
+ if [[ ! -z "${NODES[$NODE]:-}" ]]; then
+ deploynode $NODE "${NODES[$NODE]}" $BRANCH
+ unset NODES[$NODE]
+ fi
fi
if [[ -n "$BALANCER" ]] ; then
commit ebeaa637c88e314feebea81037c4706467f1a090
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 18:47:26 2023 -0300
20889: Allows customization of the database name and user.
Also allows specifying an external PG service for the single-host cases.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 5fe2c0a6e9..84df363c2e 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -7,6 +7,9 @@
{%- set max_workers = [_workers, 8]|max %}
{%- set max_reqs = ("__CONTROLLER_MAX_QUEUED_REQUESTS__" or 128)|int %}
{%- set database_host = ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "__DATABASE_INT_IP__") %}
+{%- set database_name = "__DATABASE_NAME__" %}
+{%- set database_user = "__DATABASE_USER__" %}
+{%- set database_password = "__DATABASE_PASSWORD__" %}
# The variables commented out are the default values that the formula uses.
# The uncommented values are REQUIRED values. If you don't set them, running
@@ -73,10 +76,10 @@ arvados:
database:
# max concurrent connections per arvados server daemon
# connection_pool_max: 32
- name: __CLUSTER___arvados
+ name: {{ database_name }}
host: {{ database_host }}
- password: "__DATABASE_PASSWORD__"
- user: __CLUSTER___arvados
+ password: {{ database_password }}
+ user: {{ database_user }}
encoding: en_US.utf8
client_encoding: UTF8
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
index 35544730ad..5883f19241 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
@@ -5,6 +5,11 @@
#
# SPDX-License-Identifier: AGPL-3.0
+{%- set database_host = ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "127.0.0.1") %}
+{%- set database_name = "__DATABASE_NAME__" %}
+{%- set database_user = "__DATABASE_USER__" %}
+{%- set database_password = "__DATABASE_PASSWORD__" %}
+
# The variables commented out are the default values that the formula uses.
# The uncommented values are REQUIRED values. If you don't set them, running
# this formula will fail.
@@ -65,10 +70,10 @@ arvados:
database:
# max concurrent connections per arvados server daemon
# connection_pool_max: 32
- name: __CLUSTER___arvados
- host: 127.0.0.1
- password: "__DATABASE_PASSWORD__"
- user: __CLUSTER___arvados
+ name: {{ database_name }}
+ host: {{ database_host }}
+ password: {{ database_password }}
+ user: {{ database_user }}
extra_conn_params:
client_encoding: UTF8
# Centos7 does not enable SSL by default, so we disable
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
index 10a9b79c94..e85b709c2c 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
@@ -5,6 +5,11 @@
#
# SPDX-License-Identifier: AGPL-3.0
+{%- set database_host = ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "127.0.0.1") %}
+{%- set database_name = "__DATABASE_NAME__" %}
+{%- set database_user = "__DATABASE_USER__" %}
+{%- set database_password = "__DATABASE_PASSWORD__" %}
+
# The variables commented out are the default values that the formula uses.
# The uncommented values are REQUIRED values. If you don't set them, running
# this formula will fail.
@@ -65,10 +70,10 @@ arvados:
database:
# max concurrent connections per arvados server daemon
# connection_pool_max: 32
- name: __CLUSTER___arvados
- host: 127.0.0.1
- password: "__DATABASE_PASSWORD__"
- user: __CLUSTER___arvados
+ name: {{ database_name }}
+ host: {{ database_host }}
+ password: {{ database_password }}
+ user: {{ database_user }}
extra_conn_params:
client_encoding: UTF8
# Centos7 does not enable SSL by default, so we disable
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 63a51f7731..0cfae3e284 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -141,6 +141,8 @@ KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
# Set this if using an external PostgreSQL service.
#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index 551051939e..54da585d38 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -76,6 +76,11 @@ KEEP_INT_IP=""
KEEPSTORE0_INT_IP=""
SHELL_INT_IP=""
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set this if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index ab0ee1be1b..d42b4cb54c 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -86,6 +86,11 @@ KEEP_INT_IP=""
KEEPSTORE0_INT_IP=""
SHELL_INT_IP=""
+DATABASE_NAME="${CLUSTER}_arvados"
+DATABASE_USER="${CLUSTER}_arvados"
+# Set this if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index e76d3cc6ba..b44fc8b19b 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -175,7 +175,11 @@ apply_var_substitutions() {
s#__LE_AWS_REGION__#${LE_AWS_REGION:-}#g;
s#__LE_AWS_SECRET_ACCESS_KEY__#${LE_AWS_SECRET_ACCESS_KEY:-}#g;
s#__LE_AWS_ACCESS_KEY_ID__#${LE_AWS_ACCESS_KEY_ID:-}#g;
+ s#__DATABASE_NAME__#${DATABASE_NAME}#g;
+ s#__DATABASE_USER__#${DATABASE_USER}#g;
s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
+ s#__DATABASE_INT_IP__#${DATABASE_INT_IP:-}#g;
+ s#__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__#${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}#g;
s#__KEEPWEB_EXT_SSL_PORT__#${KEEPWEB_EXT_SSL_PORT}#g;
s#__KEEP_EXT_SSL_PORT__#${KEEP_EXT_SSL_PORT}#g;
s#__MANAGEMENT_TOKEN__#${MANAGEMENT_TOKEN}#g;
@@ -196,8 +200,6 @@ apply_var_substitutions() {
s#__SHELL_INT_IP__#${SHELL_INT_IP}#g;
s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
s#__WORKBENCH2_INT_IP__#${WORKBENCH2_INT_IP}#g;
- s#__DATABASE_INT_IP__#${DATABASE_INT_IP:-}#g;
- s#__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__#${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}#g;
s#__WORKBENCH_SECRET_KEY__#${WORKBENCH_SECRET_KEY}#g;
s#__SSL_KEY_ENCRYPTED__#${SSL_KEY_ENCRYPTED}#g;
s#__SSL_KEY_AWS_REGION__#${SSL_KEY_AWS_REGION:-}#g;
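The new @local.params@ defaults simply derive the database identifiers from the cluster id; for example (illustrative):

  CLUSTER="xarv1"
  DATABASE_NAME="${CLUSTER}_arvados"   # -> xarv1_arvados
  DATABASE_USER="${CLUSTER}_arvados"   # -> xarv1_arvados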
commit 707c3f219b6cf7baea3f5d2960343dc637cb7a9f
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 18:05:10 2023 -0300
20889: Allows configurable S3 bucket name and IAM profile for Keep backend.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 29d414cf5c..5fe2c0a6e9 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -146,8 +146,8 @@ arvados:
Replication: 2
Driver: S3
DriverParameters:
- Bucket: __CLUSTER__-nyw5e-000000000000000-volume
- IAMRole: __CLUSTER__-keepstore-00-iam-role
+ Bucket: __KEEP_AWS_S3_BUCKET__
+ IAMRole: __KEEP_AWS_IAM_ROLE__
Region: __KEEP_AWS_REGION__
Users:
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 1fc38fca4b..63a51f7731 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -53,8 +53,10 @@ COMPUTE_SUBNET="subnet_fixme_or_this_wont_work"
COMPUTE_AWS_REGION="${AWS_REGION}"
COMPUTE_USER="${DEPLOY_USER}"
-# Keep S3 backend region
+# Keep S3 backend settings
KEEP_AWS_REGION="${AWS_REGION}"
+KEEP_AWS_S3_BUCKET="${CLUSTER}-nyw5e-000000000000000-volume"
+KEEP_AWS_IAM_ROLE="${CLUSTER}-keepstore-00-iam-role"
# If you going to provide your own certificates for Arvados, the provision script can
# help you deploy them. In order to do that, you need to set `SSL_MODE=bring-your-own` above,
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index b2c5d71928..e76d3cc6ba 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -222,6 +222,8 @@ apply_var_substitutions() {
s#__COMPUTE_SUBNET__#${COMPUTE_SUBNET:-}#g;
s#__COMPUTE_AWS_REGION__#${COMPUTE_AWS_REGION:-}#g;
s#__COMPUTE_USER__#${COMPUTE_USER:-}#g;
+ s#__KEEP_AWS_S3_BUCKET__#${KEEP_AWS_S3_BUCKET:-}#g;
+ s#__KEEP_AWS_IAM_ROLE__#${KEEP_AWS_IAM_ROLE:-}#g;
s#__KEEP_AWS_REGION__#${KEEP_AWS_REGION:-}#g" \
"${SRCFILE}" > "${DSTFILE}"
}
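For context, apply_var_substitutions() is plain sed templating that uses # as the delimiter so substituted values may contain slashes; a reduced sketch with hypothetical file names:

  KEEP_AWS_S3_BUCKET="my-keep-data-bucket"
  # Rewrite every __KEEP_AWS_S3_BUCKET__ placeholder in the pillar template:
  sed "s#__KEEP_AWS_S3_BUCKET__#${KEEP_AWS_S3_BUCKET:-}#g" arvados.sls.in > arvados.sls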
commit fcd1d0754f12f601967067ed1931d0d380f5a426
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 17:56:05 2023 -0300
20889: Adds config handling of external database service.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 3017900880..29d414cf5c 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -6,6 +6,7 @@
{%- set _workers = ("__CONTROLLER_MAX_WORKERS__" or grains['num_cpus']*2)|int %}
{%- set max_workers = [_workers, 8]|max %}
{%- set max_reqs = ("__CONTROLLER_MAX_QUEUED_REQUESTS__" or 128)|int %}
+{%- set database_host = ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "__DATABASE_INT_IP__") %}
# The variables commented out are the default values that the formula uses.
# The uncommented values are REQUIRED values. If you don't set them, running
@@ -73,7 +74,7 @@ arvados:
# max concurrent connections per arvados server daemon
# connection_pool_max: 32
name: __CLUSTER___arvados
- host: __DATABASE_INT_IP__
+ host: {{ database_host }}
password: "__DATABASE_PASSWORD__"
user: __CLUSTER___arvados
encoding: en_US.utf8
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
index 70320d610c..26877f35df 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
@@ -99,6 +99,7 @@ prometheus:
instance: arvados-dispatch-cloud.__CLUSTER__
cluster: __CLUSTER__
+ {%- if "__DATABASE_INT_IP__" != "" %}
# Database
- job_name: postgresql
static_configs:
@@ -109,6 +110,7 @@ prometheus:
labels:
instance: database.__CLUSTER__
cluster: __CLUSTER__
+ {%- endif %}
# Nodes
{%- set node_list = "__NODELIST__".split(',') %}
diff --git a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
index 68aeab3abb..42f492e811 100644
--- a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
@@ -8,12 +8,15 @@
{%- set tpldir = curr_tpldir %}
#CRUDE, but functional
+
+{%- if "__DATABASE_INT_IP__" != "" %}
extra_extra_hosts_entries_etc_hosts_database_host_present:
host.present:
- ip: __DATABASE_INT_IP__
- names:
- db.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
- database.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+{%- endif %}
extra_extra_hosts_entries_etc_hosts_api_host_present:
host.present:
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 0cf52c7343..1fc38fca4b 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -139,6 +139,9 @@ KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17
+# Set this if using an external PostgreSQL service.
+#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=
+
# Performance tuning parameters. If these are not set, workers
# defaults on the number of cpus and queued requests defaults to 128.
#CONTROLLER_MAX_WORKERS=
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index ea98fdec33..b2c5d71928 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -196,7 +196,8 @@ apply_var_substitutions() {
s#__SHELL_INT_IP__#${SHELL_INT_IP}#g;
s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
s#__WORKBENCH2_INT_IP__#${WORKBENCH2_INT_IP}#g;
- s#__DATABASE_INT_IP__#${DATABASE_INT_IP}#g;
+ s#__DATABASE_INT_IP__#${DATABASE_INT_IP:-}#g;
+ s#__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__#${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}#g;
s#__WORKBENCH_SECRET_KEY__#${WORKBENCH_SECRET_KEY}#g;
s#__SSL_KEY_ENCRYPTED__#${SSL_KEY_ENCRYPTED}#g;
s#__SSL_KEY_AWS_REGION__#${SSL_KEY_AWS_REGION:-}#g;
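The ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "__DATABASE_INT_IP__") expression relies on empty strings being falsy in Jinja, so the external host wins only when it was actually set. A sketch of the equivalent logic in shell:

  # Prefer the external endpoint; fall back to the internal IP when it is empty or unset:
  DATABASE_HOST="${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-$DATABASE_INT_IP}"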
commit 45f934ab9b1d0b6b9ce1797aa37660c28d5b114c
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 17:35:39 2023 -0300
20889: Removes unnecessary documentation about rolling upgrades.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index cad0675449..81aa2ab099 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -31,7 +31,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
# "Initial user and login":#initial_user
# "Monitoring and Metrics":#monitoring
# "Load balancing controllers":#load_balancing
-## "Rolling upgrades procedure":#rolling-upgrades
# "After the installation":#post_install
h2(#introduction). Introduction
@@ -520,57 +519,9 @@ Once the infrastructure is deployed, you'll then need to define which node will
)
</code></pre>
-Note that we also set the @database@ role to its own node.
+Note that we also set the @database@ role to its own node instead of just leaving it on a shared controller node.
-h3(#rolling-upgrades). Rolling upgrades procedure
-
-Once you have more than one controller backend node, it's easy to take one at a time from the backend pool to upgrade it to a newer version of Arvados (which might involve applying database migrations) by adding its name to the @DISABLED_CONTROLLER@ variable in @local.params@. For example:
-
-<pre><code>...
-DISABLED_CONTROLLER="controller1"
-...</code></pre>
-
-Then, apply the configuration change to just the load-balancer:
-
-<pre><code class="userinput">./installer.sh deploy controller.xarv1.example.com</code></pre>
-
-This will allow you to do the necessary changes to the @controller1@ node without service disruption, as it will not be receiving any traffic until you remove it from the @DISABLED_CONTROLLER@ variable.
-
-Next step is applying the @deploy@ command to @controller1@:
-
-<pre><code class="userinput">./installer.sh deploy controller1.xarv1.example.com</code></pre>
-
-After that, disable the other controller node by editing @local.params@:
-
-<pre><code>...
-DISABLED_CONTROLLER="controller2"
-...</code></pre>
-
-...applying the changes on the balancer node:
-
-<pre><code class="userinput">./installer.sh deploy controller.xarv1.example.com</code></pre>
-
-Then, deploy the changes to the recently disabled @controller2@ node:
-
-<pre><code class="userinput">./installer.sh deploy controller2.xarv1.example.com</code></pre>
-
-This won't cause a service interruption because the load balancer is already routing all traffic to the othe @controller1@ node.
-
-And the last step is enabling both controller nodes by making the following change to @local.params@:
-
-<pre><code>...
-DISABLED_CONTROLLER=""
-...</code></pre>
-
-...and running:
-
-<pre><code class="userinput">./installer.sh deploy controller.xarv1.example.com</code></pre>
-
-This should get all your @controller@ nodes correctly upgraded, and you can continue executing the @deploy@ command with the rest of the nodes individually, or just run:
-
-<pre><code class="userinput">./installer.sh deploy</code></pre>
-
-Only the nodes with pending changes might require certain services to be restarted. In this example, the @workbench@ node will have the remaining Arvados services upgraded and restarted. However, these services are not as critical as the ones on the @controller@ nodes.
+Each time you run @installer.sh deploy@, the system automatically performs rolling upgrades: it makes changes to one controller node at a time, after removing it from the balancer, so that there's no downtime.
h2(#post_install). After the installation
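Schematically, the deploy subcommand now runs the old manual sequence by itself; a simplified sketch of the loop shown in the installer.sh diff above (node names hypothetical):

  for NODE in controller1 controller2; do
    export DISABLED_CONTROLLER=$NODE
    deploynode "$BALANCER" "${NODES[$BALANCER]}" "$BRANCH"  # drain $NODE at the balancer
    deploynode "$NODE" "${NODES[$NODE]}" "$BRANCH"          # upgrade the drained controller
  done
  export DISABLED_CONTROLLER=""                             # final balancer pass re-enables all
  deploynode "$BALANCER" "${NODES[$BALANCER]}" "$BRANCH"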
commit e52080d58838ddb5b1c157113e8a4876db7ba924
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 16:31:45 2023 -0300
20889: Removes DISABLED_CONTROLLER from the user-editable local.params file.
As the rolling upgrade procedure is done automatically, this is not needed
and could cause confusion by being in local.params.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/common.sh b/tools/salt-install/common.sh
index 215fb50ecb..7036e116c9 100644
--- a/tools/salt-install/common.sh
+++ b/tools/salt-install/common.sh
@@ -17,6 +17,7 @@ else
fi
USE_SSH_JUMPHOST=${USE_SSH_JUMPHOST:-}
+DISABLED_CONTROLLER=""
# Comma-separated list of nodes. This is used to dynamically adjust
# salt pillars.
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 26cd16ed57..0cf52c7343 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -139,11 +139,6 @@ KEEP_INT_IP=${WORKBENCH1_INT_IP}
KEEPSTORE0_INT_IP=10.1.2.13
SHELL_INT_IP=10.1.2.17
-# In a load balanced deployment, you can do rolling upgrades by specifying one
-# controller node name at a time, so that it gets removed from the pool and can
-# be upgraded.
-DISABLED_CONTROLLER=""
-
# Performance tuning parameters. If these are not set, workers
# defaults on the number of cpus and queued requests defaults to 128.
#CONTROLLER_MAX_WORKERS=
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index fc48bb76cc..551051939e 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -76,8 +76,6 @@ KEEP_INT_IP=""
KEEPSTORE0_INT_IP=""
SHELL_INT_IP=""
-DISABLED_CONTROLLER=""
-
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index 30f97de29d..ab0ee1be1b 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -86,8 +86,6 @@ KEEP_INT_IP=""
KEEPSTORE0_INT_IP=""
SHELL_INT_IP=""
-DISABLED_CONTROLLER=""
-
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
# CONFIG_DIR="local_config_dir"
commit c32f2129960b060195b773f19d8582f1c693953d
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 16:07:41 2023 -0300
20889: Sets default values for environment variables.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/common.sh b/tools/salt-install/common.sh
index 0c5dd50e6c..215fb50ecb 100644
--- a/tools/salt-install/common.sh
+++ b/tools/salt-install/common.sh
@@ -16,6 +16,8 @@ else
exit 1
fi
+USE_SSH_JUMPHOST=${USE_SSH_JUMPHOST:-}
+
# Comma-separated list of nodes. This is used to dynamically adjust
# salt pillars.
NODELIST=""
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 3a5662beda..ea98fdec33 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -173,8 +173,8 @@ apply_var_substitutions() {
s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;
s#__INITIAL_USER__#${INITIAL_USER}#g;
s#__LE_AWS_REGION__#${LE_AWS_REGION:-}#g;
- s#__LE_AWS_SECRET_ACCESS_KEY__#${LE_AWS_SECRET_ACCESS_KEY}#g;
- s#__LE_AWS_ACCESS_KEY_ID__#${LE_AWS_ACCESS_KEY_ID}#g;
+ s#__LE_AWS_SECRET_ACCESS_KEY__#${LE_AWS_SECRET_ACCESS_KEY:-}#g;
+ s#__LE_AWS_ACCESS_KEY_ID__#${LE_AWS_ACCESS_KEY_ID:-}#g;
s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;
s#__KEEPWEB_EXT_SSL_PORT__#${KEEPWEB_EXT_SSL_PORT}#g;
s#__KEEP_EXT_SSL_PORT__#${KEEP_EXT_SSL_PORT}#g;
@@ -450,7 +450,7 @@ echo "...arvados"
test -d arvados || git clone --quiet https://git.arvados.org/arvados-formula.git ${F_DIR}/arvados
# If we want to try a specific branch of the formula
-if [ "x${BRANCH:-}" != "xmain" ]; then
+if [[ ! -z "${BRANCH:-}" && "x${BRANCH}" != "xmain" ]]; then
( cd ${F_DIR}/arvados && git checkout --quiet -t origin/"${BRANCH}" -b "${BRANCH}" )
elif [ "x${ARVADOS_TAG:-}" != "x" ]; then
( cd ${F_DIR}/arvados && git checkout --quiet tags/"${ARVADOS_TAG}" -b "${ARVADOS_TAG}" )
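These ${VAR:-} fallbacks matter because the scripts run under bash's set -u, where expanding an unset variable aborts; a minimal illustration:

  set -u
  unset LE_AWS_REGION
  echo "region=[${LE_AWS_REGION:-}]"   # prints region=[] instead of aborting
  # echo "$LE_AWS_REGION"              # would fail: LE_AWS_REGION: unbound variable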
commit b87f98819a9d71c55ab8b4512f5feb8c604f6921
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Thu Aug 24 15:36:27 2023 -0300
20889: Adds a configurable data retention parameter for Prometheus.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
index 9952e3bbc4..70320d610c 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
@@ -5,6 +5,7 @@
{%- set controller_nodes = "__CONTROLLER_NODES__".split(',') %}
{%- set enable_balancer = ("__ENABLE_BALANCER__"|to_bool) %}
+{%- set data_retention_time = "__PROMETHEUS_DATA_RETENTION_TIME__" %}
### PROMETHEUS
prometheus:
@@ -18,6 +19,9 @@ prometheus:
use_upstream_archive: true
component:
prometheus:
+ service:
+ args:
+ storage.tsdb.retention.time: {{ data_retention_time }}
config:
global:
scrape_interval: 15s
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 5d4ebdc1d5..26cd16ed57 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -89,9 +89,14 @@ SSL_KEY_AWS_REGION="${AWS_REGION}"
# Customize Prometheus & Grafana web UI access credentials
MONITORING_USERNAME=${INITIAL_USER}
MONITORING_EMAIL=${INITIAL_USER_EMAIL}
+
# Sets the directory for Grafana dashboards
# GRAFANA_DASHBOARDS_DIR="${SCRIPT_DIR}/local_config_dir/dashboards"
+# Sets the amount of data (expressed in time) Prometheus keeps in its
+# time-series database. Default is 15 days.
+# PROMETHEUS_DATA_RETENTION_TIME="180d"
+
# The mapping of nodes to roles
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 5dd40dd76c..3a5662beda 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -211,6 +211,7 @@ apply_var_substitutions() {
s#__DISABLED_CONTROLLER__#${DISABLED_CONTROLLER}#g;
s#__BALANCER_NODENAME__#${ROLE2NODES['balancer']:-}#g;
s#__PROMETHEUS_NODENAME__#${ROLE2NODES['monitoring']:-}#g;
+ s#__PROMETHEUS_DATA_RETENTION_TIME__#${PROMETHEUS_DATA_RETENTION_TIME:-15d}#g;
s#__CONTROLLER_NODES__#${ROLE2NODES['controller']:-}#g;
s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
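For reference, that pillar arg maps onto the standard Prometheus server flag, so with PROMETHEUS_DATA_RETENTION_TIME="180d" the daemon is effectively started as (config path illustrative):

  prometheus --config.file=/etc/prometheus/prometheus.yml \
             --storage.tsdb.retention.time=180d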
commit fe79ab0df1b03383c52283a3dcd37020bebd08cb
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Wed Aug 23 17:27:21 2023 -0300
20889: Installs the Prometheus server from archives instead of the package repo.
This avoids some incompatibilities (at least on Ubuntu 20.04) with Grafana.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
index 6dc90c840b..9952e3bbc4 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls
@@ -14,7 +14,8 @@ prometheus:
- alertmanager
- node_exporter
pkg:
- use_upstream_repo: true
+ use_upstream_repo: false
+ use_upstream_archive: true
component:
prometheus:
config:
-----------------------------------------------------------------------
hooks/post-receive