[arvados] updated: 2.4.1-6-gb8c1729e6
git repository hosting
git at public.arvados.org
Thu Jun 30 18:09:31 UTC 2022
Summary of changes:
services/login-sync/bin/arvados-login-sync          |  16 +-
.../multi_host/aws/pillars/arvados.sls              |   2 +-
.../multi_host/aws/pillars/postgresql.sls           |   2 +-
.../aws/states/shell_cron_add_login_sync.sls        |   7 +
.../multiple_hostnames/pillars/nginx_passenger.sls  |   2 +-
.../multiple_hostnames/pillars/postgresql.sls       |   2 +-
.../multiple_hostnames/states/host_entries.sls      |   2 +-
.../single_hostname/pillars/nginx_passenger.sls     |   2 +-
.../single_hostname/pillars/postgresql.sls          |   2 +-
.../single_hostname/states/host_entries.sls         |   2 +-
tools/salt-install/installer.sh                     | 257 +++++++++++++++++++++
.../local.params.example.multiple_hosts             |  19 +-
...l.params.example.single_host_multiple_hostnames  |  11 +
...ocal.params.example.single_host_single_hostname  |  11 +
tools/salt-install/provision.sh                     |   4 +-
15 files changed, 327 insertions(+), 14 deletions(-)
create mode 100755 tools/salt-install/installer.sh
via b8c1729e63dcf8e94b296d6ee90cf7c16c79d72c (commit)
from 0841b144012a6cd54c927c2141d72411b0c86070 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
commit b8c1729e63dcf8e94b296d6ee90cf7c16c79d72c
Author: Peter Amstutz <peter.amstutz at curii.com>
Date: Thu Jun 30 14:06:48 2022 -0400
Merge branch '18870-installer' refs #18870
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>
diff --git a/services/login-sync/bin/arvados-login-sync b/services/login-sync/bin/arvados-login-sync
index da8a21efa..5c6691ab9 100755
--- a/services/login-sync/bin/arvados-login-sync
+++ b/services/login-sync/bin/arvados-login-sync
@@ -10,6 +10,7 @@ require 'etc'
require 'fileutils'
require 'yaml'
require 'optparse'
+require 'open3'
req_envs = %w(ARVADOS_API_HOST ARVADOS_API_TOKEN ARVADOS_VIRTUAL_MACHINE_UUID)
req_envs.each do |k|
@@ -124,11 +125,12 @@ begin
unless pwnam[l[:username]]
STDERR.puts "Creating account #{l[:username]}"
# Create new user
- unless system("useradd", "-m",
+ out, st = Open3.capture2e("useradd", "-m",
"-c", username,
"-s", "/bin/bash",
username)
- STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
+ if st.exitstatus != 0
+ STDERR.puts "Account creation failed for #{l[:username]}:\n#{out}"
next
end
begin
@@ -150,7 +152,10 @@ begin
if existing_groups.index(addgroup).nil?
# User should be in group, but isn't, so add them.
STDERR.puts "Add user #{username} to #{addgroup} group"
- system("usermod", "-aG", addgroup, username)
+ out, st = Open3.capture2e("usermod", "-aG", addgroup, username)
+ if st.exitstatus != 0
+ STDERR.puts "Failed to add #{username} to #{addgroup} group:\n#{out}"
+ end
end
end
@@ -158,7 +163,10 @@ begin
if groups.index(removegroup).nil?
# User is in a group, but shouldn't be, so remove them.
STDERR.puts "Remove user #{username} from #{removegroup} group"
- system("gpasswd", "-d", username, removegroup)
+ out, st = Open3.capture2e("gpasswd", "-d", username, removegroup)
+ if st.exitstatus != 0
+ STDERR.puts "Failed to remove user #{username} from #{removegroup} group:\n#{out}"
+ end
end
end
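In the hunks above, Kernel#system, which reports only an exit status
via $?, is replaced by Open3.capture2e, which returns the command's
combined stdout/stderr together with its Process::Status, so a failed
useradd/usermod/gpasswd run can be logged with the actual error text.
A rough shell analogue of the capture2e pattern (illustrative only,
not part of the commit):

    # Capture combined stdout/stderr plus the exit status, roughly
    # what Open3.capture2e hands back to the Ruby code as (out, st).
    out=$(useradd -m -c "$username" -s /bin/bash "$username" 2>&1)
    st=$?
    if [ "$st" -ne 0 ]; then
        echo "Account creation failed for $username:" >&2
        echo "$out" >&2
    fi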
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index c284934dd..941bd95f1 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -84,7 +84,7 @@ arvados:
resources:
virtual_machines:
shell:
- name: shell
+ name: shell.__CLUSTER__.__DOMAIN__
backend: __SHELL_INT_IP__
port: 4200
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
index e06ddd041..d6320da24 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
@@ -19,7 +19,7 @@ postgres:
users:
__CLUSTER___arvados:
ensure: present
- password: __DATABASE_PASSWORD__
+ password: "__DATABASE_PASSWORD__"
# tablespaces:
# arvados_tablespace:
diff --git a/tools/salt-install/config_examples/multi_host/aws/states/shell_cron_add_login_sync.sls b/tools/salt-install/config_examples/multi_host/aws/states/shell_cron_add_login_sync.sls
index 86c591e97..9028b9b10 100644
--- a/tools/salt-install/config_examples/multi_host/aws/states/shell_cron_add_login_sync.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/states/shell_cron_add_login_sync.sls
@@ -75,6 +75,13 @@ extra_shell_cron_add_login_sync_add_{{ vm }}_arvados_virtual_machine_uuid_cron_e
- onlyif:
- /bin/grep -qE "[a-z0-9]{5}-2x53u-[a-z0-9]{15}" /tmp/vm_uuid_{{ vm }}
+extra_shell_cron_add_login_sync_add_{{ vm }}_sbin_to_path_cron_env_present:
+ cron.env_present:
+ - name: PATH
+ - value: "/bin:/usr/bin:/usr/sbin"
+ - onlyif:
+ - /bin/grep -qE "[a-z0-9]{5}-2x53u-[a-z0-9]{15}" /tmp/vm_uuid_{{ vm }}
+
extra_shell_cron_add_login_sync_add_{{ vm }}_arvados_login_sync_cron_present:
cron.present:
- name: /usr/local/bin/arvados-login-sync
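The PATH entry added above matters because cron runs jobs with a
minimal environment (typically PATH=/usr/bin:/bin), so the useradd,
usermod, and gpasswd binaries that live in /usr/sbin would not be
found by arvados-login-sync. With the new cron.env_present state, the
rendered crontab on the shell node would look roughly like this (the
schedule line is hypothetical; only the PATH line comes from this
commit):

    PATH=/bin:/usr/bin:/usr/sbin
    */2 * * * * /usr/local/bin/arvados-login-sync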
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
index dfddf3b62..cf0877971 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
@@ -55,7 +55,7 @@ nginx:
- add_header: 'Strict-Transport-Security "max-age=63072000" always'
# OCSP stapling
- # FIXME! Stapling does not work with self-signed certificates, so disabling for tests
+ # NOTE! Stapling does not work with self-signed certificates, so disabling for tests
# - ssl_stapling: 'on'
# - ssl_stapling_verify: 'on'
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
index f3bc09f65..edb961eba 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
@@ -38,7 +38,7 @@ postgres:
users:
__CLUSTER___arvados:
ensure: present
- password: __DATABASE_PASSWORD__
+ password: "__DATABASE_PASSWORD__"
# tablespaces:
# arvados_tablespace:
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls
index 379f4765c..c2d34ea28 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls
@@ -12,7 +12,7 @@ arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
- ip: 127.0.1.1
- names:
- {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
- # FIXME! This just works for our testings.
+ # NOTE! This just works for our testing.
# Won't work if the cluster name != host name
{%- for entry in [
'api',
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
index 21c1510de..26e2baf04 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_passenger.sls
@@ -55,7 +55,7 @@ nginx:
- add_header: 'Strict-Transport-Security "max-age=63072000" always'
# OCSP stapling
- # FIXME! Stapling does not work with self-signed certificates, so disabling for tests
+ # NOTE! Stapling does not work with self-signed certificates, so disabling for tests
# - ssl_stapling: 'on'
# - ssl_stapling_verify: 'on'
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls
index a69b88cb1..14452a990 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls
@@ -40,7 +40,7 @@ postgres:
users:
__CLUSTER___arvados:
ensure: present
- password: __DATABASE_PASSWORD__
+ password: "__DATABASE_PASSWORD__"
# tablespaces:
# arvados_tablespace:
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls b/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls
index a688f4f8c..51308fffa 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls
@@ -21,7 +21,7 @@ arvados_test_salt_states_examples_single_host_etc_hosts_host_present:
- ip: 127.0.1.1
- names:
- {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
- # FIXME! This just works for our testing.
+ # NOTE! This just works for our testing.
# Won't work if the cluster name != host name
{%- for entry in [
'api',
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
new file mode 100755
index 000000000..e5ff7be4e
--- /dev/null
+++ b/tools/salt-install/installer.sh
@@ -0,0 +1,257 @@
+#!/bin/bash
+
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+#
+# installer.sh
+#
+# Helps manage the configuration in a git repository, and then deploy
+# nodes by pushing a copy of the git repository to each node and
+# running the provision script to do the actual installation and
+# configuration.
+#
+
+set -eu
+
+# The parameter file
+declare CONFIG_FILE=local.params
+
+# The salt template directory
+declare CONFIG_DIR=local_config_dir
+
+# The 5-character Arvados cluster id
+# This will be populated by loadconfig()
+declare CLUSTER
+
+# The parent domain (not including the cluster id)
+# This will be populated by loadconfig()
+declare DOMAIN
+
+# A bash associative array listing each node and mapping to the roles
+# that should be provisioned on those nodes.
+# This will be populated by loadconfig()
+declare -A NODES
+
+# The ssh user we'll use
+# This will be populated by loadconfig()
+declare DEPLOY_USER
+
+# The git repository that we'll push to on all the nodes
+# This will be populated by loadconfig()
+declare GITTARGET
+
+sync() {
+ local NODE=$1
+ local BRANCH=$2
+
+ # Synchronizes the configuration by creating a git repository on
+ # each node, pushing our branch, and updating the checkout.
+
+ if [[ "$NODE" != localhost ]] ; then
+ if ! ssh $NODE test -d ${GITTARGET}.git ; then
+
+ # Initialize the git repository (1st time case). We're
+ # actually going to make two repositories here because git
+ # will complain if you try to push to a repository with a
+ # checkout. So we're going to create a "bare" repository
+ # and then clone a regular repository (with a checkout)
+ # from that.
+
+ ssh $NODE git init --bare ${GITTARGET}.git
+ if ! git remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
+ git remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
+ fi
+ git push $NODE $BRANCH
+ ssh $NODE git clone ${GITTARGET}.git ${GITTARGET}
+ fi
+
+ # The update case.
+ #
+ # Push to the bare repository on the remote node, then in the
+ # remote node repository with the checkout, pull the branch
+ # from the bare repository.
+
+ git push $NODE $BRANCH
+ ssh $NODE "git -C ${GITTARGET} checkout ${BRANCH} && git -C ${GITTARGET} pull"
+ fi
+}
+
+deploynode() {
+ local NODE=$1
+ local ROLES=$2
+
+ # Deploy a node. This runs the provision script on the node, with
+ # the appropriate roles.
+
+ if [[ -z "$ROLES" ]] ; then
+ echo "No roles declared for '$NODE' in ${CONFIG_FILE}"
+ exit 1
+ fi
+
+ if [[ "$NODE" = localhost ]] ; then
+ sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES}
+ else
+ ssh $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES}"
+ fi
+}
+
+loadconfig() {
+ if [[ ! -s $CONFIG_FILE ]] ; then
+ echo "Must be run from an initialized setup dir; maybe you need to run 'initialize' first?"
+ exit 1
+ fi
+ source ${CONFIG_FILE}
+ GITTARGET=arvados-deploy-config-${CLUSTER}
+}
+
+subcmd="${1:-}"
+if [[ -n "$subcmd" ]] ; then
+ shift
+fi
+case "$subcmd" in
+ initialize)
+ if [[ ! -f provision.sh ]] ; then
+ echo "Must be run from arvados/tools/salt-install"
+ exit
+ fi
+
+ set +u
+ SETUPDIR=$1
+ PARAMS=$2
+ SLS=$3
+ set -u
+
+ err=
+ if [[ -z "$PARAMS" || ! -f local.params.example.$PARAMS ]] ; then
+ echo "Not found: local.params.example.$PARAMS"
+ echo "Expected one of multiple_hosts, single_host_multiple_hostnames, single_host_single_hostname"
+ err=1
+ fi
+
+ if [[ -z "$SLS" || ! -d config_examples/$SLS ]] ; then
+ echo "Not found: config_examples/$SLS"
+ echo "Expected one of multi_host/aws, single_host/multiple_hostnames, single_host/single_hostname"
+ err=1
+ fi
+
+ if [[ -z "$SETUPDIR" || -z "$PARAMS" || -z "$SLS" ]]; then
+ echo "installer.sh <setup dir to initialize> <params template> <config template>"
+ err=1
+ fi
+
+ if [[ -n "$err" ]] ; then
+ exit 1
+ fi
+
+ echo "Initializing $SETUPDIR"
+ git init $SETUPDIR
+ cp -r *.sh tests $SETUPDIR
+
+ cp local.params.example.$PARAMS $SETUPDIR/${CONFIG_FILE}
+ cp -r config_examples/$SLS $SETUPDIR/${CONFIG_DIR}
+
+ cd $SETUPDIR
+ git add *.sh ${CONFIG_FILE} ${CONFIG_DIR} tests
+ git commit -m"initial commit"
+
+ echo "setup directory initialized, now go to $SETUPDIR, edit '${CONFIG_FILE}' and '${CONFIG_DIR}' as needed, then run 'installer.sh deploy'"
+ ;;
+ deploy)
+ set +u
+ NODE=$1
+ set -u
+
+ loadconfig
+
+ if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_DIR} ; then
+ echo
+ echo "Some parameters still need to be updated. Please fix them and then re-run deploy."
+ exit 1
+ fi
+
+ BRANCH=$(git branch --show-current)
+
+ set -x
+
+ git add -A
+ if ! git diff --cached --exit-code ; then
+ git commit -m"prepare for deploy"
+ fi
+
+ if [[ -z "$NODE" ]]; then
+ for NODE in "${!NODES[@]}"
+ do
+ # First, push the git repo to each node. This also
+ # confirms that we have git and can log into each
+ # node.
+ sync $NODE $BRANCH
+ done
+
+ for NODE in "${!NODES[@]}"
+ do
+ # Do 'database' role first,
+ if [[ "${NODES[$NODE]}" =~ database ]] ; then
+ deploynode $NODE ${NODES[$NODE]}
+ unset NODES[$NODE]
+ fi
+ done
+
+ for NODE in "${!NODES[@]}"
+ do
+ # then 'api' or 'controller' roles
+ if [[ "${NODES[$NODE]}" =~ (api|controller) ]] ; then
+ deploynode $NODE ${NODES[$NODE]}
+ unset NODES[$NODE]
+ fi
+ done
+
+ for NODE in "${!NODES[@]}"
+ do
+ # Everything else (we removed the nodes that we
+ # already deployed from the list)
+ deploynode $NODE ${NODES[$NODE]}
+ done
+ else
+ # Just deploy the node that was supplied on the command line.
+ sync $NODE $BRANCH
+ deploynode $NODE ${NODES[$NODE]}
+ fi
+
+ echo
+ echo "Completed deploy, run 'installer.sh diagnostics' to verify the install"
+
+ ;;
+ diagnostics)
+ loadconfig
+
+ set +u
+ declare LOCATION=$1
+ set -u
+
+ if ! which arvados-client ; then
+ echo "arvados-client not found, install 'arvados-client' package with 'apt-get' or 'yum'"
+ exit 1
+ fi
+
+ if [[ -z "$LOCATION" ]] ; then
+ echo "Need to provide '-internal-client' or '-external-client'"
+ echo
+ echo "-internal-client You are running this on the same private network as the Arvados cluster (e.g. on one of the Arvados nodes)"
+ echo "-external-client You are running this outside the private network of the Arvados cluster (e.g. your workstation)"
+ exit 1
+ fi
+
+ export ARVADOS_API_HOST="${CLUSTER}.${DOMAIN}"
+ export ARVADOS_API_TOKEN="$SYSTEM_ROOT_TOKEN"
+
+ arvados-client diagnostics $LOCATION
+ ;;
+ *)
+ echo "Arvados installer"
+ echo ""
+ echo "initialize initialize the setup directory for configuration"
+ echo "deploy deploy the configuration from the setup directory"
+ echo "diagnostics check your install using diagnostics"
+ ;;
+esac
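The two-repository dance in sync() above works around git's default
receive.denyCurrentBranch behavior: pushing to a branch that is
checked out in a non-bare repository is refused. A minimal standalone
sketch of the same pattern, with the node name, user, and paths
hypothetical:

    # On the remote node: a bare repo to push into, plus a working clone.
    ssh node1 git init --bare deploy.git
    ssh node1 git clone deploy.git deploy

    # Locally: point a remote at the bare repo and push the branch.
    git remote add node1 user@node1:deploy.git
    git push node1 main

    # On the remote node: update the working tree from the bare repo.
    ssh node1 "git -C deploy checkout main && git -C deploy pull"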
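Putting the subcommands together, the intended workflow for the new
script looks something like the following; the setup directory name
is arbitrary, and the template names are the ones the initialize
checks above accept:

    cd arvados/tools/salt-install

    # Create a setup directory (its own git repository) from templates.
    ./installer.sh initialize ~/arvados-setup multiple_hosts multi_host/aws

    cd ~/arvados-setup
    # Edit local.params and local_config_dir to replace every "fixme"
    # placeholder (the commit is handled by the script), then deploy
    # all nodes listed in NODES, or pass a single node name:
    ./installer.sh deploy

    # Finally, verify the install from inside the private network:
    ./installer.sh diagnostics -internal-client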
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 31a69e984..ade1ad467 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -8,9 +8,26 @@
# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.
CLUSTER="cluster_fixme_or_this_wont_work"
-# The domainname you want tou give to your cluster's hosts
+# The domain name you want to give to your cluster's hosts
+# The resulting hostnames will be $SERVICE.$CLUSTER.$DOMAIN
DOMAIN="domain_fixme_or_this_wont_work"
+# For multi-node installs, the ssh login for each node
+# must be root or able to sudo
+DEPLOY_USER=root
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+ [controller.${CLUSTER}.${DOMAIN}]=api,controller,websocket,dispatcher,keepbalance
+ [keep0.${CLUSTER}.${DOMAIN}]=keepstore
+ [keep1.${CLUSTER}.${DOMAIN}]=keepstore
+ [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
+ [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
+ [shell.${CLUSTER}.${DOMAIN}]=shell
+)
+
# Host SSL port where you want to point your browser to access Arvados
# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
# You can point it to another port if desired
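The new NODES map is what drives installer.sh: each key is a hostname
the installer will ssh to as DEPLOY_USER, and each value is the
comma-separated role list handed to provision.sh --roles. As a
hypothetical example, splitting keep storage across a third node
would just mean one more entry:

    NODES=(
      [controller.${CLUSTER}.${DOMAIN}]=api,controller,websocket,dispatcher,keepbalance
      [keep0.${CLUSTER}.${DOMAIN}]=keepstore
      [keep1.${CLUSTER}.${DOMAIN}]=keepstore
      [keep2.${CLUSTER}.${DOMAIN}]=keepstore
      [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
      [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
      [shell.${CLUSTER}.${DOMAIN}]=shell
    )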
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index 2ce155651..20f334166 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -11,6 +11,17 @@ CLUSTER="cluster_fixme_or_this_wont_work"
# The domainname you want tou give to your cluster's hosts
DOMAIN="domain_fixme_or_this_wont_work"
+# For multi-node installs, the ssh login for each node
+# must be root or able to sudo
+DEPLOY_USER=root
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+ [localhost]=api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell
+)
+
# External ports used by the Arvados services
CONTROLLER_EXT_SSL_PORT=443
KEEP_EXT_SSL_PORT=25101
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index 7add9868d..a68450094 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -11,6 +11,17 @@ CLUSTER="cluster_fixme_or_this_wont_work"
# The domainname for your cluster's hosts
DOMAIN="domain_fixme_or_this_wont_work"
+# For multi-node installs, the ssh login for each node
+# must be root or able to sudo
+DEPLOY_USER=root
+
+# The mapping of nodes to roles
+# installer.sh will log in to each of these nodes and then provision
+# it for the specified roles.
+NODES=(
+ [localhost]=api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell
+)
+
# Set this value when installing a cluster in a single host with a single
# hostname to access all the instances. HOSTNAME_EXT should be set to the
# external hostname for the instance.
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 204c3db83..7107bd080 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -226,6 +226,8 @@ T_DIR="/tmp/cluster_tests"
arguments ${@}
+declare -A NODES
+
if [ -s ${CONFIG_FILE} ]; then
source ${CONFIG_FILE}
else
@@ -244,7 +246,7 @@ if [ ! -d ${CONFIG_DIR} ]; then
exit 1
fi
-if grep -q 'fixme_or_this_wont_work' ${CONFIG_FILE} ; then
+if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_DIR} ; then
echo >&2 "The config file ${CONFIG_FILE} has some parameters that need to be modified."
echo >&2 "Please, fix them and re-run the provision script."
exit 1
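The broadened placeholder check above is equivalent to running the
following by hand; any case-insensitive occurrence of "fixme"
anywhere in the parameter file or the copied template directory now
aborts provisioning, where previously only the literal
fixme_or_this_wont_work string in the config file was detected (file
names shown are the defaults used by installer.sh):

    # grep exits 0 when it finds matches, so provisioning must stop.
    if grep -rni 'fixme' local.params local_config_dir ; then
        echo "Some parameters still need to be updated." >&2
        exit 1
    fi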
-----------------------------------------------------------------------
hooks/post-receive